"""Core authentication views."""
import logging
import oath
from django.http import (
HttpResponse, HttpResponseRedirect, Http404, JsonResponse)
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import translation
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils.translation import ugettext as _
from django.views import generic
from django.views.decorators.cache import never_cache
from django.contrib.auth import (
authenticate, login, logout, views as auth_views
)
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.tokens import default_token_generator
import django_otp
from modoboa.core import forms
from modoboa.core.password_hashers import get_password_hasher
from modoboa.lib import cryptutils
from modoboa.lib.views import UserFormKwargsMixin
from modoboa.parameters import tools as param_tools
from .. import models
from .. import sms_backends
from .. import signals
from .base import find_nextlocation
logger = logging.getLogger("modoboa.auth")
def dologin(request):
"""Try to authenticate."""
error = None
if request.method == "POST":
form = forms.LoginForm(request.POST)
if form.is_valid():
logger = logging.getLogger("modoboa.auth")
user = authenticate(username=form.cleaned_data["username"],
password=form.cleaned_data["password"])
if user and user.is_active:
condition = (
user.is_local and
param_tools.get_global_parameter(
"update_scheme", raise_exception=False)
)
if condition:
# check if password scheme is correct
scheme = param_tools.get_global_parameter(
"password_scheme", raise_exception=False)
# use SHA512CRYPT as default fallback
if scheme is None:
pwhash = get_password_hasher('sha512crypt')()
else:
pwhash = get_password_hasher(scheme)()
if not user.password.startswith(pwhash.scheme):
logger.info(
_("Password scheme mismatch. Updating %s password"),
user.username
)
user.set_password(form.cleaned_data["password"])
user.save()
if pwhash.needs_rehash(user.password):
logger.info(
_("Password hash parameter missmatch. "
"Updating %s password"),
user.username
)
user.set_password(form.cleaned_data["password"])
user.save()
login(request, user)
if not form.cleaned_data["rememberme"]:
request.session.set_expiry(0)
translation.activate(request.user.language)
request.session[translation.LANGUAGE_SESSION_KEY] = (
request.user.language)
logger.info(
_("User '%s' successfully logged in") % user.username
)
signals.user_login.send(
sender="dologin",
username=form.cleaned_data["username"],
password=form.cleaned_data["password"])
return HttpResponseRedirect(find_nextlocation(request, user))
error = _(
"Your username and password didn't match. Please try again.")
logger.warning(
"Failed connection attempt from '%(addr)s' as user '%(user)s'"
% {"addr": request.META["REMOTE_ADDR"],
"user": form.cleaned_data["username"]}
)
nextlocation = request.POST.get("next", "")
httpcode = 401
else:
form = forms.LoginForm()
nextlocation = request.GET.get("next", "")
httpcode = 200
announcements = signals.get_announcements.send(
sender="login", location="loginpage")
announcements = [announcement[1] for announcement in announcements]
return HttpResponse(
render_to_string(
"registration/login.html", {
"form": form, "error": error, "next": nextlocation,
"annoucements": announcements},
request),
status=httpcode)
dologin = never_cache(dologin)
def dologout(request):
"""Logout current user."""
if not request.user.is_anonymous:
signals.user_logout.send(sender="dologout", request=request)
logger = logging.getLogger("modoboa.auth")
logger.info(
_("User '{}' successfully logged out").format(
request.user.username))
logout(request)
return HttpResponseRedirect(reverse("core:login"))
class PasswordResetView(auth_views.PasswordResetView):
"""Custom view to override form."""
form_class = forms.PasswordResetForm
def setup(self, request, *args, **kwargs):
super().setup(request, *args, **kwargs)
self.from_email = request.localconfig.parameters.get_value(
"sender_address"
)
def get_context_data(self, **kwargs):
"""Include help text."""
context = super().get_context_data(**kwargs)
context["announcement"] = (
self.request.localconfig.parameters
.get_value("password_recovery_msg")
)
return context
def form_valid(self, form):
"""Redirect to code verification page if needed."""
sms_password_recovery = (
self.request.localconfig.parameters
.get_value("sms_password_recovery")
)
if not sms_password_recovery:
return super().form_valid(form)
user = models.User._default_manager.filter(
email=form.cleaned_data["email"], phone_number__isnull=False
).first()
if not user:
# Fallback to email
return super().form_valid(form)
backend = sms_backends.get_active_backend(
self.request.localconfig.parameters)
secret = cryptutils.random_hex_key(20)
code = oath.totp(secret)
text = _(
"Please use the following code to recover your Modoboa password: {}"
.format(code)
)
if not backend.send(text, [str(user.phone_number)]):
return super().form_valid(form)
self.request.session["user_pk"] = user.pk
self.request.session["totp_secret"] = secret
return HttpResponseRedirect(reverse("password_reset_confirm_code"))
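# Flow note (summary of the method above): when SMS recovery is enabled and the
# matching user has a phone number, form_valid() stores the user's pk and a fresh
# TOTP secret in the session, sends the one-time code through the configured SMS
# backend and redirects to VerifySMSCodeView below; any failure along the way
# falls back to the regular e-mail based reset handled by the parent class.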
class VerifySMSCodeView(generic.FormView):
"""View to verify a code received by SMS."""
form_class = forms.VerifySMSCodeForm
template_name = "registration/password_reset_confirm_code.html"
def get_form_kwargs(self):
"""Include totp secret in kwargs."""
kwargs = super().get_form_kwargs()
try:
kwargs.update({"totp_secret": self.request.session["totp_secret"]})
except KeyError:
raise Http404
return kwargs
def form_valid(self, form):
"""Redirect to reset password form."""
user = models.User.objects.get(pk=self.request.session.pop("user_pk"))
self.request.session.pop("totp_secret")
token = default_token_generator.make_token(user)
uid = urlsafe_base64_encode(force_bytes(user.pk))
url = reverse("password_reset_confirm", args=[uid, token])
return HttpResponseRedirect(url)
class ResendSMSCodeView(generic.View):
"""A view to resend validation code."""
def get(self, request, *args, **kwargs):
sms_password_recovery = (
self.request.localconfig.parameters
.get_value("sms_password_recovery")
)
if not sms_password_recovery:
raise Http404
try:
user = models.User._default_manager.get(
pk=self.request.session["user_pk"])
except KeyError:
raise Http404
backend = sms_backends.get_active_backend(
self.request.localconfig.parameters)
secret = cryptutils.random_hex_key(20)
code = oath.totp(secret)
text = _(
"Please use the following code to recover your Modoboa password: {}"
.format(code)
)
if not backend.send(text, [user.phone_number]):
raise Http404
self.request.session["totp_secret"] = secret
return JsonResponse({"status": "ok"})
class TwoFactorCodeVerifyView(LoginRequiredMixin,
UserFormKwargsMixin,
generic.FormView):
"""View to verify a 2FA code after login."""
form_class = forms.Verify2FACodeForm
template_name = "registration/twofactor_code_verify.html"
def form_valid(self, form):
"""Login user."""
django_otp.login(self.request, form.cleaned_data["tfa_code"])
return HttpResponseRedirect(
find_nextlocation(self.request, self.request.user)
)
import numpy as np
import matplotlib.pyplot as plt
import time
import random
import bisect
import json
import sys
from numpy import linalg as alg
from scipy import sparse
from sklearn import cross_validation as cv  # note: legacy CV API, requires scikit-learn < 0.20
from itertools import product
from collections import defaultdict
from functools import partial
from multiprocessing import Pool
# Read all data and metadata and match dimensions
def read_data(collection, dataset):
# collection: data collection folder
# dataset: dataset folder
# Read data and metadata
R = np.loadtxt('../data/' + collection + '/' + dataset + '/playcounts.txt', delimiter=",")
R = sparse.coo_matrix((R[:, 2], (R[:, 0], R[:, 1])))
num_users_R, num_items_R = R.shape
U = np.loadtxt('../data/' + collection + '/' + dataset + '/user_tags.txt', delimiter=",")
U = sparse.coo_matrix((U[:, 2], (U[:, 0], U[:, 1])))
num_users_U, num_tags_U = U.shape
T = np.loadtxt('../data/' + collection + '/' + dataset + '/item_tags.txt', delimiter=",")
T = sparse.coo_matrix((T[:, 2], (T[:, 0], T[:, 1])))
num_items_T, num_tags_T = T.shape
# Find maximum dataset dimensions
num_users = max(num_users_R, num_users_U)
num_items = max(num_items_R, num_items_T)
num_tags = max(num_tags_U, num_tags_T)
# Resize data and metadata
# R.reshape((num_users, num_items)) # not implemented yet...
# (coo is convenient for further fold preparation)
R = sparse.coo_matrix((R.data, (R.row, R.col)),
shape=(num_users, num_items))
# U.reshape((num_users, num_user_tags)) # not implemented yet...
U = sparse.csr_matrix((U.data, (U.row, U.col)),
shape=(num_users, num_tags))
# T.reshape((num_items, num_item_tags)) # not implemented yet...
T = sparse.csr_matrix((T.data, (T.row, T.col)),
shape=(num_items, num_tags))
data = {'full_R': R, 'U': U, 'T': T}
return data
# Weighting and folds for the ratings data
def get_data(full_R, num_folds, alpha):
# full_R: ratings in coo format
# num_folds: data splits
# alpha: weight for the binary ratings
# Make data splits balancing users in each fold
splits = cv.StratifiedKFold(full_R.row, n_folds=num_folds, random_state=1)
data = []
test_indices = open('test_' + dataset + '_WTMF.txt', 'w')
for train, test in splits:
# Train data (remind R is in coo format)
R = sparse.csr_matrix((full_R.data[train], (full_R.row[train],
full_R.col[train])),
shape=full_R.shape)
# P = R > 0 is really not needed through the code
# Weight data
weights = 1. + alpha * np.log(1. + R.data)
W = sparse.csr_matrix((weights, R.nonzero()), shape=full_R.shape)
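# Illustrative check (assuming natural log, which np.log is): with alpha = 120,
# a playcount of 10 gets confidence weight 1 + 120 * ln(11) ~= 289, while an
# unobserved cell keeps the implicit weight of 1 used in the ALS updates below.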
# Test data
Rt = sparse.coo_matrix((full_R.data[test], (full_R.row[test],
full_R.col[test])),
shape=full_R.shape)
fold_data = {'WR': W, 'Rt': Rt}
data.append(fold_data)
# Store test indices for further mpr calculation
np.savetxt(test_indices, test, fmt='%i')
test_indices.close()
return data
# Weighting for the tags
def get_metadata(U, T, beta, gamma):
# U, T: user and item tags in csr format
# beta, gamma: weights for user and item tags
# Weight user- and item-tags data
Uweights = 1. + beta * np.log(1. + U.data)
WU = sparse.csr_matrix((Uweights, U.nonzero()), shape=U.shape)
Tweights = 1. + gamma * np.log(1. + T.data)
WT = sparse.csr_matrix((Tweights, T.nonzero()), shape=T.shape)
metadata = {'WU': WU, 'WT': WT}
return metadata
# RMSE function
def loss_function(WR, WU, WT, P, Q, X, mu_user, mu_item):
# WR, WU, WT: weight matrices in sparse format
# P, Q, X: factor matrices
# mu_user, mu_item: weights for the user- and item-tags losses
# Loss in the ratings reconstruction
loss_ratings = 0.
for u, WRu in enumerate(WR):
WRu_dense = WRu.toarray()
Rbu_dense = np.ones(WRu.shape)
Rbu_dense[WRu_dense == 0.] = 0.
WRu_dense[WRu_dense == 0.] = 1. # blank cells are in fact 1s in W
PutQ = P[u].dot(Q.T)
loss_ratings += np.sum(WRu_dense * ((Rbu_dense - PutQ) ** 2))
# Loss in the user-tags reconstruction
loss_user_tags = 0.
for u, WUu in enumerate(WU):
WUu_dense = WUu.toarray()
Ubu_dense = np.ones(WUu.shape)
Ubu_dense[WUu_dense == 0.] = 0.
WUu_dense[WUu_dense == 0.] = 1. # blank cells are in fact 1s in W
PutX = P[u].dot(X.T)
loss_user_tags += np.sum(WUu_dense * ((Ubu_dense - PutX) ** 2))
# Loss in the item-tags reconstruction
loss_item_tags = 0.
for i, WTi in enumerate(WT):
WTi_dense = WTi.toarray()
Tbi_dense = np.ones(WTi.shape)
Tbi_dense[WTi_dense == 0.] = 0.
WTi_dense[WTi_dense == 0.] = 1. # blank cells are in fact 1s in W
QitX = Q[i].dot(X.T)
loss_item_tags += np.sum(WTi_dense * ((Tbi_dense - QitX) ** 2))
return loss_ratings + mu_user * loss_user_tags + mu_item * loss_item_tags
# Objective function
def cost_function(WR, WU, WT, P, Q, X, mu_user, mu_item, eta):
# WR, WU, WT: weight matrices in sparse format
# P, Q, X: factor matrices
# mu_user, mu_item: weights for the user- and item-tags losses
# eta: regularization term
# Loss
loss = loss_function(WR, WU, WT, P, Q, X, mu_user, mu_item)
# Regularization error
reg_P = (P ** 2).sum()
reg_Q = (Q ** 2).sum()
reg_X = (X ** 2).sum()
return loss + eta * (reg_P + reg_Q + reg_X)
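# For reference, the quantity minimized by the ALS loop below can be written as
#   sum_{u,i} W_ui (Rb_ui - p_u . q_i)^2
#   + mu_user * sum_{u,t} WU_ut (Ub_ut - p_u . x_t)^2
#   + mu_item * sum_{i,t} WT_it (Tb_it - q_i . x_t)^2
#   + eta * (||P||_F^2 + ||Q||_F^2 + ||X||_F^2)
# where Rb, Ub, Tb are the binarized matrices and blank cells carry weight 1.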
# Train and test a given fold (convenient for parallel cross-validation)
def run_this_fold(experiment, N_values, metadata, fold_and_data):
# experiment: set of parameters for the current experiment
# N_values: lengths of the recommendation lists
# metadata: user- and item-tags data
# fold_and_data: list including fold and data
# fold number, used to iterate
# data: split of data for the given fold
fold = fold_and_data[0]
data = fold_and_data[1]
results = defaultdict(list)
print ('\tWTMF with ' + str(experiment['num_iterations']) +
' it. of ALS. Launching fold ' + str(fold + 1) + '...')
# Train
P, Q = train_WTMF(data['WR'], metadata['WU'], metadata['WT'], False, fold,
**experiment)
# Test
for N in N_values:
mpr_num, mpr_den, rank = test_topN(P, Q, data['Rt'], N, False, fold,
experiment)
# Save results for each fold and each value of N
this_result = {'mpr_num': mpr_num, 'mpr_den': mpr_den, 'rank': rank,
'fold': fold}
results[N] = this_result
return results
# Train WeightedTags MF for implicit feedback
def train_WTMF(WR, WU, WT, plot, fold, alpha, beta, gamma,
mu_user, mu_item, eta, num_factors, num_iterations):
# WR, WU, WT: weight matrices in sparse format
# plot: should the train error evolution be plotted?
# fold: integer indicating which fold is being trained
# alpha, beta, gamma: weights for the decomposition
# mu_user, mu_item: weights for the user- and item-tags losses
# eta: regularization term
# num_factors, num_iterations: training options
# Random factors initialization
np.random.seed(1)
num_users, num_items = WR.shape
if WU.shape[1] != WT.shape[1]:
sys.exit("Tags are not correctly merged.")
num_tags = WU.shape[1]
P = np.random.rand(num_users, num_factors)
Q = np.random.rand(num_items, num_factors)
X = np.random.rand(num_tags, num_factors)
# Iterate Alternating Least Squares
cost = []  # training cost per iteration (only used when plot=True)
for iteration in range(num_iterations):
t0 = time.time()
# Common for all users, items and tags
tPP = P.T.dot(P)
tQQ = Q.T.dot(Q)
tXX = X.T.dot(X)
reg = eta * np.eye(num_factors)
# loop over users (index u)
for u, WRu in enumerate(WR):
# Use only active items for user u to speed-up
maskWR = WRu.nonzero()[1]
WRu_mask = WRu.data
WRu_mask_I = WRu_mask - np.array([1])
Q_mask = Q[maskWR, :]
# Rbu_mask = Rb.getrow(u).data # this is all 1, don't need it!
# Use only active tags for user u to speed-up
maskWU = WU.getrow(u).nonzero()[1]
WUu_mask = WU.getrow(u).data
WUu_mask_I = WUu_mask - np.array([1])
X_mask = X[maskWU, :]
# Ubu_mask = Ub.getrow(u).data # this is all 1, don't need it!
A_this_user = Q_mask.T.dot(WRu_mask_I[:, np.newaxis] * Q_mask)
A_this_user_tag = X_mask.T.dot(WUu_mask_I[:, np.newaxis] * X_mask)
A_user = tQQ + A_this_user + mu_user * (tXX + A_this_user_tag) + reg
# b_user = (Q_mask.T * (WRu_mask * Ru_mask)[np.newaxis, :]).sum(1)
b_user = (Q_mask.T * WRu_mask[np.newaxis, :]).sum(1)
# b_user_tag = mu_user * (X_mask.T *
# (WUu_mask * Uu_mask)[np.newaxis, :]).sum(1)
b_user_tag = mu_user * (X_mask.T * WUu_mask[np.newaxis, :]).sum(1)
P[u] = alg.solve(A_user, b_user + b_user_tag)
# loop over items (index i)
for i, WRi in enumerate(WR.T):
# Use only active users for item i to speed-up
maskWR = WRi.nonzero()[1]
WRi_mask = WRi.data
WRi_mask_I = WRi_mask - np.array([1])
P_mask = P[maskWR, :]
# Rbi_mask = Rb.getcol(i).data # this is all 1, don't need it!
# Use only active tags for item i to speed-up
maskWT = WT.getrow(i).nonzero()[1]
WTi_mask = WT.getrow(i).data
WTi_mask_I = WTi_mask - np.array([1])
X_mask = X[maskWT, :]
# Tbi_mask = Tb.getrow(i).data # this is all 1, don't need it!
A_this_item = P_mask.T.dot(WRi_mask_I[:, np.newaxis] * P_mask)
A_this_item_tag = X_mask.T.dot(WTi_mask_I[:, np.newaxis] * X_mask)
A_item = tPP + A_this_item + mu_item * (tXX + A_this_item_tag) + reg
# b_item = (P_mask.T * (WRi_mask * Ri_mask)[np.newaxis, :]).sum(1)
b_item = (P_mask.T * WRi_mask[np.newaxis, :]).sum(1)
# b_item_tag = mu_item * (X_mask.T *
# (WTi_mask * Ti_mask)[np.newaxis, :]).sum(1)
b_item_tag = mu_item * (X_mask.T * WTi_mask[np.newaxis, :]).sum(1)
Q[i] = alg.solve(A_item, b_item + b_item_tag)
# loop over tags (index t)
for t, WUt in enumerate(WU.T):
# Use only active users for tag t to speed-up
maskWU = WUt.nonzero()[1]
WUt_mask = WUt.data
WUt_mask_I = WUt_mask - np.array([1])
P_mask = P[maskWU, :]
# Ubt_mask = Ubt.data # this is all 1, don't need it!
# Use only active items for tag t to speed-up
# Note on getcol: Returns a (m x 1) CSR matrix (column vector).
# Thus, we use nonzero()[0] instead of nonzero()[1]
maskWT = WT.getcol(t).nonzero()[0]
WTt_mask = WT.getcol(t).data
WTt_mask_I = WTt_mask - np.array([1])
Q_mask = Q[maskWT, :]
# Tbt_mask = Tb.getcol(t).data # this is all 1, don't need it!
A_this_tag_user = P_mask.T.dot(WUt_mask_I[:, np.newaxis] * P_mask)
A_this_tag_item = Q_mask.T.dot(WTt_mask_I[:, np.newaxis] * Q_mask)
A_tag = (mu_user * (tPP + A_this_tag_user) +
mu_item * (tQQ + A_this_tag_item) + reg)
# b_tag_user = mu_user * (P_mask.T * (WUt_mask * Ut_mask)[np.newaxis, :]).sum(1)
b_tag_user = mu_user * (P_mask.T * WUt_mask[np.newaxis, :]).sum(1)
# b_tag_item = mu_item * (Q_mask.T * (WTt_mask * Tt_mask)[np.newaxis, :]).sum(1)
b_tag_item = mu_item * (Q_mask.T * WTt_mask[np.newaxis, :]).sum(1)
X[t] = alg.solve(A_tag, b_tag_user + b_tag_item)
t1 = time.time()
print ('\t\tTraining WTMF on fold ' + str(fold) + ', it. ' +
str(iteration) + ': ' + str(t1 - t0) + 's')
if plot: cost.append(cost_function(WR, WU, WT, P, Q, X, mu_user, mu_item, eta))
if plot:
plt.figure()
plt.title('WTMF training' + '\n' +
r'$\alpha = $' + str(alpha) + r', $\beta$ = ' + str(beta) +
', $\gamma$ = ' + str(gamma) + ', $\eta$ = ' + str(eta) +
', $\mu_{user}$ = ' + str(mu_user) + ', $\mu_{item}$ = ' +
str(mu_item) + '\nnum_factors = ' + str(num_factors) +
', num_iterations =' + str(num_iterations))
plt.plot(cost, label='cost',
marker='s', linestyle=':', color='m', linewidth=2)
plt.xlabel('Iteration Number')
plt.ylabel('Mean Squared Error')
plt.legend()
plt.subplots_adjust(top=0.85)
plt.show()
return P, Q
# Test by Mean Percentage Ranking
def test_topN(X, Y, Rt, N, plot, fold, parameters):
# X, Y: latent factor arrays
# Rt: test data
# N: length of the recommendation
# plot: should the rank be plotted?
# fold: integer indicating which fold is being trained
# parameters: to further pass to plot
# Initialize values
mpr_numerator = 0
rank = Rt.nnz * [None]
t0 = time.time()
# Loop over test set
# print '\t\tTesting by Mean Percentage Ranking at ' + str(N) + '...'
u_old = -1
for k, (u, i, rt) in enumerate(zip(Rt.row, Rt.col, Rt.data)):
if u != u_old:
Zu = X[u].dot(Y.T)
u_old = u
random.seed(1)
Zu_sample = random.sample(np.hstack((Zu[:i], Zu[(i + 1):])), N)
Zu_sample.sort()
# position of Zu[i] in Zu_sample but reversed order
rank[k] = N - bisect.bisect(Zu_sample, Zu[i])
mpr_numerator += rt * rank[k] / float(N)
t1 = time.time()
print ('\t\tTesting MPR at ' + str(N) + ' on fold ' + str(fold + 1) +
': ' + str(t1 - t0) + 's')
if plot:
plot_rank(rank, N, **parameters)
return mpr_numerator, Rt.data.sum(), rank
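# Reading the values returned above: rank[k] / N is the fraction of the N randomly
# sampled items that score higher than the held-out item, so the aggregated
# Mean Percentage Ranking computed later in join_folds_multiN is
#   MPR = sum_k rt_k * rank_k / N  /  sum_k rt_k
# (lower is better; roughly 0.5 corresponds to random recommendations).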
# Join results of MPR for each fold and each value of N
def join_folds_multiN(results, num_folds, N_values, plot, parameters):
# results: result for each fold
# num_folds: number of data splits
# N_values: possible values for the length of the recommendation
# plot: should the rank be plotted?
# parameters: to further pass to plot
out_mpr = defaultdict()
out_rank = defaultdict()
for N in N_values:
# Initialize values
mpr_num = 0.
mpr_den = 0.
rank = []
print ('\tJoining results of MPR at ' + str(N) + ' for each fold...')
for fold in range(num_folds):
mpr_num += results[fold][N]['mpr_num']
mpr_den += results[fold][N]['mpr_den']
rank += results[fold][N]['rank']
if plot:
plot_rank(rank, N, **parameters)
out_mpr[N] = mpr_num / mpr_den
out_rank[N] = rank
return out_mpr, out_rank
# Plot rank density and ecdf
def plot_rank(rank, N, alpha, beta, gamma, mu_user, mu_item, eta,
num_factors, num_iterations):
# rank: position of each element in the test set
# N: length of the recommendation
count, bins = np.histogram(rank, bins=100)
ecdf = np.cumsum(count) / float(np.sum(count))
fig, ax1 = plt.subplots()
plt.title('WeightedTagsMF test at Top' + str(N) + '\n' +
r'$\alpha = $' + str(alpha) + r', $\beta$ = ' + str(beta) +
', $\gamma$ = ' + str(gamma) + ', $\eta$ = ' + str(eta) +
', $\mu_{user}$ = ' + str(mu_user) + ', $\mu_{item}$ = ' +
str(mu_item) + '\nnum_factors = ' + str(num_factors) +
', num_iterations =' + str(num_iterations))
ax1.plot(bins[1:], count, label='count',
linestyle='-', color='b', linewidth=2)
ax1.set_xlabel('Rank')
ax1.set_ylabel('Density [count]')
ax1.set_ylim([0, max(count)])
ax1.legend(loc=2) # top left
ax2 = ax1.twinx()
ax2.plot(bins[1:], ecdf, label='ecdf',
linestyle='--', color='g', linewidth=2)
ax2.set_ylabel('Cumulative Distribution [%]')
ax2.set_ylim([0, 1])
ax2.legend(loc=1) # top right
plt.subplots_adjust(top=0.85)
plt.show()
# Go!
# Parameters for all experiments
param = {'alpha': [120.],
'beta': [120.],
'gamma': [120.],
'mu_user': [0.5],
'mu_item': [0.5],
'eta': [100., 1000.],
'num_factors': [10],
'num_iterations': [5]}
N_values = [100]
num_folds = 5
if len(sys.argv) > 1:
collection = sys.argv[1]
dataset = sys.argv[2]
else:
collection = 'dummy_collection'
dataset = 'dummy_dataset'
# Create all possible experiments
param_names = sorted(param)
experiments = [dict(zip(param_names, prod))
for prod in product(*(param[name] for name in param_names))]
num_experiments = len(experiments)
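# With the grid above only 'eta' has two candidate values, so this particular run
# builds 2 experiments; adding values to any other list multiplies the number of
# parameter combinations accordingly.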
# Run all experiments
data = read_data(collection, dataset)
for k, experiment in enumerate(experiments):
print ('Experiment ' + str(k + 1) + ' out of ' + str(num_experiments))
t0 = time.time()
# Data for this experiment
data_folds = get_data(data['full_R'], num_folds, experiment['alpha'])
# data_folds = get_data(dataset, num_folds, experiment['alpha'])
metadata = get_metadata(data['U'], data['T'], experiment['beta'],
experiment['gamma'])
# Pool of workers for parallel num_folds-CV and
# special function callable through fun(all_param, looping_index)
pool = Pool(processes=num_folds)
run_folds = partial(run_this_fold, experiment, N_values, metadata)
# Parallel loop over the folds
results = pool.map(run_folds, list(enumerate(data_folds)))
pool.close()
pool.join()
# Join CV results and save this experiment's result
mpr, rank = join_folds_multiN(results, num_folds, N_values, False,
experiment)
# if we only want the mpr ...
experiments[k]['mpr'] = mpr
# if we want to save rank too we should do...
# this_experiment = {'mpr': mpr, 'rank': rank}
# experiments[k].update(this_experiment)
t1 = time.time()
print ('\ttime elapsed in experiment ' + str(k + 1) + ': ' + str(t1 - t0))
# Save results in json format
print ('\tSaving results to file...')
with open('WTMF_' + dataset + '.json', 'w') as WTMF_output:
json.dump(experiments, WTMF_output)
WTMF_output.close()
"""Offer reusable conditions."""
import asyncio
from collections import deque
from datetime import datetime, timedelta
import functools as ft
import logging
import re
import sys
from typing import Any, Callable, Container, List, Optional, Set, Union, cast
from homeassistant.components import zone as zone_cmp
from homeassistant.components.device_automation import (
async_get_device_automation_platform,
)
from homeassistant.const import (
ATTR_GPS_ACCURACY,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_ABOVE,
CONF_AFTER,
CONF_ATTRIBUTE,
CONF_BEFORE,
CONF_BELOW,
CONF_CONDITION,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_STATE,
CONF_VALUE_TEMPLATE,
CONF_WEEKDAY,
CONF_ZONE,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
WEEKDAYS,
)
from homeassistant.core import HomeAssistant, State, callback
from homeassistant.exceptions import HomeAssistantError, TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.sun import get_astral_event_date
from homeassistant.helpers.template import Template
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as dt_util
FROM_CONFIG_FORMAT = "{}_from_config"
ASYNC_FROM_CONFIG_FORMAT = "async_{}_from_config"
_LOGGER = logging.getLogger(__name__)
INPUT_ENTITY_ID = re.compile(
r"^input_(?:select|text|number|boolean|datetime)\.(?!.+__)(?!_)[\da-z_]+(?<!_)$"
)
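# For example, "input_boolean.kitchen_override" or "input_number.target_temp" match
# this pattern, while "light.kitchen" or "input_boolean._bad__slug_" do not, so only
# input_* entity ids are later dereferenced as dynamic state values in state().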
ConditionCheckerType = Callable[[HomeAssistant, TemplateVarsType], bool]
async def async_from_config(
hass: HomeAssistant,
config: Union[ConfigType, Template],
config_validation: bool = True,
) -> ConditionCheckerType:
"""Turn a condition configuration into a method.
Should be run on the event loop.
"""
if isinstance(config, Template):
# We got a condition template, wrap it in a configuration to pass along.
config = {
CONF_CONDITION: "template",
CONF_VALUE_TEMPLATE: config,
}
condition = config.get(CONF_CONDITION)
for fmt in (ASYNC_FROM_CONFIG_FORMAT, FROM_CONFIG_FORMAT):
factory = getattr(sys.modules[__name__], fmt.format(condition), None)
if factory:
break
if factory is None:
raise HomeAssistantError(f'Invalid condition "{condition}" specified {config}')
# Check for partials to properly determine if coroutine function
check_factory = factory
while isinstance(check_factory, ft.partial):
check_factory = check_factory.func
if asyncio.iscoroutinefunction(check_factory):
return cast(
ConditionCheckerType, await factory(hass, config, config_validation)
)
return cast(ConditionCheckerType, factory(config, config_validation))
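# Naming convention used above: a condition type "x" resolves to "async_x_from_config"
# when an async factory exists in this module (e.g. "and" -> async_and_from_config
# below) and otherwise to "x_from_config" (e.g. "state" -> state_from_config),
# which is then awaited or called depending on whether it is a coroutine function.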
async def async_and_from_config(
hass: HomeAssistant, config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Create multi condition matcher using 'AND'."""
if config_validation:
config = cv.AND_CONDITION_SCHEMA(config)
checks = [
await async_from_config(hass, entry, False) for entry in config["conditions"]
]
def if_and_condition(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test and condition."""
try:
for check in checks:
if not check(hass, variables):
return False
except Exception as ex: # pylint: disable=broad-except
_LOGGER.warning("Error during and-condition: %s", ex)
return False
return True
return if_and_condition
async def async_or_from_config(
hass: HomeAssistant, config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Create multi condition matcher using 'OR'."""
if config_validation:
config = cv.OR_CONDITION_SCHEMA(config)
checks = [
await async_from_config(hass, entry, False) for entry in config["conditions"]
]
def if_or_condition(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test and condition."""
try:
for check in checks:
if check(hass, variables):
return True
except Exception as ex: # pylint: disable=broad-except
_LOGGER.warning("Error during or-condition: %s", ex)
return False
return if_or_condition
async def async_not_from_config(
hass: HomeAssistant, config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Create multi condition matcher using 'NOT'."""
if config_validation:
config = cv.NOT_CONDITION_SCHEMA(config)
checks = [
await async_from_config(hass, entry, False) for entry in config["conditions"]
]
def if_not_condition(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test not condition."""
try:
for check in checks:
if check(hass, variables):
return False
except Exception as ex: # pylint: disable=broad-except
_LOGGER.warning("Error during not-condition: %s", ex)
return True
return if_not_condition
def numeric_state(
hass: HomeAssistant,
entity: Union[None, str, State],
below: Optional[Union[float, str]] = None,
above: Optional[Union[float, str]] = None,
value_template: Optional[Template] = None,
variables: TemplateVarsType = None,
) -> bool:
"""Test a numeric state condition."""
return run_callback_threadsafe(
hass.loop,
async_numeric_state,
hass,
entity,
below,
above,
value_template,
variables,
).result()
def async_numeric_state(
hass: HomeAssistant,
entity: Union[None, str, State],
below: Optional[Union[float, str]] = None,
above: Optional[Union[float, str]] = None,
value_template: Optional[Template] = None,
variables: TemplateVarsType = None,
attribute: Optional[str] = None,
) -> bool:
"""Test a numeric state condition."""
if isinstance(entity, str):
entity = hass.states.get(entity)
if entity is None or (attribute is not None and attribute not in entity.attributes):
return False
value: Any = None
if value_template is None:
if attribute is None:
value = entity.state
else:
value = entity.attributes.get(attribute)
else:
variables = dict(variables or {})
variables["state"] = entity
try:
value = value_template.async_render(variables)
except TemplateError as ex:
_LOGGER.error("Template error: %s", ex)
return False
if value in (STATE_UNAVAILABLE, STATE_UNKNOWN):
return False
try:
fvalue = float(value)
except ValueError:
_LOGGER.warning(
"Value cannot be processed as a number: %s (Offending entity: %s)",
value,
entity,
)
return False
if below is not None:
if isinstance(below, str):
below_entity = hass.states.get(below)
if (
not below_entity
or below_entity.state in (STATE_UNAVAILABLE, STATE_UNKNOWN)
or fvalue >= float(below_entity.state)
):
return False
elif fvalue >= below:
return False
if above is not None:
if isinstance(above, str):
above_entity = hass.states.get(above)
if (
not above_entity
or above_entity.state in (STATE_UNAVAILABLE, STATE_UNKNOWN)
or fvalue <= float(above_entity.state)
):
return False
elif fvalue <= above:
return False
return True
def async_numeric_state_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with state based condition."""
if config_validation:
config = cv.NUMERIC_STATE_CONDITION_SCHEMA(config)
entity_ids = config.get(CONF_ENTITY_ID, [])
attribute = config.get(CONF_ATTRIBUTE)
below = config.get(CONF_BELOW)
above = config.get(CONF_ABOVE)
value_template = config.get(CONF_VALUE_TEMPLATE)
def if_numeric_state(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test numeric state condition."""
if value_template is not None:
value_template.hass = hass
return all(
async_numeric_state(
hass, entity_id, below, above, value_template, variables, attribute
)
for entity_id in entity_ids
)
return if_numeric_state
def state(
hass: HomeAssistant,
entity: Union[None, str, State],
req_state: Any,
for_period: Optional[timedelta] = None,
attribute: Optional[str] = None,
) -> bool:
"""Test if state matches requirements.
Async friendly.
"""
if isinstance(entity, str):
entity = hass.states.get(entity)
if entity is None or (attribute is not None and attribute not in entity.attributes):
return False
assert isinstance(entity, State)
if attribute is None:
value: Any = entity.state
else:
value = entity.attributes.get(attribute)
if not isinstance(req_state, list):
req_state = [req_state]
is_state = False
for req_state_value in req_state:
state_value = req_state_value
if (
isinstance(req_state_value, str)
and INPUT_ENTITY_ID.match(req_state_value) is not None
):
state_entity = hass.states.get(req_state_value)
if not state_entity:
continue
state_value = state_entity.state
is_state = value == state_value
if is_state:
break
if for_period is None or not is_state:
return is_state
return dt_util.utcnow() - for_period > entity.last_changed
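# Example: with for_period=timedelta(minutes=5) the condition only passes once the
# entity has already been in the required state for more than five minutes
# (last_changed older than now - 5 min); without for_period the match is immediate.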
def state_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with state based condition."""
if config_validation:
config = cv.STATE_CONDITION_SCHEMA(config)
entity_ids = config.get(CONF_ENTITY_ID, [])
req_states: Union[str, List[str]] = config.get(CONF_STATE, [])
for_period = config.get("for")
attribute = config.get(CONF_ATTRIBUTE)
if not isinstance(req_states, list):
req_states = [req_states]
def if_state(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Test if condition."""
return all(
state(hass, entity_id, req_states, for_period, attribute)
for entity_id in entity_ids
)
return if_state
def sun(
hass: HomeAssistant,
before: Optional[str] = None,
after: Optional[str] = None,
before_offset: Optional[timedelta] = None,
after_offset: Optional[timedelta] = None,
) -> bool:
"""Test if current time matches sun requirements."""
utcnow = dt_util.utcnow()
today = dt_util.as_local(utcnow).date()
before_offset = before_offset or timedelta(0)
after_offset = after_offset or timedelta(0)
sunrise_today = get_astral_event_date(hass, SUN_EVENT_SUNRISE, today)
sunset_today = get_astral_event_date(hass, SUN_EVENT_SUNSET, today)
sunrise = sunrise_today
sunset = sunset_today
if today > dt_util.as_local(
cast(datetime, sunrise_today)
).date() and SUN_EVENT_SUNRISE in (before, after):
tomorrow = dt_util.as_local(utcnow + timedelta(days=1)).date()
sunrise_tomorrow = get_astral_event_date(hass, SUN_EVENT_SUNRISE, tomorrow)
sunrise = sunrise_tomorrow
if today > dt_util.as_local(
cast(datetime, sunset_today)
).date() and SUN_EVENT_SUNSET in (before, after):
tomorrow = dt_util.as_local(utcnow + timedelta(days=1)).date()
sunset_tomorrow = get_astral_event_date(hass, SUN_EVENT_SUNSET, tomorrow)
sunset = sunset_tomorrow
if sunrise is None and SUN_EVENT_SUNRISE in (before, after):
# There is no sunrise today
return False
if sunset is None and SUN_EVENT_SUNSET in (before, after):
# There is no sunset today
return False
if before == SUN_EVENT_SUNRISE and utcnow > cast(datetime, sunrise) + before_offset:
return False
if before == SUN_EVENT_SUNSET and utcnow > cast(datetime, sunset) + before_offset:
return False
if after == SUN_EVENT_SUNRISE and utcnow < cast(datetime, sunrise) + after_offset:
return False
if after == SUN_EVENT_SUNSET and utcnow < cast(datetime, sunset) + after_offset:
return False
return True
def sun_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with sun based condition."""
if config_validation:
config = cv.SUN_CONDITION_SCHEMA(config)
before = config.get("before")
after = config.get("after")
before_offset = config.get("before_offset")
after_offset = config.get("after_offset")
def time_if(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Validate time based if-condition."""
return sun(hass, before, after, before_offset, after_offset)
return time_if
def template(
hass: HomeAssistant, value_template: Template, variables: TemplateVarsType = None
) -> bool:
"""Test if template condition matches."""
return run_callback_threadsafe(
hass.loop, async_template, hass, value_template, variables
).result()
def async_template(
hass: HomeAssistant, value_template: Template, variables: TemplateVarsType = None
) -> bool:
"""Test if template condition matches."""
try:
value: str = value_template.async_render(variables, parse_result=False)
except TemplateError as ex:
_LOGGER.error("Error during template condition: %s", ex)
return False
return value.lower() == "true"
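# Note: the rendered template is compared as a plain string, so the condition passes
# only when the template evaluates to "true" / "True" (case-insensitive); any other
# output, including "1" or "on", is treated as False.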
def async_template_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with state based condition."""
if config_validation:
config = cv.TEMPLATE_CONDITION_SCHEMA(config)
value_template = cast(Template, config.get(CONF_VALUE_TEMPLATE))
def template_if(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Validate template based if-condition."""
value_template.hass = hass
return async_template(hass, value_template, variables)
return template_if
def time(
hass: HomeAssistant,
before: Optional[Union[dt_util.dt.time, str]] = None,
after: Optional[Union[dt_util.dt.time, str]] = None,
weekday: Union[None, str, Container[str]] = None,
) -> bool:
"""Test if local time condition matches.
Handle the fact that time is continuous and we may be testing for
a period that crosses midnight. In that case it is easier to test
for the opposite. "(23:59 <= now < 00:01)" would be the same as
"not (00:01 <= now < 23:59)".
"""
now = dt_util.now()
now_time = now.time()
if after is None:
after = dt_util.dt.time(0)
elif isinstance(after, str):
after_entity = hass.states.get(after)
if not after_entity:
return False
after = dt_util.dt.time(
after_entity.attributes.get("hour", 23),
after_entity.attributes.get("minute", 59),
after_entity.attributes.get("second", 59),
)
if before is None:
before = dt_util.dt.time(23, 59, 59, 999999)
elif isinstance(before, str):
before_entity = hass.states.get(before)
if not before_entity:
return False
before = dt_util.dt.time(
before_entity.attributes.get("hour", 23),
before_entity.attributes.get("minute", 59),
before_entity.attributes.get("second", 59),
999999,
)
if after < before:
if not after <= now_time < before:
return False
else:
if before <= now_time < after:
return False
if weekday is not None:
now_weekday = WEEKDAYS[now.weekday()]
if (
isinstance(weekday, str)
and weekday != now_weekday
or now_weekday not in weekday
):
return False
return True
def time_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with time based condition."""
if config_validation:
config = cv.TIME_CONDITION_SCHEMA(config)
before = config.get(CONF_BEFORE)
after = config.get(CONF_AFTER)
weekday = config.get(CONF_WEEKDAY)
def time_if(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Validate time based if-condition."""
return time(hass, before, after, weekday)
return time_if
def zone(
hass: HomeAssistant,
zone_ent: Union[None, str, State],
entity: Union[None, str, State],
) -> bool:
"""Test if zone-condition matches.
Async friendly.
"""
if isinstance(zone_ent, str):
zone_ent = hass.states.get(zone_ent)
if zone_ent is None:
return False
if isinstance(entity, str):
entity = hass.states.get(entity)
if entity is None:
return False
latitude = entity.attributes.get(ATTR_LATITUDE)
longitude = entity.attributes.get(ATTR_LONGITUDE)
if latitude is None or longitude is None:
return False
return zone_cmp.in_zone(
zone_ent, latitude, longitude, entity.attributes.get(ATTR_GPS_ACCURACY, 0)
)
def zone_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with zone based condition."""
if config_validation:
config = cv.ZONE_CONDITION_SCHEMA(config)
entity_ids = config.get(CONF_ENTITY_ID, [])
zone_entity_ids = config.get(CONF_ZONE, [])
def if_in_zone(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Test if condition."""
return all(
any(
zone(hass, zone_entity_id, entity_id)
for zone_entity_id in zone_entity_ids
)
for entity_id in entity_ids
)
return if_in_zone
async def async_device_from_config(
hass: HomeAssistant, config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Test a device condition."""
if config_validation:
config = cv.DEVICE_CONDITION_SCHEMA(config)
platform = await async_get_device_automation_platform(
hass, config[CONF_DOMAIN], "condition"
)
return cast(
ConditionCheckerType,
platform.async_condition_from_config(config, config_validation), # type: ignore
)
async def async_validate_condition_config(
hass: HomeAssistant, config: Union[ConfigType, Template]
) -> Union[ConfigType, Template]:
"""Validate config."""
if isinstance(config, Template):
return config
condition = config[CONF_CONDITION]
if condition in ("and", "not", "or"):
conditions = []
for sub_cond in config["conditions"]:
sub_cond = await async_validate_condition_config(hass, sub_cond)
conditions.append(sub_cond)
config["conditions"] = conditions
if condition == "device":
config = cv.DEVICE_CONDITION_SCHEMA(config)
assert not isinstance(config, Template)
platform = await async_get_device_automation_platform(
hass, config[CONF_DOMAIN], "condition"
)
return cast(ConfigType, platform.CONDITION_SCHEMA(config)) # type: ignore
return config
@callback
def async_extract_entities(config: Union[ConfigType, Template]) -> Set[str]:
"""Extract entities from a condition."""
referenced: Set[str] = set()
to_process = deque([config])
while to_process:
config = to_process.popleft()
if isinstance(config, Template):
continue
condition = config[CONF_CONDITION]
if condition in ("and", "not", "or"):
to_process.extend(config["conditions"])
continue
entity_ids = config.get(CONF_ENTITY_ID)
if isinstance(entity_ids, str):
entity_ids = [entity_ids]
if entity_ids is not None:
referenced.update(entity_ids)
return referenced
@callback
def async_extract_devices(config: Union[ConfigType, Template]) -> Set[str]:
"""Extract devices from a condition."""
referenced = set()
to_process = deque([config])
while to_process:
config = to_process.popleft()
if isinstance(config, Template):
continue
condition = config[CONF_CONDITION]
if condition in ("and", "not", "or"):
to_process.extend(config["conditions"])
continue
if condition != "device":
continue
device_id = config.get(CONF_DEVICE_ID)
if device_id is not None:
referenced.add(device_id)
return referenced
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.variables_helper."""
import os
import tensorflow as tf
from object_detection.utils import variables_helper
class FilterVariablesTest(tf.test.TestCase):
def _create_variables(self):
return [tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights'),
tf.Variable(1.0, name='FeatureExtractor/InceptionV3/biases'),
tf.Variable(1.0, name='StackProposalGenerator/weights'),
tf.Variable(1.0, name='StackProposalGenerator/biases')]
def test_return_all_variables_when_empty_regex(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(variables, [''])
self.assertItemsEqual(out_variables, variables)
def test_return_variables_which_do_not_match_single_regex(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(variables,
['FeatureExtractor/.*'])
self.assertItemsEqual(out_variables, variables[2:])
def test_return_variables_which_do_not_match_any_regex_in_list(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(variables, [
'FeatureExtractor.*biases', 'StackProposalGenerator.*biases'
])
self.assertItemsEqual(out_variables, [variables[0], variables[2]])
def test_return_variables_matching_empty_regex_list(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(
variables, [''], invert=True)
self.assertItemsEqual(out_variables, [])
def test_return_variables_matching_some_regex_in_list(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(
variables,
['FeatureExtractor.*biases', 'StackProposalGenerator.*biases'],
invert=True)
self.assertItemsEqual(out_variables, [variables[1], variables[3]])
class MultiplyGradientsMatchingRegexTest(tf.test.TestCase):
def _create_grads_and_vars(self):
return [(tf.constant(1.0),
tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights')),
(tf.constant(2.0),
tf.Variable(2.0, name='FeatureExtractor/InceptionV3/biases')),
(tf.constant(3.0),
tf.Variable(3.0, name='StackProposalGenerator/weights')),
(tf.constant(4.0),
tf.Variable(4.0, name='StackProposalGenerator/biases'))]
def test_multiply_all_feature_extractor_variables(self):
grads_and_vars = self._create_grads_and_vars()
regex_list = ['FeatureExtractor/.*']
multiplier = 0.0
grads_and_vars = variables_helper.multiply_gradients_matching_regex(
grads_and_vars, regex_list, multiplier)
exp_output = [(0.0, 1.0), (0.0, 2.0), (3.0, 3.0), (4.0, 4.0)]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
output = sess.run(grads_and_vars)
self.assertItemsEqual(output, exp_output)
def test_multiply_all_bias_variables(self):
grads_and_vars = self._create_grads_and_vars()
regex_list = ['.*/biases']
multiplier = 0.0
grads_and_vars = variables_helper.multiply_gradients_matching_regex(
grads_and_vars, regex_list, multiplier)
exp_output = [(1.0, 1.0), (0.0, 2.0), (3.0, 3.0), (0.0, 4.0)]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
output = sess.run(grads_and_vars)
self.assertItemsEqual(output, exp_output)
class FreezeGradientsMatchingRegexTest(tf.test.TestCase):
def _create_grads_and_vars(self):
return [(tf.constant(1.0),
tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights')),
(tf.constant(2.0),
tf.Variable(2.0, name='FeatureExtractor/InceptionV3/biases')),
(tf.constant(3.0),
tf.Variable(3.0, name='StackProposalGenerator/weights')),
(tf.constant(4.0),
tf.Variable(4.0, name='StackProposalGenerator/biases'))]
def test_freeze_all_feature_extractor_variables(self):
grads_and_vars = self._create_grads_and_vars()
regex_list = ['FeatureExtractor/.*']
grads_and_vars = variables_helper.freeze_gradients_matching_regex(
grads_and_vars, regex_list)
exp_output = [(3.0, 3.0), (4.0, 4.0)]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
output = sess.run(grads_and_vars)
self.assertItemsEqual(output, exp_output)
class GetVariablesAvailableInCheckpointTest(tf.test.TestCase):
def test_return_all_variables_from_checkpoint(self):
variables = [
tf.Variable(1.0, name='weights'),
tf.Variable(1.0, name='biases')
]
checkpoint_path = os.path.join(self.get_temp_dir(), 'graph.pb')
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(variables)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
out_variables = variables_helper.get_variables_available_in_checkpoint(
variables, checkpoint_path)
self.assertItemsEqual(out_variables, variables)
def test_return_variables_available_in_checkpoint(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'graph.pb')
weight_variable = tf.Variable(1.0, name='weights')
global_step = tf.train.get_or_create_global_step()
graph1_variables = [
weight_variable,
global_step
]
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(graph1_variables)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
graph2_variables = graph1_variables + [tf.Variable(1.0, name='biases')]
out_variables = variables_helper.get_variables_available_in_checkpoint(
graph2_variables, checkpoint_path, include_global_step=False)
self.assertItemsEqual(out_variables, [weight_variable])
def test_return_variables_available_in_checkpoint_with_dict_inputs(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'graph.pb')
graph1_variables = [
tf.Variable(1.0, name='ckpt_weights'),
]
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(graph1_variables)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
graph2_variables_dict = {
'ckpt_weights': tf.Variable(1.0, name='weights'),
'ckpt_biases': tf.Variable(1.0, name='biases')
}
out_variables = variables_helper.get_variables_available_in_checkpoint(
graph2_variables_dict, checkpoint_path)
self.assertTrue(isinstance(out_variables, dict))
self.assertItemsEqual(out_variables.keys(), ['ckpt_weights'])
self.assertTrue(out_variables['ckpt_weights'].op.name == 'weights')
def test_return_variables_with_correct_sizes(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'graph.pb')
bias_variable = tf.Variable(3.0, name='biases')
global_step = tf.train.get_or_create_global_step()
graph1_variables = [
tf.Variable([[1.0, 2.0], [3.0, 4.0]], name='weights'),
bias_variable,
global_step
]
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(graph1_variables)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
graph2_variables = [
tf.Variable([1.0, 2.0], name='weights'), # Note the new variable shape.
bias_variable,
global_step
]
out_variables = variables_helper.get_variables_available_in_checkpoint(
graph2_variables, checkpoint_path, include_global_step=True)
self.assertItemsEqual(out_variables, [bias_variable, global_step])
if __name__ == '__main__':
tf.test.main()
import sublime
import sublime_plugin
import re
import os
import codecs
# import sys
# import platform
# from .sublime_helper import *
try:
from . import scopes
except (ImportError, ValueError):
import scopes
try:
from .sublime_helper import SublimeHelper
except (ImportError, ValueError):
from sublime_helper import SublimeHelper
fountain_scope = scopes.fountain_scope
action_scope = scopes.action_scope
boneyard_scope = scopes.boneyard_scope
dialogue_scope = scopes.dialogue_scope
lyrics_scope = scopes.lyrics_scope
character_scope = scopes.character_scope
parenthetical_scope = scopes.parenthetical_scope
note_scope = scopes.note_scope
scene_scope = scopes.scene_scope
character_list_scope = scopes.character_list_scope
section_scope = scopes.section_scope
synopses_scope = scopes.synopses_scope
pagebreak_scope = scopes.pagebreak_scope
title_page_scope = scopes.title_page_scope
center_scope = scopes.center_scope
transition_scope = scopes.transition_scope
user = ''
# user_os = platform.system()
class Characters(sublime_plugin.EventListener):
characters = []
person = ''
# lower_characters = []
# camel_characters = []
current_character = ''
previous_line = 0
current_line = 0
filename = ''
def modified_character(self, view):
if view.settings().get('syntax') == 'Packages/Fountainhead/Fountainhead.tmLanguage':
# if 'Fountainhead.tmLanguage' in view.settings().get('syntax'):
# if sublime.load_settings('Fountainhead.sublime-settings').get('characters', True):
if view.settings().get('characters', True):
if self.characters == []:
self.on_activated(view)
view.set_status('CharacterList', '')
if view.rowcol(view.sel()[0].end())[0] != self.current_line:
self.previous_line = self.current_line
self.current_line = view.rowcol(view.sel()[0].end())[0]
# if view.scope_name(view.text_point(self.previous_line, 0)) == 'text.fountain dialogue entity.name.class ':
# if view.scope_name(view.text_point(self.previous_line, 0)) == 'text.fountain dialogue entity.name.class ':
if view.scope_name(view.text_point(self.previous_line, 0)) == fountain_scope + dialogue_scope + character_scope:
# get character name from line
s = SublimeHelper()
self.current_character = view.substr(view.line(view.text_point(self.previous_line, 0)))
character = s.line_string(view)
name = self.current_character.split(' (O.S.)')[0]
name = name.split(' (V.O.)')[0]
name = name.split(' (OS)')[0]
name = name.split(' (VO)')[0]
name = name.split(" (CONT'D)")[0]
if name[0] == ' ' or name[0] == '\t':
name = re.split(r'^\s*', name)[1]
if name not in self.characters and name != '' and name is not None:
self.characters.append(name)
self.characters = sorted(self.characters)
ShowCharactersCommand.characters = self.characters
# Create Fountainhead directory if it doesn't exist
packages_directory = sublime.packages_path() + '/User/Fountainhead/'
if not os.path.exists(packages_directory):
os.mkdir(packages_directory)
completions_file = packages_directory + 'Characters.sublime-completions'
# if user_os == 'Windows':
# print("Sorry, not supported at this time.")
# elif user_os == 'Darwin':
completions = codecs.open(completions_file, 'w', 'utf8')
# completions.write('{\n\t\t"scope": "text.fountain - comment - string - dialogue - entity.other.attribute-name - entity.other.inherited-class - foreground - meta.diff - entity.name.function - entity.name.tag - entity.name.class - variable.parameter",\n\n\t\t"completions":\n\t\t[')
completions.write('{\n\t\t"scope": ' + '"' + fountain_scope + '- ' + boneyard_scope + '- ' + action_scope + '- ' + dialogue_scope + '- ' + lyrics_scope + '- ' + character_scope + '- ' + parenthetical_scope + '- ' + note_scope + '- ' + scene_scope + '- ' + section_scope + '- ' + synopses_scope + '- ' + pagebreak_scope + '- ' + title_page_scope + '- ' + center_scope + '- ' + transition_scope[0:-1] + '",\n\n\t\t"completions":\n\t\t[')
length = len(self.characters)
character_counter = 0
for character in self.characters:
if character_counter < length - 1:
completions.write('"%s",' % character)
character_counter += 1
else:
completions.write('"%s"' % character)
completions.write(']\n}')
completions.close()
# Not needed since characters are no longer converted to lowercase
# if name[0] != '@':
# if name.lower() not in self.lower_characters:
# self.lower_characters.append(name.lower())
# self.lower_characters = sorted(self.lower_characters)
# completions = codecs.open(completions_file, 'w', 'utf8')
# completions.write('{\n\t\t"scope": "text.fountain - comment - string - entity.other.attribute-name - entity.other.inherited-class - foreground - meta.diff - entity.name.function - entity.name.tag - entity.name.class - variable.parameter",\n\n\t\t"completions":\n\t\t[')
# length = len(self.lower_characters)
# character_counter = 0
# for character in self.lower_characters:
# if character_counter < length - 1:
# completions.write('"%s",' % character)
# character_counter += 1
# else:
# completions.write('"%s"' % character)
# completions.write(']\n}')
# completions.close()
# elif name[0] == '@':
# if name not in self.lower_characters:
# self.lower_characters.append(name)
# self.lower_characters = sorted(self.lower_characters)
# completions = codecs.open(completions_file, 'w', 'utf8')
# completions.write('{\n\t\t"scope": "text.fountain - comment - string - entity.other.attribute-name - entity.other.inherited-class - foreground - meta.diff - entity.name.function - entity.name.tag - entity.name.class - variable.parameter",\n\n\t\t"completions":\n\t\t[')
# length = len(self.lower_characters)
# character_counter = 0
# for character in self.lower_characters:
# if character_counter < length - 1:
# completions.write('"%s",' % character)
# character_counter += 1
# else:
# completions.write('"%s"' % character)
# completions.write(']\n}')
# completions.close()
# Clear out character list message
view.set_status('CharacterList',
'')
def on_modified_async(self, view):
if int(sublime.version()) >= 3000:
self.modified_character(view)
def on_modified(self, view):
if int(sublime.version()) < 3000:
self.modified_character(view)
def on_activated(self, view):
if view.settings().get('syntax') == 'Packages/Fountainhead/Fountainhead.tmLanguage':
# s = view.settings().get('syntax')
# while s is None:
# s = view.settings().get('syntax')
# if 'Fountainhead.tmLanguage' in s:
# if sublime.load_settings('Fountainhead.sublime-settings').get('characters', True):
if view.settings().get('characters', True):
if self.filename == view.file_name() and len(self.characters) > 0:
pass
# print(view.file_name())
else:
view.set_status('CharacterList',
'FINDING CHARACTERS...')
self.characters = []
# self.lower_characters = []
counter = 0
self.filename = view.file_name()
try:
while counter >= 0:
# character = view.substr(view.find_by_selector('text.fountain dialogue entity.name.class ')[counter])
character = view.substr(view.find_by_selector(fountain_scope + dialogue_scope + character_scope)[counter])
name = character.split(' (O.S.)')[0]
name = name.split(' (V.O.)')[0]
name = name.split(' (OS)')[0]
name = name.split(' (VO)')[0]
name = name.split(" (CONT'D)")[0]
if name[0] == ' ' or name[0] == '\t':
name = (re.split(r'^\s*', name))[1]
if name not in self.characters and name != '' and name is not None:
self.characters.append(name)
counter += 1
except IndexError:
pass
# for character in self.characters:
# if character[0] != '@':
# self.lower_characters.append(character.lower())
# if character[0] == '@':
# self.lower_characters.append(character)
# self.lower_characters = sorted(self.lower_characters)
self.characters = sorted(self.characters)
# proc_env = os.environ.copy()
# encoding = sys.getfilesystemencoding()
# for k, v in proc_env.items():
# proc_env[k] = os.path.expandvars(v).encode(encoding)
# user = (proc_env['HOME']).decode(encoding='UTF-8')
# completions = open(user + '/Library/Application Support/Sublime Text 3/Packages/Fountainhead/Characters.sublime-completions', 'w')
# packages_directory = sublime.packages_path()
# completions_file = packages_directory + '/Fountainhead/Characters.sublime-completions'
# Create Fountainhead directory if it doesn't exist
packages_directory = sublime.packages_path() + '/User/Fountainhead/'
if not os.path.exists(packages_directory):
os.mkdir(packages_directory)
completions_file = packages_directory + 'Characters.sublime-completions'
completions = codecs.open(completions_file, 'w', 'utf8')
# completions.write('{\n\t\t"scope": "text.fountain - comment - string - dialogue - entity.other.attribute-name - entity.other.inherited-class - foreground - meta.diff - entity.name.function - entity.name.tag - entity.name.class - variable.parameter",\n\n\t\t"completions":\n\t\t[')
completions.write('{\n\t\t"scope": ' + '"' + fountain_scope + '- ' + boneyard_scope + '- ' + action_scope + '- ' + dialogue_scope + '- ' + lyrics_scope + '- ' + character_scope + '- ' + parenthetical_scope + '- ' + note_scope + '- ' + scene_scope + '- ' + section_scope + '- ' + synopses_scope + '- ' + pagebreak_scope + '- ' + title_page_scope + '- ' + center_scope + '- ' + transition_scope[0:-1] + '",\n\n\t\t"completions":\n\t\t[')
# length = len(self.lower_characters)
length = len(self.characters)
character_counter = 0
# for character in self.lower_characters:
for character in self.characters:
if character_counter < length - 1:
completions.write('"%s",' % character)
character_counter += 1
else:
completions.write('"%s"' % character)
completions.write(']\n}')
completions.close()
# Print confirmation message
view.set_status('CharacterList',
'CHARACTERS FOUND!')
# ShowCharactersCommand.unsorted_characters = self.characters
ShowCharactersCommand.characters = self.characters
class UpdateCharacterListCommand(sublime_plugin.TextCommand):
characters = []
filename = ''
def run(self, edit):
self.characters = []
c = Characters()
c.on_activated(self.view)
class ShowCharactersCommand(sublime_plugin.TextCommand):
person = ''
# unsorted_characters = []
# sorted_characters = []
characters = []
def run(self, edit):
# if sublime.load_settings('Fountainhead.sublime-settings').get('characters', True) and int(sublime.version()) >= 3000:
if self.view.settings().get('characters', True) and int(sublime.version()) >= 3000:
# self.sorted_characters = sorted(self.unsorted_characters)
# self.view.show_popup_menu(self.sorted_characters, self.on_done)
self.view.show_popup_menu(self.characters, self.on_done)
self.view.run_command('insert', {"characters": self.person})
def on_done(self, index):
if index == -1:
self.person = ''
else:
# self.person = self.sorted_characters[index]
self.person = self.characters[index]
|
|
__author__ = 'thomas'
from jinja2 import nodes
from jinja2.ext import Extension
import inspect
class Property(object):
def __init__(self, type=None, template=None, css_classes="", label=None):
self.type = type
self.template = template
self.css_classes = css_classes
self.label = label
class ViewModelMeta(type):
def __new__(meta, class_name, bases, new_attrs):
cls = type.__new__(meta, class_name, bases, new_attrs)
cls._properties = {}
for name, prop in new_attrs.items():
if isinstance(prop, Property):
prop.name = name
cls._properties[name] = prop
return cls
class ViewModel(object):
__metaclass__ = ViewModelMeta
includes = []
excludes = []
def __init__(self, obj_type=None):
self.type = obj_type
self.all_models = None
print self.specified_properties
@property
def specified_properties(self):
return set(self.includes + [name for name in self._properties])
def get_property(self, property_name):
if property_name in self._properties:
return self._properties[property_name]
else:
prop = Property(property_name)
prop.name = property_name
return prop
class PropertiesList(object):
def __init__(self, view_model, obj):
self.view_model = view_model
self.obj = obj
def __iter__(self):
if self.view_model.includes:
return iter(self.view_model.specified_properties)
return iter(self.get_all_public_not_excluded_members())
def get_all_public_not_excluded_members(self):
"""
TODO this should handle dictionaries too.
lists and strings have no members that we care about so return an empty list for them.
:param obj:
:return:
"""
if isinstance(self.obj, dict):
return [key for key in self.obj if key not in self.view_model.excludes]
try:
iter(self.obj)
return []
except TypeError:
return [member[0] for member in inspect.getmembers(self.obj)
if not member[0].startswith('_')
and member[0] not in self.view_model.excludes]
class PropertyDetails(object):
def __init__(self, name, property_definition, property_type, property_value):
self.name = name
self.property_definition = property_definition
self.property_type = property_type
self.property_value = property_value
self.properties = PropertiesList(property_type, property_value)
class DisplayModels(object):
def __init__(self):
self.models = {} # {string: ViewModel}
def add_model(self, type, model):
self.models[type] = model
model.all_models = self
def get_model_for_type(self, obj_type):
"""
TODO: Should this also be able to take in an actual type?
:param obj_type:
:return:
"""
if obj_type in self.models:
return self.models[obj_type]()
return ViewModel()
def get_property_details(self, view_model, property_name, property_value):
prop = view_model.get_property(property_name)
property_type = self.get_model_for_type(prop.type)
#
if not property_type.type:
property_type.type = self.get_model_for_type(property_value.__class__.__name__)
return PropertyDetails(property_name, prop, property_type, property_value)
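# Hedged usage sketch (not from the original file): how a view model for a hypothetical
# "Customer" type could be declared and registered with the Property/ViewModel/DisplayModels
# API above. The attribute names, labels and the "Customer" type string are all made up.
class ExampleCustomerViewModel(ViewModel):
    name = Property(label="Full name")
    email = Property(template="email_link")
    excludes = ["password_hash"]


def example_register_models():
    models = DisplayModels()
    models.add_model("Customer", ExampleCustomerViewModel)
    # get_model_for_type instantiates the registered class; unknown types fall back to a bare ViewModel
    return models.get_model_for_type("Customer")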
class DisplayExtension(Extension):
tags = set(['display'])
default_template_path = 'display'
def __init__(self, environment):
environment.extend(
display_template_path=self.default_template_path,
display_template_extension='html',
display_models=DisplayModels()
)
self.format_string = "{path}/{template_name}.{extension}"
super(DisplayExtension, self).__init__(environment)
def parse(self, parser):
lineno = parser.stream.next().lineno
obj = self.parse_object(parser)
as_model = self.parse_as(parser)
includes, excludes = self.parse_includes(parser)
property_name = self.get_property_name(obj)
current_property = self.get_current_property()
property_details = self.get_property_details(obj, as_model, property_name, includes, excludes)
properties = self.get_properties(obj, property_details)
assignments = [
self.assign_obj(obj, property_details, lineno),
self.assign_property_details(obj, property_details, lineno),
self.assign_properties(obj, properties, lineno),
]
template = self.include_template(obj, property_details)
return self.create_node(assignments, template)
def parse_object(self, parser):
return parser.parse_expression()
def parse_as(self, parser):
if parser.stream.next_if('name:as'):
return parser.parse_expression()
return nodes.Const(None)
    def parse_includes(self, parser):
        if parser.stream.next_if('name:include'):
            includes = parser.parse_expression()
            excludes = nodes.Const(None)
        elif parser.stream.next_if('name:exclude'):
            includes = nodes.Const(None)
            excludes = parser.parse_expression()
        else:
            # neither clause present: default both so the return below cannot raise UnboundLocalError
            includes = nodes.Const(None)
            excludes = nodes.Const(None)
        return includes, excludes
def get_property_name(self, obj):
property_stack = self.get_property_stack(obj)
if property_stack:
return nodes.List(property_stack)
return nodes.Const(None)
def get_current_property(self):
return nodes.Name('property', 'load')
def get_property_details(self, obj, current_property, property_name):
return self.call_method('call_get_property_details', args=[obj, current_property, property_name])
def call_get_property_details(self, obj, current_property, property_stack, as_model, includes=None, excludes=None):
print(obj)
print(property_stack)
includes = make_sure_is_list(includes)
excludes = make_sure_is_list(excludes)
if not current_property:
view_model = self.environment.display_models.get_model_for_type(obj.__class__.__name__)
else:
view_model = current_property.view_model
for property_name in property_stack:
current_obj = self.environment.getattr(current_obj, property_name)
property_details = self.environment.display_models.get_property_details(view_model, property_name, current_obj)
view_model = property_details.view_model
if as_model:
property_details.view_model = self.environment.display_models.get_model_for_type(as_model)
return property_details
new_property = self.environment.display_models.get_property_details(view_model, property_name, obj)
return PropertyDetails(property_name, include=includes, exclude=excludes)
def assign_obj(self, obj, property_details, lineno):
return nodes.Assign(nodes.Name('obj', 'store', lineno=lineno),
self.call_method('call_assign_obj', args=[obj, property_details]), lineno=lineno)
def call_assign_obj(self, obj, property_details):
return property_details.format_value(obj)
def assign_property_details(self, property_details, lineno):
return nodes.Assign(nodes.Name('property', 'store', lineno=lineno), property_details, lineno=lineno)
def assign_properties(self, lineno):
return nodes.Assign(nodes.Name('properties', 'store'),
nodes.Getattr(nodes.Name('property', 'store'), 'properties'),
lineno=lineno)
def include_template(self, obj, property_details):
return nodes.Include(self.call_method('call_get_template_list', args=[obj, property_details]), True, False)
def call_get_template_list(self, obj, property_details):
templates = []
if property_details and property_details.template:
templates.append(self.get_template_path(property_details.template))
return templates + [self.get_template_path(cls.__name__) for cls in inspect.getmro(obj.__class__)]
def get_template_path(self, name):
return self.format_string.format(path=self.environment.display_template_path,
template_name=name,
extension=self.environment.display_template_extension)
def create_node(self, assignments, template):
return nodes.Scope(assignments + [template])
def make_sure_is_list(list_to_check):
if not list_to_check:
return list_to_check
if isinstance(list_to_check, basestring):
return [list_to_check]
try:
iter(list_to_check)
return list_to_check
except TypeError:
return [list_to_check]
|
|
import pandas as pd
import numpy as np
import pyranges as pr
from copy import deepcopy
from kipoi.metadata import GenomicRanges
from kipoi.data import Dataset, kipoi_dataloader
from kipoi_conda.dependencies import Dependencies
from kipoiseq.transforms import ReorderedOneHot
from kipoi.specs import Author
from kipoi_utils.utils import default_kwargs
from kipoiseq.extractors import FastaStringExtractor
from kipoiseq.transforms.functional import resize_interval, one_hot_dna
from kipoiseq.utils import to_scalar, parse_dtype
from kipoiseq.dataclasses import Interval
deps = Dependencies(conda=['bioconda::pybedtools', 'bioconda::pyfaidx', 'bioconda::pyranges', 'numpy', 'pandas'],
pip=['kipoiseq'])
package_authors = [Author(name='Ziga Avsec', github='avsecz'),
Author(name='Roman Kreuzhuber', github='krrome')]
# Add Alex here?
# Object exported on import *
__all__ = ['SeqIntervalDl', 'StringSeqIntervalDl', 'BedDataset', 'AnchoredGTFDl']
class BedDataset(object):
"""Reads a tsv file in the following format:
```
chr start stop task1 task2 ...
```
# Arguments
tsv_file: tsv file type
bed_columns: number of columns corresponding to the bed file. All the columns
after that will be parsed as targets
num_chr: if specified, 'chr' in the chromosome name will be dropped
label_dtype: specific data type for labels, Example: `float` or `np.float32`
ambiguous_mask: if specified, rows containing only ambiguous_mask values will be skipped
incl_chromosomes: exclusive list of chromosome names to include in the final dataset.
if not None, only these will be present in the dataset
excl_chromosomes: list of chromosome names to omit from the dataset.
ignore_targets: if True, target variables are ignored
"""
    # bed types according to
# https://www.ensembl.org/info/website/upload/bed.html
bed_types = [str, # chrom
int, # chromStart
int, # chromEnd
str, # name
str, # score, as str to prevent issues, also its useless
str, # strand
int, # thickStart
int, # thickEnd
                 str,  # itemRgb
int, # blockCount
int, # blockSizes
int] # blockStarts
def __init__(self, tsv_file,
label_dtype=None,
bed_columns=3,
num_chr=False,
ambiguous_mask=None,
incl_chromosomes=None,
excl_chromosomes=None,
ignore_targets=False):
# TODO - `chrom` column: use pd.Categorical for memory efficiency
self.tsv_file = tsv_file
self.bed_columns = bed_columns
self.num_chr = num_chr
self.label_dtype = label_dtype
self.ambiguous_mask = ambiguous_mask
self.incl_chromosomes = incl_chromosomes
self.excl_chromosomes = excl_chromosomes
self.ignore_targets = ignore_targets
df_peek = pd.read_table(self.tsv_file,
header=None,
nrows=1,
sep='\t')
found_columns = df_peek.shape[1]
self.n_tasks = found_columns - self.bed_columns
if self.n_tasks < 0:
raise ValueError("BedDataset requires at least {} valid bed columns. Found only {} columns".
format(self.bed_columns, found_columns))
self.df = pd.read_table(self.tsv_file,
header=None,
dtype={i: d
for i, d in enumerate(self.bed_types[:self.bed_columns] +
[self.label_dtype] * self.n_tasks)},
sep='\t')
if self.num_chr and self.df.iloc[0][0].startswith("chr"):
self.df[0] = self.df[0].str.replace("^chr", "")
if not self.num_chr and not self.df.iloc[0][0].startswith("chr"):
self.df[0] = "chr" + self.df[0]
if ambiguous_mask is not None:
            # exclude regions where only ambiguous labels are present
self.df = self.df[~np.all(
self.df.iloc[:, self.bed_columns:] == ambiguous_mask, axis=1)]
# omit data outside chromosomes
if incl_chromosomes is not None:
self.df = self.df[self.df[0].isin(incl_chromosomes)]
if excl_chromosomes is not None:
self.df = self.df[~self.df[0].isin(excl_chromosomes)]
def __getitem__(self, idx):
"""Returns (pybedtools.Interval, labels)
"""
row = self.df.iloc[idx]
# TODO: use kipoiseq.dataclasses.interval instead of pybedtools
import pybedtools
interval = pybedtools.create_interval_from_list(
[to_scalar(x) for x in row.iloc[:self.bed_columns]])
if self.ignore_targets or self.n_tasks == 0:
labels = {}
else:
labels = row.iloc[self.bed_columns:].values.astype(
self.label_dtype)
return interval, labels
def __len__(self):
return len(self.df)
def get_targets(self):
return self.df.iloc[:, self.bed_columns:].values.astype(self.label_dtype)
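# Hedged usage sketch (illustrative only): "intervals.tsv" is a placeholder for a
# chrom/start/stop tsv with one extra label column, as described in the docstring above.
def _example_bed_dataset():
    bed = BedDataset("intervals.tsv", bed_columns=3, label_dtype=float)
    interval, labels = bed[0]   # (pybedtools.Interval, np.ndarray of labels)
    return interval, labels, len(bed)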
@kipoi_dataloader(override={"dependencies": deps, 'info.authors': package_authors})
class StringSeqIntervalDl(Dataset):
"""
info:
doc: >
Dataloader for a combination of fasta and tab-delimited input files such as bed files. The dataloader extracts
regions from the fasta file as defined in the tab-delimited `intervals_file`. Returned sequences are of the type
np.array([str]).
args:
intervals_file:
doc: bed3+<columns> file path containing intervals + (optionally) labels
example:
url: https://raw.githubusercontent.com/kipoi/kipoiseq/master/tests/data/intervals_51bp.tsv
md5: a76e47b3df87fd514860cf27fdc10eb4
fasta_file:
doc: Reference genome FASTA file path.
example:
url: https://raw.githubusercontent.com/kipoi/kipoiseq/master/tests/data/hg38_chr22_32000000_32300000.fa
md5: 01320157a250a3d2eea63e89ecf79eba
num_chr_fasta:
            doc: if True, the dataloader will make sure that the chromosomes don't start with chr.
label_dtype:
doc: None, datatype of the task labels taken from the intervals_file. Example - str, int, float, np.float32
auto_resize_len:
doc: None, required sequence length.
# max_seq_len:
# doc: maximum allowed sequence length
use_strand:
doc: reverse-complement fasta sequence if bed file defines negative strand. Requires a bed6 file
force_upper:
doc: Force uppercase output of sequences
ignore_targets:
doc: if True, don't return any target variables
output_schema:
inputs:
name: seq
shape: ()
doc: DNA sequence as string
special_type: DNAStringSeq
associated_metadata: ranges
targets:
shape: (None,)
doc: (optional) values following the bed-entries
metadata:
ranges:
type: GenomicRanges
doc: Ranges describing inputs.seq
postprocessing:
variant_effects:
bed_input:
- intervals_file
"""
def __init__(self,
intervals_file,
fasta_file,
num_chr_fasta=False,
label_dtype=None,
auto_resize_len=None,
# max_seq_len=None,
use_strand=False,
force_upper=True,
ignore_targets=False):
self.num_chr_fasta = num_chr_fasta
self.intervals_file = intervals_file
self.fasta_file = fasta_file
self.auto_resize_len = auto_resize_len
self.use_strand = use_strand
self.force_upper = force_upper
# self.max_seq_len = max_seq_len
if use_strand:
# require a 6-column bed-file if strand is used
bed_columns = 6
else:
bed_columns = 3
self.bed = BedDataset(self.intervals_file,
num_chr=self.num_chr_fasta,
bed_columns=bed_columns,
label_dtype=parse_dtype(label_dtype),
ignore_targets=ignore_targets)
self.fasta_extractors = None
def __len__(self):
return len(self.bed)
def __getitem__(self, idx):
if self.fasta_extractors is None:
self.fasta_extractors = FastaStringExtractor(self.fasta_file, use_strand=self.use_strand,
force_upper=self.force_upper)
interval, labels = self.bed[idx]
if self.auto_resize_len:
            # automatically resize the interval to the requested length, anchored at its center
interval = resize_interval(
interval, self.auto_resize_len, anchor='center')
        # QUESTION: @kromme - why do we need max_seq_len?
# if self.max_seq_len is not None:
# assert interval.stop - interval.start <= self.max_seq_len
# Run the fasta extractor and transform if necessary
seq = self.fasta_extractors.extract(interval)
if self.bed.bed_columns == 6:
ranges = GenomicRanges(interval.chrom, interval.start, interval.stop, str(idx), interval.strand)
else:
ranges = GenomicRanges(interval.chrom, interval.start, interval.stop, str(idx))
return {
"inputs": np.array(seq),
"targets": labels,
"metadata": {
"ranges": ranges
}
}
@classmethod
def get_output_schema(cls):
output_schema = deepcopy(cls.output_schema)
kwargs = default_kwargs(cls)
ignore_targets = kwargs['ignore_targets']
if ignore_targets:
output_schema.targets = None
return output_schema
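# Hedged usage sketch (illustrative only): the file names are placeholders; the example
# urls in the docstring above point at test files with the expected layout.
def _example_string_seq_dl():
    dl = StringSeqIntervalDl("intervals_51bp.tsv", "hg38_chr22.fa", auto_resize_len=101)
    sample = dl[0]
    return sample["inputs"], sample["metadata"]["ranges"]   # sequence string (np.array) + GenomicRanges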
# TODO - properly deal with samples outside of the genome
@kipoi_dataloader(override={"dependencies": deps, 'info.authors': package_authors})
class SeqIntervalDl(Dataset):
"""
info:
doc: >
Dataloader for a combination of fasta and tab-delimited input files such as bed files. The dataloader extracts
regions from the fasta file as defined in the tab-delimited `intervals_file` and converts them into one-hot encoded
format. Returned sequences are of the type np.array with the shape inferred from the arguments: `alphabet_axis`
and `dummy_axis`.
args:
intervals_file:
doc: bed3+<columns> file path containing intervals + (optionally) labels
example:
url: https://raw.githubusercontent.com/kipoi/kipoiseq/master/tests/data/intervals_51bp.tsv
md5: a76e47b3df87fd514860cf27fdc10eb4
fasta_file:
doc: Reference genome FASTA file path.
example:
url: https://raw.githubusercontent.com/kipoi/kipoiseq/master/tests/data/hg38_chr22_32000000_32300000.fa
md5: 01320157a250a3d2eea63e89ecf79eba
num_chr_fasta:
            doc: if True, the dataloader will make sure that the chromosomes don't start with chr.
label_dtype:
doc: 'None, datatype of the task labels taken from the intervals_file. Example: str, int, float, np.float32'
auto_resize_len:
doc: None, required sequence length.
use_strand:
doc: reverse-complement fasta sequence if bed file defines negative strand. Requires a bed6 file
alphabet_axis:
doc: axis along which the alphabet runs (e.g. A,C,G,T for DNA)
dummy_axis:
doc: defines in which dimension a dummy axis should be added. None if no dummy axis is required.
alphabet:
doc: >
alphabet to use for the one-hot encoding. This defines the order of the one-hot encoding.
Can either be a list or a string: 'ACGT' or ['A, 'C', 'G', 'T']. Default: 'ACGT'
dtype:
doc: 'defines the numpy dtype of the returned array. Example: int, np.int32, np.float32, float'
ignore_targets:
doc: if True, don't return any target variables
output_schema:
inputs:
name: seq
shape: (None, 4)
doc: One-hot encoded DNA sequence
special_type: DNASeq
associated_metadata: ranges
targets:
shape: (None,)
doc: (optional) values following the bed-entry - chr start end target1 target2 ....
metadata:
ranges:
type: GenomicRanges
doc: Ranges describing inputs.seq
postprocessing:
variant_effects:
bed_input:
- intervals_file
"""
def __init__(self,
intervals_file,
fasta_file,
num_chr_fasta=False,
label_dtype=None,
auto_resize_len=None,
# max_seq_len=None,
use_strand=False,
alphabet_axis=1,
dummy_axis=None,
alphabet="ACGT",
ignore_targets=False,
dtype=None):
# core dataset, not using the one-hot encoding params
self.seq_dl = StringSeqIntervalDl(intervals_file, fasta_file, num_chr_fasta=num_chr_fasta,
label_dtype=label_dtype, auto_resize_len=auto_resize_len,
use_strand=use_strand,
ignore_targets=ignore_targets)
self.input_transform = ReorderedOneHot(alphabet=alphabet,
dtype=dtype,
alphabet_axis=alphabet_axis,
dummy_axis=dummy_axis)
def __len__(self):
return len(self.seq_dl)
def __getitem__(self, idx):
ret = self.seq_dl[idx]
ret['inputs'] = self.input_transform(str(ret["inputs"]))
return ret
@classmethod
def get_output_schema(cls):
"""Get the output schema. Overrides the default `cls.output_schema`
"""
output_schema = deepcopy(cls.output_schema)
# get the default kwargs
kwargs = default_kwargs(cls)
# figure out the input shape
mock_input_transform = ReorderedOneHot(alphabet=kwargs['alphabet'],
dtype=kwargs['dtype'],
alphabet_axis=kwargs['alphabet_axis'],
dummy_axis=kwargs['dummy_axis'])
input_shape = mock_input_transform.get_output_shape(
kwargs['auto_resize_len'])
# modify it
output_schema.inputs.shape = input_shape
# (optionally) get rid of the target shape
if kwargs['ignore_targets']:
output_schema.targets = None
return output_schema
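# Hedged usage sketch (illustrative only; same placeholder files as above). With the default
# alphabet_axis=1 and no dummy_axis, the returned one-hot array should have shape (auto_resize_len, 4).
def _example_seq_interval_dl():
    dl = SeqIntervalDl("intervals_51bp.tsv", "hg38_chr22.fa", auto_resize_len=101)
    sample = dl[0]
    return sample["inputs"].shape   # expected (101, 4)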
@kipoi_dataloader(override={"dependencies": deps, 'info.authors': [Author(name='Alex Karollus', github='Karollus')]})
class AnchoredGTFDl(Dataset):
"""
info:
doc: >
Dataloader for a combination of fasta and gtf files. The dataloader extracts fixed length regions
around anchor points. Anchor points are extracted from the gtf based on the anchor parameter.
The sequences corresponding to the region are then extracted from the fasta file and optionally
            transformed using a function given by the transform parameter.
args:
gtf_file:
doc: Path to a gtf file (str)
example:
url: https://zenodo.org/record/1466102/files/example_files-gencode.v24.annotation_chr22.gtf
md5: c0d1bf7738f6a307b425e4890621e7d9
fasta_file:
doc: Reference genome FASTA file path (str)
example:
url: https://zenodo.org/record/1466102/files/example_files-hg38_chr22.fa
md5: b0f5cdd4f75186f8a4d2e23378c57b5b
num_upstream:
doc: Number of nt by which interval is extended upstream of the anchor point
num_downstream:
doc: Number of nt by which interval is extended downstream of the anchor point
gtf_filter:
doc: >
Allows to filter the gtf before extracting the anchor points. Can be str, callable
or None. If str, it is interpreted as argument to pandas .query(). If callable,
it is interpreted as function that filters a pandas dataframe and returns the
filtered df.
anchor:
doc: >
Defines the anchor points. Can be str or callable. If it is a callable, it is
treated as function that takes a pandas dataframe and returns a modified version
of the dataframe where each row represents one anchor point, the position of
which is stored in the column called anchor_pos. If it is a string, a predefined function
is loaded. Currently available are tss (anchor is the start of a gene), start_codon
(anchor is the start of the start_codon), stop_codon (anchor is the position right after
the stop_codon), polya (anchor is the position right after the end of a gene).
transform:
doc: Callable (or None) to transform the extracted sequence (e.g. one-hot)
interval_attrs:
doc: Metadata to extract from the gtf, e.g. ["gene_id", "Strand"]
use_strand:
doc: True or False
output_schema:
inputs:
name: seq
shape: (None, 4)
special_type: DNAStringSeq
doc: exon sequence with flanking intronic sequence
associated_metadata: ranges
metadata:
gene_id:
type: str
doc: gene id
Strand:
type: str
doc: Strand
ranges:
type: GenomicRanges
                doc: ranges that the sequences were extracted from
"""
_function_mapping = {
"tss": lambda x: AnchoredGTFDl.anchor_to_feature_start(x, "gene", use_strand=True),
"start_codon": lambda x: AnchoredGTFDl.anchor_to_feature_start(x, "start_codon", use_strand=True),
"stop_codon": lambda x: AnchoredGTFDl.anchor_to_feature_end(x, "stop_codon", use_strand=True),
"polya": lambda x: AnchoredGTFDl.anchor_to_feature_end(x, "gene", use_strand=True)
}
def __init__(self, gtf_file, fasta_file,
num_upstream, num_downstream,
gtf_filter='gene_type == "protein_coding"',
anchor='tss',
transform=one_hot_dna,
interval_attrs=["gene_id", "Strand"],
use_strand=True):
# Read and filter gtf
gtf = pr.read_gtf(gtf_file).df
if gtf_filter:
if isinstance(gtf_filter, str):
gtf = gtf.query(gtf_filter)
else:
gtf = gtf_filter(gtf)
# Extract anchor
if isinstance(anchor, str):
anchor = anchor.lower()
if anchor in self._function_mapping:
anchor = self._function_mapping[anchor]
else:
raise Exception("No valid anchorpoint was chosen")
self._gtf_anchor = anchor(gtf)
# Other parameters
self._use_strand = use_strand
self._fa = FastaStringExtractor(fasta_file, use_strand=self._use_strand)
self._transform = transform
if self._transform is None:
self._transform = lambda x: x
self._num_upstream = num_upstream
self._num_downstream = num_downstream
self._interval_attrs = interval_attrs
def _create_anchored_interval(self, row, num_upstream, num_downstream):
if self._use_strand == True and row.Strand == "-":
# negative strand
start = row.anchor_pos - num_downstream
end = row.anchor_pos + num_upstream
else:
# positive strand
start = row.anchor_pos - num_upstream
end = row.anchor_pos + num_downstream
interval = Interval(row.Chromosome, start, end, strand=row.Strand)
return interval
def __len__(self):
return len(self._gtf_anchor)
def __getitem__(self, idx):
row = self._gtf_anchor.iloc[idx]
interval = self._create_anchored_interval(row,
num_upstream=self._num_upstream,
num_downstream=self._num_downstream)
sequence = self._fa.extract(interval)
sequence = self._transform(sequence)
metadata_dict = {k:row.get(k, '') for k in self._interval_attrs}
metadata_dict["ranges"] = GenomicRanges(interval.chrom, interval.start, interval.stop, str(idx))
return {
"inputs": np.array(sequence),
"metadata": metadata_dict
}
@staticmethod
def anchor_to_feature_start(gtf, feature, use_strand):
gtf = gtf.query('Feature == @feature')
if use_strand:
gtf["anchor_pos"] = ((gtf.Start * (gtf.Strand == "+"))
+ (gtf.End * (gtf.Strand == "-")))
else:
gtf["anchor_pos"] = gtf.Start
return gtf
@staticmethod
def anchor_to_feature_end(gtf, feature, use_strand):
gtf = gtf.query('Feature == @feature')
if use_strand:
gtf["anchor_pos"] = ((gtf.End * (gtf.Strand == "+"))
+ (gtf.Start * (gtf.Strand == "-")))
else:
gtf["anchor_pos"] = gtf.End
return gtf
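# Hedged usage sketch (illustrative only): the gtf/fasta paths are placeholders; the example urls
# in the docstring above show the expected inputs. With the default one_hot_dna transform, each
# sample's "inputs" should be a (num_upstream + num_downstream, 4) one-hot array.
def _example_anchored_gtf_dl():
    dl = AnchoredGTFDl("gencode_chr22.gtf", "hg38_chr22.fa",
                       num_upstream=200, num_downstream=200, anchor="tss")
    sample = dl[0]
    return sample["inputs"].shape, sample["metadata"]["gene_id"]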
|
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
        config['rpcport'] = 19332 if testnet else 9332
    # build the JSON-RPC URL from the settings read out of bitcoin.conf
    connect = "http://%s:%s@127.0.0.1:%s" % (config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
        kb = tx_size/1000 # integer division rounds down
        fee = total_in - total_out # the fee this transaction actually pays
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
IAM Resource Policy Checker
---------------------------
When securing resources with iam policies, we want to parse and evaluate
the resource's policy for any cross account or public access grants that
are not intended.
In general, iam policies can be complex, and where possible using iam
simulate is preferable, but requires passing the caller's arn, which
is not feasible when we're evaluating who the valid set of callers
are.
References
- IAM Policy Evaluation - http://goo.gl/sH5Dt5
- IAM Policy Reference - http://goo.gl/U0a06y
"""
import json
from c7n.filters import Filter
from c7n.resolver import ValuesFrom
from c7n.utils import get_account_id, local_session, type_schema
class CrossAccountAccessFilter(Filter):
"""Check a resource's embedded iam policy for cross account access.
"""
schema = type_schema(
'cross-account',
whitelist_from=ValuesFrom.schema,
whitelist={'type': 'array', 'items': {'type': 'string'}})
policy_attribute = 'Policy'
def process(self, resources, event=None):
self.accounts = self.get_accounts()
return super(CrossAccountAccessFilter, self).process(resources, event)
def get_accounts(self):
owner_id = get_account_id(local_session(self.manager.session_factory))
accounts = set(self.data.get('whitelist', ()))
if 'whitelist_from' in self.data:
values = ValuesFrom(self.data['whitelist_from'], self.manager)
accounts = accounts.union(values.get_values())
accounts.add(owner_id)
return accounts
def get_resource_policy(self, r):
return r.get(self.policy_attribute, None)
def __call__(self, r):
p = self.get_resource_policy(r)
if p is None:
return False
violations = check_cross_account(p, self.accounts)
if violations:
r['CrossAccountViolations'] = violations
return True
def _account(arn):
    # we could try/except here, at some minor runtime cost; for now just
    # flag invalid values
if ':' not in arn:
return arn
return arn.split(':', 5)[4]
def check_cross_account(policy_text, allowed_accounts):
"""Find cross account access policy grant not explicitly allowed
"""
if isinstance(policy_text, basestring):
policy = json.loads(policy_text)
else:
policy = policy_text
violations = []
for s in policy['Statement']:
principal_ok = True
if s['Effect'] != 'Allow':
continue
# Highly suspect in an allow
if 'NotPrincipal' in s:
violations.append(s)
continue
# Does this wildcard
if 'Principal' not in s:
violations.append(s)
continue
# Skip relays for events to sns
if 'Service' in s['Principal']:
s['Principal'].pop('Service')
if not s['Principal']:
continue
assert len(s['Principal']) == 1, "Too many principals %s" % s
# At this point principal is required?
p = (
isinstance(s['Principal'], basestring) and s['Principal']
or s['Principal']['AWS'])
p = isinstance(p, basestring) and (p,) or p
for pid in p:
if pid == '*':
principal_ok = False
elif pid.startswith('arn:aws:iam::cloudfront:user'):
continue
else:
account_id = _account(pid)
if account_id not in allowed_accounts:
principal_ok = False
if principal_ok:
continue
if 'Condition' not in s:
violations.append(s)
continue
if 'StringEquals' in s['Condition']:
# Default SNS Policy does this
if 'AWS:SourceOwner' in s['Condition']['StringEquals']:
so = s['Condition']['StringEquals']['AWS:SourceOwner']
if not isinstance(so, list):
so = [so]
so = [pso for pso in so if pso not in allowed_accounts]
if not so:
principal_ok = True
# Default keys in kms do this
if 'kms:CallerAccount' in s['Condition']['StringEquals']:
so = s['Condition']['StringEquals']['kms:CallerAccount']
if so in allowed_accounts:
principal_ok = True
## BEGIN S3 WhiteList
## Note these are transient white lists for s3
## we need to refactor this to verify ip against a
## cidr white list, and verify vpce/vpc against the
## accounts.
# For now allow vpce/vpc conditions as sufficient on s3
            if s['Condition']['StringEquals'].keys()[0] in (
                    "aws:sourceVpce", "aws:sourceVpc"):
principal_ok = True
if 'StringLike' in s['Condition']:
# For now allow vpce/vpc conditions as sufficient on s3
if s['Condition'][
'StringLike'].keys()[0].lower() == "aws:sourcevpce":
principal_ok = True
if 'ForAnyValue:StringLike' in s['Condition']:
if s['Condition']['ForAnyValue:StringLike'].keys()[
0].lower() == 'aws:sourcevpce':
principal_ok = True
if 'IpAddress' in s['Condition']:
principal_ok = True
## END S3 WhiteList
if 'ArnEquals' in s['Condition']:
# Other valid arn equals? / are invalids allowed?
# duplicate block from below, inline closure func
# would remove, but slower, else move to class eval
principal_ok = True
keys = ('aws:SourceArn', 'AWS:SourceArn')
for k in keys:
if k in s['Condition']['ArnEquals']:
v = s['Condition']['ArnEquals'][k]
if v is None:
violations.append(s)
else:
v = isinstance(v, basestring) and (v,) or v
for arn in v:
aid = _account(arn)
if aid not in allowed_accounts:
violations.append(s)
if 'ArnLike' in s['Condition']:
# Other valid arn equals? / are invalids allowed?
v = s['Condition']['ArnLike']['aws:SourceArn']
v = isinstance(v, basestring) and (v,) or v
principal_ok = True
for arn in v:
aid = _account(arn)
if aid not in allowed_accounts:
violations.append(s)
if not principal_ok:
violations.append(s)
return violations
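# Hedged sketch (not from the original module): a minimal policy statement that
# check_cross_account() flags because the principal's account id is not in the allowed set.
# All account ids below are made up.
def _example_cross_account_check():
    policy = {"Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": "arn:aws:iam::111111111111:root"},
        "Action": "sqs:SendMessage",
        "Resource": "*"}]}
    # returns the offending statement(s) as violations
    return check_cross_account(policy, allowed_accounts=set(["222222222222"]))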
|
|
import tensorflow as tf
import numpy as np
class BaseController:
def __init__(self, input_size, output_size, memory_read_heads, word_size, shift_range, batch_size=1):
"""
constructs a controller as described in the "Neural Turing Machines" paper
https://arxiv.org/abs/1410.5401
Parameters:
----------
input_size: int
the size of the data input vector
output_size: int
the size of the data output vector
memory_read_heads: int
            the number of read heads in the associated external memory
word_size: int
the size of the word in the associated external memory
shift_range: int
allowed integer shifts
batch_size: int
the size of the input data batch [optional]
"""
self.input_size = input_size
self.output_size = output_size
self.read_heads = memory_read_heads
self.word_size = word_size
self.batch_size = batch_size
self.shift_range = shift_range
        # indicates if the internal neural network is recurrent
        # by the existence of update_state and get_state methods
has_recurrent_update = callable(getattr(self, 'update_state', None))
has_get_state = callable(getattr(self, 'get_state', None))
self.has_recurrent_nn = has_recurrent_update and has_get_state
        # the actual size of the neural network input after flattening and
# concatenating the input vector with the previously read vectors from memory
self.nn_input_size = self.word_size * self.read_heads + self.input_size
# X Reading heads + 1 writing head: Key + strength + gate + gamma + shift | erase and add vectors
self.interface_vector_size = (self.word_size + 3 + (2*self.shift_range + 1)) * (self.read_heads + 1) + 2*self.word_size
# define network vars
with tf.name_scope("controller"):
self.network_vars()
self.nn_output_size = None
with tf.variable_scope("shape_inference"):
self.nn_output_size = self.get_nn_output_size()
self.initials()
def initials(self):
"""
sets the initial values of the controller transformation weights matrices
this method can be overwritten to use a different initialization scheme
"""
# defining internal weights of the controller
self.interface_weights = tf.Variable(
tf.random_normal([self.nn_output_size, self.interface_vector_size], stddev=0.1),
name='interface_weights'
)
self.nn_output_weights = tf.Variable(
tf.random_normal([self.nn_output_size, self.output_size], stddev=0.1),
name='nn_output_weights'
)
def network_vars(self):
"""
defines the variables needed by the internal neural network
[the variables should be attributes of the class, i.e. self.*]
"""
raise NotImplementedError("network_vars is not implemented")
def network_op(self, X):
"""
defines the controller's internal neural network operation
Parameters:
----------
        X: Tensor (batch_size, word_size * read_heads + input_size)
the input data concatenated with the previously read vectors from memory
Returns: Tensor (batch_size, nn_output_size)
"""
raise NotImplementedError("network_op method is not implemented")
def get_nn_output_size(self):
"""
        retrieves the output size of the defined neural network
Returns: int
the output's size
Raises: ValueError
"""
input_vector = np.zeros([self.batch_size, self.nn_input_size], dtype=np.float32)
output_vector = None
if self.has_recurrent_nn:
output_vector,_ = self.network_op(input_vector, self.get_state())
else:
output_vector = self.network_op(input_vector)
shape = output_vector.get_shape().as_list()
if len(shape) > 2:
raise ValueError("Expected the neural network to output a 1D vector, but got %dD" % (len(shape) - 1))
else:
return shape[1]
def parse_interface_vector(self, interface_vector):
"""
parses the flat interface_vector into its various components with their
correct shapes
Parameters:
----------
interface_vector: Tensor (batch_size, interface_vector_size)
the flattened interface vector to be parsed
Returns: dict
a dictionary with the components of the interface_vector parsed
"""
parsed = {}
r_keys_end = self.word_size * self.read_heads
r_strengths_end = r_keys_end + self.read_heads
r_gates_end = r_strengths_end + self.read_heads
r_gamma_end = r_gates_end + self.read_heads
r_shift_end = r_gamma_end + (self.shift_range * 2 + 1) * self.read_heads
w_key_end = r_shift_end + self.word_size
w_strengths_end = w_key_end + 1
w_gates_end = w_strengths_end + 1
w_gamma_end = w_gates_end + 1
w_shift_end = w_gamma_end + (self.shift_range * 2 + 1)
erase_end = w_shift_end + self.word_size
write_end = erase_end + self.word_size
r_keys_shape = (-1, self.word_size, self.read_heads)
r_scalars_shape = (-1, self.read_heads)
r_shift_shape = (-1, self.shift_range * 2 + 1, self.read_heads)
w_key_shape = (-1, self.word_size, 1)
w_scalars_shape = (-1, 1)
w_shift_shape = (-1, self.shift_range * 2 + 1,1)
write_shape = erase_shape = (-1, self.word_size)
# parsing the vector into its individual components
parsed['read_keys'] = tf.reshape(interface_vector[:, :r_keys_end], r_keys_shape)
parsed['read_strengths'] = tf.nn.softplus(tf.reshape(interface_vector[:, r_keys_end:r_strengths_end], r_scalars_shape))+1
parsed['read_gates'] = tf.sigmoid(tf.reshape(interface_vector[:, r_strengths_end:r_gates_end], r_scalars_shape))
parsed['read_gammas'] = tf.nn.softplus(tf.reshape(interface_vector[:, r_gates_end:r_gamma_end], r_scalars_shape))+1
parsed['read_shifts'] = tf.nn.softmax(tf.reshape(interface_vector[:, r_gamma_end:r_shift_end], r_shift_shape),dim=1)
parsed['write_key'] = tf.reshape(interface_vector[:, r_shift_end:w_key_end], w_key_shape)
parsed['write_strength'] = tf.nn.softplus(tf.reshape(interface_vector[:, w_key_end:w_strengths_end], w_scalars_shape))+1
parsed['write_gate'] = tf.sigmoid(tf.reshape(interface_vector[:, w_strengths_end:w_gates_end], w_scalars_shape))
parsed['write_gamma'] = tf.nn.softplus(tf.reshape(interface_vector[:, w_gates_end:w_gamma_end], w_scalars_shape)) + 1
parsed['write_shift'] = tf.nn.softmax(tf.reshape(interface_vector[:, w_gamma_end:w_shift_end], w_shift_shape),dim=1)
parsed['erase_vector'] = tf.sigmoid(tf.reshape(interface_vector[:, w_shift_end:erase_end], erase_shape))
parsed['write_vector'] = tf.reshape(interface_vector[:, erase_end:write_end], write_shape)
return parsed
def process_input(self, X, last_read_vectors, state=None):
"""
processes input data through the controller network and returns the
pre-output and interface_vector
Parameters:
----------
X: Tensor (batch_size, input_size)
the input data batch
last_read_vectors: (batch_size, word_size, read_heads)
the last batch of read vectors from memory
state: Tuple
state vectors if the network is recurrent
Returns: Tuple
pre-output: Tensor (batch_size, output_size)
parsed_interface_vector: dict
"""
flat_read_vectors = tf.reshape(last_read_vectors, (-1, self.word_size * self.read_heads))
complete_input = tf.concat(1, [X, flat_read_vectors])
nn_output, nn_state = None, None
if self.has_recurrent_nn:
nn_output, nn_state = self.network_op(complete_input, state)
else:
nn_output = self.network_op(complete_input)
pre_output = tf.matmul(nn_output, self.nn_output_weights)
interface = tf.matmul(nn_output, self.interface_weights)
parsed_interface = self.parse_interface_vector(interface)
if self.has_recurrent_nn:
return pre_output, parsed_interface, nn_state
else:
return pre_output, parsed_interface
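# Hedged sketch (not part of the original file): a minimal feed-forward controller built on
# BaseController. The hidden size (128) and the weight initialisation are illustrative
# assumptions, and the code sticks to the same TF-0.x/1.x-era API used elsewhere in this file.
class ExampleFeedforwardController(BaseController):
    def network_vars(self):
        # single hidden layer acting on [input, flattened read vectors]
        self.W = tf.Variable(tf.random_normal([self.nn_input_size, 128], stddev=0.1), name='ff_W')
        self.b = tf.Variable(tf.zeros([128]), name='ff_b')

    def network_op(self, X):
        return tf.nn.relu(tf.matmul(X, self.W) + self.b)

# e.g. controller = ExampleFeedforwardController(input_size=10, output_size=10,
#                                                memory_read_heads=1, word_size=16, shift_range=1)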
|
|
'''
Tag To Header
Version 2.0.1
By Joe Hiatt, Scott Kennedy(1), Brendan Kohrn and Mike Schmitt(1)
(1) Department of Pathology, University of Washington School of Medicine, Seattle, WA 98195
March 24, 2014
Isolate duplex tags, move them from within the sequenced read to the header region, and remove the spacer region.
usage: tag_to_header.py [-h] [--infile1 INFILE1] [--infile2 INFILE2]
[--outfile1 OUTFILE1] [--outfile2 OUTFILE2]
[--barcode_length BLENGTH] [--spacer_length SLENGTH]
[--read_out ROUT] [--adapter ADAPTERSEQ]
optional arguments:
-h, --help show this help message and exit
--infile1 INFILE1 First input raw fastq file.
--infile2 INFILE2 Second input raw fastq file.
--outfile1 OUTFILE1 Output file for first fastq reads.
--outfile2 OUTFILE2 Output file for second fastq reads.
--barcode_length BLENGTH
Length of the duplex tag sequence. [12]
--spacer_length SLENGTH
Length of the spacer sequences used. [5]
--read_out ROUT How often you want to be told what the program is
doing. [1000000]
--adapter ADAPTERSEQ Optional: Spacer sequence for filtering on the
presence of the spacer. This could be thrown off by
low quality scores.
'''
import sys
import re
from argparse import ArgumentParser
from Bio import SeqIO
class fastQRead:
def __init__(self, in1, in2, in3, in4):
'''This class is meant to hold a single fastQ read.
'''
self.name=in1.strip().strip("@").replace(' ', '_')
self.seq=in2.strip()
self.spacer="+"
self.qual=in4.strip()
if len(self.seq)!=len(self.qual):
raise ValueError("Sequence and quality scores of different lengths!/n%s/n%s/n%s/n%s" % (in1, in2, "+", in4))
def __getitem__(self, index):
'''This should allow slicing of the read to proceed properly.
'''
if isinstance(index, int):
return self.seq[index]
elif isinstance(index, slice):
answer = self.__class__(self.name, self.seq[index], self.spacer, self.qual[index])
return answer
raise ValueError("Invalid index")
class fastQItterator:
def __init__(self, inFile):
'''This class will go through a fastQ file one line at a time.
'''
self.source=inFile
self.eof=False
def next(self):
new=[]
for j in xrange(4):
try:
tmp=self.source.next()
except StopIteration:
self.eof=True
return("EOF")
new.append(tmp)
newRead=fastQRead(new[0],new[1],new[2],new[3])
return(newRead)
def close(self):
self.source.close()
return(True)
class fastqWriter:
def __init__(self, outFile):
self.file=outFile
self.firstLine=True
def write(self, read):
if self.firstLine==True:
self.file.write("@" + read.name)
self.firstLine=False
else:
self.file.write("\n@" + read.name)
self.file.write("\n" + read.seq)
self.file.write("\n" + read.spacer)
self.file.write("\n" + read.qual)
return(True)
def close(self):
self.file.close()
return(True)
class fastaWriter:
def __init__(self, outFile):
self.file=outFile
self.firstLine=True
def write(self, name, seq):
if self.firstLine==True:
self.file.write(">" + name)
self.firstLine=False
else:
self.file.write("\n>" + name)
self.file.write("\n" + seq)
return(True)
def close(self):
self.file.close()
return(True)
def tagExtractFxn(x, blen):
    '''this is the function that extracts the UID tags and template regions
    from both the forward and reverse read by matching each read against the
    fixed tag/spacer/template pattern in tagMatch below. Returns one
    (matched sequence, component dict) tuple per read, or None for a read
    that does not match the pattern.
    '''
###return(x[0][:blen], x[1][:blen])
return(tagMatch(x[0]), tagMatch(x[1]))
def tagMatch(seq):
pattern = '(?P<stag>[ATCG]{12})CAGTA(?P<templatestart>[ATCG]{18})(?P<motifs>[ATCG]{40})(?P<templateend>[ATCG]{18})TACTG(?P<rtag>[ATCG]{12})'
match = re.search(pattern, seq)
if match:
return(match.group(0), match.groupdict())
else:
return None
##return(match.group('stag'), match.group('templatestart'), match.group('motifs'), match.group('templateend'), match.group('rtag')
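# Hedged sketch (not in the original script): build a synthetic read that satisfies the
# tagMatch() pattern above and show which named groups come back. All bases are made up.
def _example_tag_match():
    stag, rtag = 'A' * 12, 'C' * 12
    template_start, motifs, template_end = 'G' * 18, 'T' * 40, 'G' * 18
    read = stag + 'CAGTA' + template_start + motifs + template_end + 'TACTG' + rtag
    full_match, parts = tagMatch(read)
    return parts['stag'], parts['motifs'], parts['rtag']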
def hdrRenameFxn(x, y, z):
'''this function renames the header with the formatting of
*header coordinates,etc*, *index seq*, *tag from read1*, *tag from read2*, *spacer from this read*
*read designation from original header*
'''
return("%s%s%s" % (x[:-1], y, z))
def main():
parser = ArgumentParser()
parser.add_argument('--infile1', dest = 'infile1', help = 'First input raw fastq file. ', required=True)
parser.add_argument('--infile2', dest = 'infile2', help = 'Second input raw fastq file. ', required=True)
parser.add_argument('--outfile1', dest = 'outfile1', help = 'Output file for first fastq reads. ', required=True)
parser.add_argument('--outfile2', dest = 'outfile2', help = 'Output file for second fastq reads. ', required=True)
parser.add_argument('--barcode_length', type = int, default = 12, dest = 'blength', help = 'Length of the duplex tag sequence. [12]')
parser.add_argument('--spacer_length', type = int, default = 5, dest = 'slength', help = 'Length of the spacer sequences used. [5]')
parser.add_argument('--read_out', type = int, default = 1000000, dest = 'rOut', help = 'How often you want to be told what the program is doing. [1000000]')
parser.add_argument('--adapter', default = None, dest = 'adapterSeq', help = 'Optional: Spacer sequence for filtering on the presence of the spacer. This could be thrown off by low quality scores.')
o=parser.parse_args()
in1=fastQItterator(open(o.infile1, 'rU'))
in2=fastQItterator(open(o.infile2, 'rU'))
out1=fastaWriter(open(o.outfile1, 'w'))
out2=fastaWriter(open(o.outfile2, 'w'))
ctr=0
nospacer = 0
goodreads = 0
badtag = 0
oldBad = 0
isEOF=False
while isEOF==False:
read1 = in1.next()
read2 = in2.next()
if read1 == "EOF" or read2 == "EOF":
isEOF = True
else:
ctr += 1
            if o.adapterSeq != None and (read1.seq[o.blength:o.blength + o.slength] != o.adapterSeq or read2.seq[o.blength:o.blength + o.slength] != o.adapterSeq):
nospacer += 1
else:
#extract tags
r1parts, r2parts = tagExtractFxn((read1.seq, read2.seq),o.blength)
if not r1parts or not r2parts:
continue
readName = read1.name
#header reconstruction
read1.name = hdrRenameFxn(read1.name, r1parts[1]['stag'], r2parts[1]['stag'])
read2.name = hdrRenameFxn(read2.name, r1parts[1]['stag'], r2parts[1]['stag'])
tag1 = r1parts[1]['stag']
tag2 = r2parts[1]['stag']
#fastq reconstruction
if (tag1.isalpha() and tag1.count('N') == 0) and (tag2.isalpha() and tag2.count('N') == 0):
rOut1 = r1parts[1]['templatestart'] + r1parts[1]['motifs'] + r1parts[1]['templateend']
rOut2 = r2parts[1]['templatestart'] + r2parts[1]['motifs'] + r2parts[1]['templateend']
out1.write(read1.name, rOut1)
out2.write(read2.name, rOut2)
encodedStr = "\t".join((readName, r1parts[1]['stag'], r2parts[1]['stag'], r1parts[1]['templatestart'], r1parts[1]['motifs'], r2parts[1]['motifs']))
print encodedStr
goodreads += 1
else:
badtag += 1
if ctr%o.rOut==0:
sys.stderr.write("Total sequences processed: %s\n" % (ctr))
sys.stderr.write("Sequences passing filter: %s\n" % (goodreads))
sys.stderr.write("Missing spacers: %s\n" % (nospacer))
sys.stderr.write("Bad tags: %s\n\n" % (badtag))
if badtag == oldBad+o.rOut:
sys.stderr.write("Warning! Potential file error between lines %s and %s. " % ((ctr-o.rOut)*4,(ctr)*4))
oldBad = badtag
in1.close()
in2.close()
out1.close()
out2.close()
sys.stderr.write("Summary statistics:\n")
sys.stderr.write("Total sequences processed: %s\n" % (ctr))
sys.stderr.write("Good sequences: %s\n" % (goodreads))
sys.stderr.write("Missing spacers: %s\n" % (nospacer))
sys.stderr.write("Bad tags: %s\n\n" % (badtag))
if __name__ == "__main__":
main()
|
|
from tastypie.authorization import Authorization
from tastypie.resources import ModelResource, fields, ALL_WITH_RELATIONS
from openpds.authentication import OAuth2Authentication
from openpds.authorization import PDSAuthorization
from openpds.tastypie_internaldatastore import IDSAnswerResource
from openpds import settings
import datetime
import json, ast
from tastypie import fields
from tastypie.authorization import Authorization
from tastypie.validation import Validation
from openpds.tastypie_mongodb.resources import MongoDBResource, Document
from openpds.core.models import AuditEntry, Profile, Notification, Device
from django.db import models
import pdb
from gcm import GCM
class IncidentResource(MongoDBResource):
id = fields.CharField(attribute="_id")
type = fields.CharField(attribute="type", null=False)
date = fields.DateTimeField(attribute="date", null=False)
description = fields.CharField(attribute="description", null=False)
location = fields.DictField(attribute="location", null=False)
user_reported = fields.BooleanField(attribute="user_reported", null=False)
source = fields.CharField(attribute="source", null=False)
class Meta:
authentication = OAuth2Authentication("crowdsos_write")
authorization = PDSAuthorization(scope = "crowdsos_write", audit_enabled = False)
resource_name= "incident"
list_allowed_methods = ["delete", "get", "post"]
object_class = Document
collection = "incident"
filtering = { "type": ["exact"] }
class FunfResource(MongoDBResource):
id = fields.CharField(attribute="_id")
key = fields.CharField(attribute="key", null=True, help_text='The funf probe name.')
time = fields.DateTimeField(attribute="time", null=True, help_text='A human readable datetime. The time represents when funf collected the data.')
value = fields.CharField(attribute="value", null=True, help_text='A json blob of funf data.')
class Meta:
authentication = OAuth2Authentication("funf_write")
authorization = PDSAuthorization(scope = "funf_write", audit_enabled = False)
resource_name = "funf"
list_allowed_methods = ["delete", "get", "post"]
object_class = Document
collection = "funf" # collection name
filtering = { "key" : ["exact"]}
class FunfConfigResource(MongoDBResource):
id = fields.CharField(attribute="_id")
name = fields.CharField(attribute="name", blank = False, null = False)
config = fields.DictField(attribute="config", blank = False, null = False)
class Meta:
resource_name = "funfconfig"
list_allowed_methods = ["delete", "get", "post"]
authentication = OAuth2Authentication("funf_write")
authorization = PDSAuthorization(scope = "funf_write", audit_enabled = True)
object_class = Document
collection = "funfconfig" # collection name
class AnswerResource(IDSAnswerResource):
id = fields.CharField(attribute="_id", help_text='A guid identifier for an answer entry.')
key = fields.CharField(attribute="key", help_text='A unique string to identify each answer.', null=False, unique=True)
value = fields.DictField(attribute="value", help_text='A json blob of answer data.', null=True, )
class Meta:
resource_name = "answer"
list_allowed_methods = ["get", "post"]
help_text='resource help text...'
authentication = OAuth2Authentication("funf_write")
authorization = PDSAuthorization(scope = "funf_write", audit_enabled = True)
object_class = Document
isList = False
class AnswerListResource(IDSAnswerResource):
id = fields.CharField(attribute="_id", help_text='A guid identifier for an answer entry.')
key = fields.CharField(attribute="key", help_text='A unique string to identify each answer.', null=False, unique=True)
value = fields.ListField(attribute="value", help_text='A json blob of answer data.', null=True, )
class Meta:
resource_name = "answerlist"
list_allowed_methods = ["get", "post"]
help_text='resource help text...'
authentication = OAuth2Authentication("funf_write")
authorization = PDSAuthorization(scope = "funf_write", audit_enabled = True)
object_class = Document
isList = True
class ProfileResource(ModelResource):
class Meta:
queryset = Profile.objects.all()
authentication = OAuth2Authentication("funf_write")
authorization = PDSAuthorization(scope = "funf_write", audit_enabled = True)
filtering = { "uuid": ["contains", "exact"]}
class AuditEntryCountResource(ModelResource):
def get_resource_uri(self, bundle):
# Returning nothing here... there isn't a model backing this resource, so there's nowhere to pull this from
# If we deem this important in the future, we can potentially construct a URL with datefilters built-in
return ""
def dehydrate(self, bundle):
# Since there's no backing model here, tastypie for some reason doesn't fill in the necessary fields on the data
# As a result, this must be done manually
bundle.data['date'] = bundle.obj['date']
bundle.data['count'] = bundle.obj['count']
return bundle
def build_filters(self, filters):
#pdb.set_trace()
applicable_filters = super(AuditEntryCountResource, self).build_filters(filters)
qset = None
date_gte = filters.get("date__gte")
if (date_gte):
qset = models.Q(timestamp__gte = date_gte + " 00:00:00Z")
date_lte = filters.get("date__lte")
if (date_lte):
qset2 = (models.Q(timestamp__lte = date_lte + " 23:59:59Z"))
qset = qset & qset2 if qset else qset2
if (qset):
applicable_filters["time_filter"] = qset
datastore_owner_uuid = filters.get("datastore_owner__uuid")
if (datastore_owner_uuid):
applicable_filters["datastore_owner__uuid"] = models.Q(datastore_owner__uuid=datastore_owner_uuid)
return applicable_filters
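# Illustrative example (hypothetical query string): ?date__gte=2014-01-01&date__lte=2014-01-31&datastore_owner__uuid=abc
# build_filters() then stores a combined Q() timestamp-range filter under the "time_filter" key plus a datastore_owner Q(),
# and apply_filters() below pops those entries and applies them on top of the normal tastypie filtering.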
def apply_filters(self, request, applicable_filters):
time_filter = None
#pdb.set_trace()
if ("time_filter" in applicable_filters):
time_filter= applicable_filters.pop("time_filter")
if ("datastore_owner__uuid" in applicable_filters):
datastore_owner_filter = applicable_filters.pop("datastore_owner__uuid")
time_filter = time_filter & datastore_owner_filter if time_filter else datastore_owner_filter
semi_filtered = super(AuditEntryCountResource, self).apply_filters(request, applicable_filters)
return semi_filtered.filter(time_filter) if time_filter else semi_filtered
class Meta:
queryset = AuditEntry.objects.extra({ 'date' : 'date(timestamp)'}).values('date').annotate(count = models.Count("id"))
authentication = OAuth2Authentication("funf_write")
authorization = PDSAuthorization(scope = "funf_write", audit_enabled = False)
fields = ['date', 'count']
allowed_methods = ('get',)
filtering = {"date" : ["gte", "lte", "gt", "lt"],
"datastore_owner": ALL_WITH_RELATIONS}
ordering = ("date");
class AuditEntryResource(ModelResource):
datastore_owner = fields.ForeignKey(ProfileResource, 'datastore_owner', full = True)
requester = fields.ForeignKey(ProfileResource, 'requester', full = True)
def dehydrate(self, bundle):
# Sending this over the line is a waste of bandwidth...
# When we have the time, we should make this formatting happen on the client side from the raw timestamp
bundle.data['timestamp_date'] = bundle.data['timestamp'].date()
bundle.data['timestamp_time'] = bundle.data['timestamp'].time().strftime('%I:%M:%S %p')
return bundle
#def dispatch(self, request_type, request, **kwargs):
# # This method is used for pulling the datastore_owner out of the url path, rather than a querystring parameter
# # This is not supported in v0.3
# pdb.set_trace()
# owner_uuid = kwargs.pop("owner_uuid")
# kwargs["datastore_owner"], created = Profile.objects.get_or_create(uuid = owner_uuid)
# return super(AuditEntryResource, self).dispatch(request_type, request, **kwargs)
class Meta:
queryset = AuditEntry.objects.all()
# POST is provided to allow a Resource or Sandbox server to store audit entries on the PDS
allowed_methods = ('get', 'post')
authentication = OAuth2Authentication("funf_write")
authorization = PDSAuthorization(scope = "funf_write", audit_enabled = False)
filtering = { "datastore_owner" : ALL_WITH_RELATIONS,
"timestamp": ["gte", "lte", "gt", "lt"],
"script": ["contains"],
"requester": ALL_WITH_RELATIONS }
ordering = ('timestamp',)
limit = 20
class NotificationResource(ModelResource):
datastore_owner = fields.ForeignKey(ProfileResource, "datastore_owner", full = True)
def obj_create(self, bundle, request=None, **kwargs):
bundle = super(NotificationResource, self).obj_create(bundle, request, **kwargs)
profile = Profile.objects.get(uuid = bundle.data["datastore_owner"]["uuid"])
devices = Device.objects.filter(datastore_owner = profile)
if devices.count() > 0:
gcm = GCM(settings.GCM_API_KEY)
for device in devices:
try:
gcm.plaintext_request(registration_id=device.gcm_reg_id, data={"action":"notify"})
except Exception as e:
print e
return bundle
class Meta:
queryset = Notification.objects.all()
allowed_methods = ("get", "post", "delete")
authentication = OAuth2Authentication("funf_write")
authorization = PDSAuthorization(scope = "funf_write", audit_enabled = True)
filtering = { "datastore_owner": ALL_WITH_RELATIONS }
ordering = ("timestamp")
limit = 20
class DeviceResource(ModelResource):
datastore_owner = fields.ForeignKey(ProfileResource, "datastore_owner", full=True)
def obj_create(self, bundle, request = None, **kwargs):
#pdb.set_trace()
profile = Profile.objects.get(uuid = bundle.data["datastore_owner"]["uuid"])
devices = Device.objects.filter(datastore_owner=profile)
if devices.count() > 0:
# Note: we're trying to keep only the most recent... not the best way to do it, but it works
devices.delete()
return super(DeviceResource, self).obj_create(bundle,request, **kwargs)
class Meta:
queryset = Device.objects.all()
allowed_methods = ("get", "post", "delete")
authentication = OAuth2Authentication("funf_write")
authorization = PDSAuthorization(scope = "funf_write", audit_enabled = True)
filtering = { "datastore_owner" : ALL_WITH_RELATIONS }
limit = 20
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class UsersOperations(object):
"""UsersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version. Constant value: "2019-03-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-03-01"
self.config = config
def list_by_data_box_edge_device(
self, device_name, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the users registered on a data box edge/gateway device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of User
:rtype:
~azure.mgmt.edgegateway.models.UserPaged[~azure.mgmt.edgegateway.models.User]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_by_data_box_edge_device.metadata['url']
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.UserPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.UserPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users'}
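# A minimal usage sketch (assumptions: this operations group is exposed as `client.users`
# on the service client and credentials are already configured; names are hypothetical):
#
#   for user in client.users.list_by_data_box_edge_device('my-edge-device', 'my-rg'):
#       print(user.name)  # the UserPaged iterator fetches subsequent pages as needed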
def get(
self, device_name, name, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets the properties of the specified user.
:param device_name: The device name.
:type device_name: str
:param name: The user name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: User or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.edgegateway.models.User or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('User', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'}
def _create_or_update_initial(
self, device_name, name, resource_group_name, encrypted_password=None, share_access_rights=None, custom_headers=None, raw=False, **operation_config):
user = models.User(encrypted_password=encrypted_password, share_access_rights=share_access_rights)
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(user, 'User')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('User', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, device_name, name, resource_group_name, encrypted_password=None, share_access_rights=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates a new user or updates an existing user's information on a data
box edge/gateway device.
:param device_name: The device name.
:type device_name: str
:param name: The user name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param encrypted_password: The password details.
:type encrypted_password:
~azure.mgmt.edgegateway.models.AsymmetricEncryptedSecret
:param share_access_rights: List of shares that the user has rights
on. This field should not be specified during user creation.
:type share_access_rights:
list[~azure.mgmt.edgegateway.models.ShareAccessRight]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns User or
ClientRawResponse<User> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.edgegateway.models.User]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.edgegateway.models.User]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
encrypted_password=encrypted_password,
share_access_rights=share_access_rights,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('User', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'}
def _delete_initial(
self, device_name, name, resource_group_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, device_name, name, resource_group_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the user on a databox edge/gateway device.
:param device_name: The device name.
:type device_name: str
:param name: The user name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'}
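# A minimal usage sketch for the long-running create_or_update operation (assumptions:
# `client.users` exposes this operations group and `secret` is a prepared
# models.AsymmetricEncryptedSecret; all names are hypothetical):
#
#   poller = client.users.create_or_update('my-edge-device', 'storage-user', 'my-rg',
#                                          encrypted_password=secret)
#   user = poller.result()  # the LROPoller blocks until the ARM operation completes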
|
|
import os
import os.path
import tempfile
import shutil
from nose.tools import eq_
from build_pack_utils import utils
from compile_helpers import setup_webdir_if_it_doesnt_exist
from compile_helpers import convert_php_extensions
from compile_helpers import is_web_app
from compile_helpers import find_stand_alone_app_to_run
from compile_helpers import load_manifest
from compile_helpers import find_all_php_versions
from compile_helpers import validate_php_version
from compile_helpers import setup_log_dir
class TestCompileHelpers(object):
def setUp(self):
self.build_dir = tempfile.mkdtemp(prefix='build-')
self.cache_dir = tempfile.mkdtemp(prefix='cache-')
os.rmdir(self.build_dir) # delete otherwise copytree complains
os.rmdir(self.cache_dir) # cache dir does not exist normally
def tearDown(self):
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
if os.path.exists(self.cache_dir):
shutil.rmtree(self.cache_dir)
for name in os.listdir(os.environ['TMPDIR']):
if name.startswith('httpd-') and name.endswith('.gz'):
os.remove(os.path.join(os.environ['TMPDIR'], name))
if name.startswith('php-') and name.endswith('.gz'):
os.remove(os.path.join(os.environ['TMPDIR'], name))
def assert_exists(self, *args):
eq_(True, os.path.exists(os.path.join(*args)),
"Does not exists: %s" % os.path.join(*args))
def test_setup_log_dir(self):
eq_(False, os.path.exists(os.path.join(self.build_dir, 'logs')))
setup_log_dir({
'BUILD_DIR': self.build_dir
})
self.assert_exists(self.build_dir, 'logs')
def test_setup_if_webdir_exists(self):
shutil.copytree('tests/data/app-1', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(2, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_custom_webdir_exists(self):
shutil.copytree('tests/data/app-6', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'public',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'public')
self.assert_exists(self.build_dir, 'public', 'index.php')
self.assert_exists(self.build_dir, 'public', 'info.php')
self.assert_exists(self.build_dir, 'public',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(3, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'public'))))
def test_setup_if_htdocs_does_not_exist(self):
shutil.copytree('tests/data/app-2', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(2, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_htdocs_does_not_exist_but_library_does(self):
shutil.copytree('tests/data/app-7', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, 'htdocs', 'library')
self.assert_exists(self.build_dir, 'htdocs', 'library', 'junk.php')
self.assert_exists(self.build_dir, 'lib')
self.assert_exists(self.build_dir, 'lib', 'test.php')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
self.assert_exists(self.build_dir, 'manifest.yml')
eq_(4, len(os.listdir(self.build_dir)))
eq_(4, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_custom_webdir_does_not_exist(self):
shutil.copytree('tests/data/app-2', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'public',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'public')
self.assert_exists(self.build_dir, 'public', 'index.php')
self.assert_exists(self.build_dir, 'public', 'info.php')
self.assert_exists(self.build_dir, 'public',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(2, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'public'))))
def test_setup_if_htdocs_does_not_exist_with_extensions(self):
shutil.copytree('tests/data/app-4', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
self.assert_exists(self.build_dir, '.bp')
self.assert_exists(self.build_dir, '.bp', 'logs')
self.assert_exists(self.build_dir, '.bp', 'logs', 'some.log')
self.assert_exists(self.build_dir, '.extensions')
self.assert_exists(self.build_dir, '.extensions', 'some-ext')
self.assert_exists(self.build_dir, '.extensions', 'some-ext',
'extension.py')
eq_(4, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_custom_webdir_does_not_exist_with_extensions(self):
shutil.copytree('tests/data/app-4', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'public',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'public')
self.assert_exists(self.build_dir, 'public', 'index.php')
self.assert_exists(self.build_dir, 'public', 'info.php')
self.assert_exists(self.build_dir, 'public',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
self.assert_exists(self.build_dir, '.bp')
self.assert_exists(self.build_dir, '.bp', 'logs')
self.assert_exists(self.build_dir, '.bp', 'logs', 'some.log')
self.assert_exists(self.build_dir, '.extensions')
self.assert_exists(self.build_dir, '.extensions', 'some-ext')
self.assert_exists(self.build_dir, '.extensions', 'some-ext',
'extension.py')
eq_(4, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'public'))))
def test_setup_if_htdocs_with_stand_alone_app(self):
shutil.copytree('tests/data/app-5', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEB_SERVER': 'none'
}))
self.assert_exists(self.build_dir, 'app.php')
eq_(1, len(os.listdir(self.build_dir)))
def test_convert_php_extensions_54(self):
ctx = {
'PHP_VERSION': '5.4.x',
'PHP_EXTENSIONS': ['mod1', 'mod2', 'mod3'],
'ZEND_EXTENSIONS': ['zmod1', 'zmod2']
}
convert_php_extensions(ctx)
eq_('extension=mod1.so\nextension=mod2.so\nextension=mod3.so',
ctx['PHP_EXTENSIONS'])
eq_('zend_extension="@HOME/php/lib/php/extensions/'
'no-debug-non-zts-20100525/zmod1.so"\n'
'zend_extension="@HOME/php/lib/php/extensions/'
'no-debug-non-zts-20100525/zmod2.so"',
ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_55(self):
ctx = {
'PHP_VERSION': '5.5.x',
'PHP_EXTENSIONS': ['mod1', 'mod2', 'mod3'],
'ZEND_EXTENSIONS': ['zmod1', 'zmod2']
}
convert_php_extensions(ctx)
eq_('extension=mod1.so\nextension=mod2.so\nextension=mod3.so',
ctx['PHP_EXTENSIONS'])
eq_('zend_extension="zmod1.so"\nzend_extension="zmod2.so"',
ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_54_none(self):
ctx = {
'PHP_VERSION': '5.4.x',
'PHP_EXTENSIONS': [],
'ZEND_EXTENSIONS': []
}
convert_php_extensions(ctx)
eq_('', ctx['PHP_EXTENSIONS'])
eq_('', ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_55_none(self):
ctx = {
'PHP_VERSION': '5.5.x',
'PHP_EXTENSIONS': [],
'ZEND_EXTENSIONS': []
}
convert_php_extensions(ctx)
eq_('', ctx['PHP_EXTENSIONS'])
eq_('', ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_54_one(self):
ctx = {
'PHP_VERSION': '5.4.x',
'PHP_EXTENSIONS': ['mod1'],
'ZEND_EXTENSIONS': ['zmod1']
}
convert_php_extensions(ctx)
eq_('extension=mod1.so', ctx['PHP_EXTENSIONS'])
eq_('zend_extension="@HOME/php/lib/php/extensions/'
'no-debug-non-zts-20100525/zmod1.so"',
ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_55_one(self):
ctx = {
'PHP_VERSION': '5.5.x',
'PHP_EXTENSIONS': ['mod1'],
'ZEND_EXTENSIONS': ['zmod1']
}
convert_php_extensions(ctx)
eq_('extension=mod1.so', ctx['PHP_EXTENSIONS'])
eq_('zend_extension="zmod1.so"',
ctx['ZEND_EXTENSIONS'])
def test_is_web_app(self):
ctx = {}
eq_(True, is_web_app(ctx))
ctx['WEB_SERVER'] = 'nginx'
eq_(True, is_web_app(ctx))
ctx['WEB_SERVER'] = 'httpd'
eq_(True, is_web_app(ctx))
ctx['WEB_SERVER'] = 'none'
eq_(False, is_web_app(ctx))
def test_find_stand_alone_app_to_run_app_start_cmd(self):
ctx = {'APP_START_CMD': "echo 'Hello World!'"}
eq_("echo 'Hello World!'", find_stand_alone_app_to_run(ctx))
results = ('app.php', 'main.php', 'run.php', 'start.php', 'app.php')
for i, res in enumerate(results):
ctx = {'BUILD_DIR': 'tests/data/standalone/test%d' % (i + 1)}
eq_(res, find_stand_alone_app_to_run(ctx))
def test_load_manifest(self):
ctx = {'BP_DIR': '.'}
manifest = load_manifest(ctx)
assert manifest is not None
assert 'dependencies' in manifest.keys()
assert 'language' in manifest.keys()
assert 'url_to_dependency_map' in manifest.keys()
assert 'exclude_files' in manifest.keys()
def test_find_all_php_versions(self):
ctx = {'BP_DIR': '.'}
manifest = load_manifest(ctx)
dependencies = manifest['dependencies']
versions = find_all_php_versions(dependencies)
eq_(2, len([v for v in versions if v.startswith('5.4.')]))
eq_(2, len([v for v in versions if v.startswith('5.5.')]))
eq_(2, len([v for v in versions if v.startswith('5.6.')]))
def test_validate_php_version(self):
ctx = {
'ALL_PHP_VERSIONS': ['5.4.31', '5.4.30'],
'PHP_54_LATEST': '5.4.31',
'PHP_VERSION': '5.4.30'
}
validate_php_version(ctx)
eq_('5.4.30', ctx['PHP_VERSION'])
ctx['PHP_VERSION'] = '5.4.29'
validate_php_version(ctx)
eq_('5.4.31', ctx['PHP_VERSION'])
ctx['PHP_VERSION'] = '5.4.30'
validate_php_version(ctx)
eq_('5.4.30', ctx['PHP_VERSION'])
|
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Navigation for classic plugin directory listing mode
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import xbmcplugin
import resources.lib.common as common
import resources.lib.kodi.library_utils as lib_utils
import resources.lib.kodi.ui as ui
from resources.lib.database.db_utils import TABLE_MENU_DATA
from resources.lib.globals import G
from resources.lib.navigation.directory_utils import (finalize_directory, convert_list_to_dir_items, custom_viewmode,
end_of_directory, get_title, activate_profile, auto_scroll)
from resources.lib.utils.logging import LOG, measure_exec_time_decorator
# What dynamic menus (and dynamic ids) mean:
# All menus whose context name does not exist in the 'loco_contexts' of the
# MAIN_MENU_ITEMS items in globals.py are considered dynamic menus.
# These menus are generated on the fly (they are not hardcoded) and their data references are saved in TABLE_MENU_DATA
# as menu items (with the same structure as the MAIN_MENU_ITEMS items in globals.py).
# The same TABLE_MENU_DATA table is also used to temporarily store the titles of the main menu entries, which can change
# dynamically according to the language set by the profile; this is the most practical way to get the title
# when opening a menu.
# The 'pathitems':
# It should match the 'path' key in MAIN_MENU_ITEMS of globals.py (or, when not listed there, the dynamic menu item).
# The indexes are: 0 the method name of this 'Directory' class, 1 the menu id, 2 an optional id.
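# Illustrative sketch of the routing convention described above (hypothetical values):
#   pathitems = ['video_list', 'myList', '12345']
#   pathitems[0] -> Directory.video_list()   (method of this class)
#   pathitems[1] -> 'myList'                 (menu id looked up in MAIN_MENU_ITEMS or TABLE_MENU_DATA)
#   pathitems[2] -> '12345'                  (optional list/context id)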
class Directory(object):
"""Directory listings"""
def __init__(self, params):
LOG.debug('Initializing "Directory" with params: {}', params)
self.params = params
# After the URL is built, the param value is converted to a string
self.perpetual_range_start = (None if self.params.get('perpetual_range_start') == 'None'
else self.params.get('perpetual_range_start'))
if 'dir_update_listing' in self.params:
self.dir_update_listing = self.params['dir_update_listing'] == 'True'
else:
self.dir_update_listing = bool(self.perpetual_range_start)
if self.perpetual_range_start == '0':
# For cache identifier purposes
self.perpetual_range_start = None
def root(self, pathitems=None): # pylint: disable=unused-argument
"""Show profiles or home listing when profile auto-selection is enabled"""
# Fetch initial page to refresh all session data
current_directory = common.WndHomeProps[common.WndHomeProps.CURRENT_DIRECTORY]
if not current_directory:
# Note: when the profiles are updated in the database (by the fetch_initial_page call),
# the update also sanitizes the profile-related settings (see _delete_non_existing_profiles in website.py)
common.make_call('fetch_initial_page')
# When the add-on is used in a browser window, we do not have to execute the auto profile selection
if not G.IS_ADDON_EXTERNAL_CALL:
autoselect_profile_guid = G.LOCAL_DB.get_value('autoselect_profile_guid', '')
if autoselect_profile_guid and not common.WndHomeProps[common.WndHomeProps.IS_CONTAINER_REFRESHED]:
if not current_directory:
LOG.info('Performing auto-selection of profile {}', autoselect_profile_guid)
self.params['switch_profile_guid'] = autoselect_profile_guid
self.home(None)
return
list_data, extra_data = common.make_call('get_profiles', {'request_update': False})
self._profiles(list_data, extra_data)
def profiles(self, pathitems=None): # pylint: disable=unused-argument
"""Show profiles listing"""
LOG.debug('Showing profiles listing')
list_data, extra_data = common.make_call('get_profiles', {'request_update': True})
self._profiles(list_data, extra_data)
@custom_viewmode(G.VIEW_PROFILES)
def _profiles(self, list_data, extra_data): # pylint: disable=unused-argument
# The standard Kodi skin does not allow changing the view type when the content is of "files" type,
# so here we use the "images" type, which is visually nicer
finalize_directory(convert_list_to_dir_items(list_data), G.CONTENT_IMAGES)
end_of_directory(True)
@measure_exec_time_decorator()
@custom_viewmode(G.VIEW_MAINMENU)
def home(self, pathitems=None): # pylint: disable=unused-argument
"""Show home listing"""
if 'switch_profile_guid' in self.params:
if G.IS_ADDON_EXTERNAL_CALL:
# Profile switch/ask PIN only once
ret = not self.params['switch_profile_guid'] == G.LOCAL_DB.get_active_profile_guid()
else:
# Profile switch/ask PIN every time you come from ...
ret = common.WndHomeProps[common.WndHomeProps.CURRENT_DIRECTORY] in ['', 'root', 'profiles']
if ret and not activate_profile(self.params['switch_profile_guid']):
xbmcplugin.endOfDirectory(G.PLUGIN_HANDLE, succeeded=False)
return
LOG.debug('Showing home listing')
list_data, extra_data = common.make_call('get_mainmenu') # pylint: disable=unused-variable
finalize_directory(convert_list_to_dir_items(list_data), G.CONTENT_FOLDER,
title=(G.LOCAL_DB.get_profile_config('profileName', '???') +
' - ' + common.get_local_string(30097)))
end_of_directory(True)
@measure_exec_time_decorator()
@common.inject_video_id(path_offset=0, inject_full_pathitems=True)
def show(self, videoid, pathitems):
if videoid.mediatype == common.VideoId.SEASON:
self._episodes(videoid, pathitems)
else:
self._seasons(videoid, pathitems)
def _seasons(self, videoid, pathitems):
"""Show the seasons list of a tv show"""
call_args = {
'pathitems': pathitems,
'tvshowid_dict': videoid.to_dict(),
'perpetual_range_start': self.perpetual_range_start,
}
list_data, extra_data = common.make_call('get_seasons', call_args)
if len(list_data) == 1:
# Check if Kodi setting "Flatten TV show seasons" is enabled
value = common.json_rpc('Settings.GetSettingValue',
{'setting': 'videolibrary.flattentvshows'}).get('value')
if value != 0: # Values: 0=never, 1=if one season, 2=always
# If there is only one season, load and show the episodes now
pathitems = list_data[0]['url'].replace(G.BASE_URL, '').strip('/').split('/')[1:]
videoid = common.VideoId.from_path(pathitems)
self._episodes(videoid, pathitems)
return
self._seasons_directory(list_data, extra_data)
@custom_viewmode(G.VIEW_SEASON)
def _seasons_directory(self, list_data, extra_data):
finalize_directory(convert_list_to_dir_items(list_data), G.CONTENT_SEASON, 'sort_only_label',
title=extra_data.get('title', ''))
end_of_directory(self.dir_update_listing)
@custom_viewmode(G.VIEW_EPISODE)
def _episodes(self, videoid, pathitems):
"""Show the episodes list of a season"""
call_args = {
'pathitems': pathitems,
'seasonid_dict': videoid.to_dict(),
'perpetual_range_start': self.perpetual_range_start,
}
list_data, extra_data = common.make_call('get_episodes', call_args)
finalize_directory(convert_list_to_dir_items(list_data), G.CONTENT_EPISODE, 'sort_episodes',
title=extra_data.get('title', ''))
end_of_directory(self.dir_update_listing)
auto_scroll(list_data)
@measure_exec_time_decorator()
@custom_viewmode(G.VIEW_SHOW)
def video_list(self, pathitems):
"""Show a video list of a list ID"""
menu_data = G.MAIN_MENU_ITEMS.get(pathitems[1])
if not menu_data: # Dynamic menus
menu_data = G.LOCAL_DB.get_value(pathitems[1], table=TABLE_MENU_DATA, data_type=dict)
call_args = {
'list_id': pathitems[2],
'menu_data': menu_data,
'is_dynamic_id': not G.is_known_menu_context(pathitems[2])
}
list_data, extra_data = common.make_call('get_video_list', call_args)
finalize_directory(convert_list_to_dir_items(list_data), menu_data.get('content_type', G.CONTENT_SHOW),
title=get_title(menu_data, extra_data))
end_of_directory(False)
return menu_data.get('view')
@measure_exec_time_decorator()
@custom_viewmode(G.VIEW_SHOW)
def video_list_sorted(self, pathitems):
"""Show a video list sorted of a 'context' name"""
menu_data = G.MAIN_MENU_ITEMS.get(pathitems[1])
if not menu_data: # Dynamic menus
menu_data = G.LOCAL_DB.get_value(pathitems[1], table=TABLE_MENU_DATA, data_type=dict)
call_args = {
'pathitems': pathitems,
'menu_data': menu_data,
'sub_genre_id': self.params.get('sub_genre_id'),  # Used to show the sub-genre folder when sub-genres exist
'perpetual_range_start': self.perpetual_range_start,
'is_dynamic_id': not G.is_known_menu_context(pathitems[2])
}
list_data, extra_data = common.make_call('get_video_list_sorted', call_args)
sort_type = 'sort_nothing'
if menu_data['path'][1] == 'myList' and int(G.ADDON.getSettingInt('menu_sortorder_mylist')) == 0:
# At the moment it is not possible to make a query with results already sorted for 'myList',
# so we add Kodi's own sort order
sort_type = 'sort_label_ignore_folders'
finalize_directory(convert_list_to_dir_items(list_data), menu_data.get('content_type', G.CONTENT_SHOW),
title=get_title(menu_data, extra_data), sort_type=sort_type)
end_of_directory(self.dir_update_listing)
return menu_data.get('view')
@measure_exec_time_decorator()
@custom_viewmode(G.VIEW_FOLDER)
def recommendations(self, pathitems):
"""Show video lists for a genre"""
menu_data = G.MAIN_MENU_ITEMS.get(pathitems[1])
call_args = {
'menu_data': menu_data,
'genre_id': None,
'force_use_videolist_id': True,
}
list_data, extra_data = common.make_call('get_genres', call_args)
finalize_directory(convert_list_to_dir_items(list_data), G.CONTENT_FOLDER,
title=get_title(menu_data, extra_data), sort_type='sort_label')
end_of_directory(False)
return menu_data.get('view')
@measure_exec_time_decorator()
@custom_viewmode(G.VIEW_SHOW)
def supplemental(self, pathitems): # pylint: disable=unused-argument
"""Show supplemental video list (eg. trailers) of a tv show / movie"""
menu_data = {'path': ['is_context_menu_item', 'is_context_menu_item'], # Menu item do not exists
'title': common.get_local_string(30179)}
from json import loads
call_args = {
'menu_data': menu_data,
'video_id_dict': loads(self.params['video_id_dict']),
'supplemental_type': self.params['supplemental_type']
}
list_data, extra_data = common.make_call('get_video_list_supplemental', call_args)
finalize_directory(convert_list_to_dir_items(list_data), menu_data.get('content_type', G.CONTENT_SHOW),
title=get_title(menu_data, extra_data))
end_of_directory(self.dir_update_listing)
return menu_data.get('view')
@measure_exec_time_decorator()
@custom_viewmode(G.VIEW_FOLDER)
def genres(self, pathitems):
"""Show loco list of a genre or from loco root the list of contexts specified in the menu data"""
menu_data = G.MAIN_MENU_ITEMS.get(pathitems[1])
if not menu_data: # Dynamic menus
menu_data = G.LOCAL_DB.get_value(pathitems[1], table=TABLE_MENU_DATA, data_type=dict)
call_args = {
'menu_data': menu_data,
# When genre_id is None, the loco root is loaded with the list of contexts specified in the menu data
'genre_id': None if len(pathitems) < 3 else int(pathitems[2]),
'force_use_videolist_id': False,
}
list_data, extra_data = common.make_call('get_genres', call_args)
finalize_directory(convert_list_to_dir_items(list_data), G.CONTENT_FOLDER,
title=get_title(menu_data, extra_data), sort_type='sort_label')
end_of_directory(False)
return menu_data.get('view')
@custom_viewmode(G.VIEW_FOLDER)
def subgenres(self, pathitems):
"""Show a lists of sub-genres of a 'genre id'"""
menu_data = G.MAIN_MENU_ITEMS[pathitems[1]]
call_args = {
'menu_data': menu_data,
'genre_id': pathitems[2]
}
list_data, extra_data = common.make_call('get_subgenres', call_args)
finalize_directory(convert_list_to_dir_items(list_data), menu_data.get('content_type', G.CONTENT_SHOW),
title=get_title(menu_data, extra_data),
sort_type='sort_label')
end_of_directory(False)
return menu_data.get('view')
def search(self, pathitems):
from resources.lib.navigation.directory_search import route_search_nav
route_search_nav(pathitems, self.perpetual_range_start, self.dir_update_listing, self.params)
@measure_exec_time_decorator()
def exported(self, pathitems=None):
"""List all items that are exported to the Kodi library"""
chunked_video_list, perpetual_range_selector = lib_utils.list_contents(self.perpetual_range_start)
if chunked_video_list:
self._exported_directory(pathitems, chunked_video_list, perpetual_range_selector)
else:
ui.show_notification(common.get_local_string(30111))
xbmcplugin.endOfDirectory(G.PLUGIN_HANDLE, succeeded=False)
@custom_viewmode(G.VIEW_SHOW)
def _exported_directory(self, pathitems, chunked_video_list, perpetual_range_selector):
menu_data = G.MAIN_MENU_ITEMS['exported']
call_args = {
'pathitems': pathitems,
'menu_data': menu_data,
'chunked_video_list': chunked_video_list,
'perpetual_range_selector': perpetual_range_selector
}
list_data, extra_data = common.make_call('get_video_list_chunked', call_args)
finalize_directory(convert_list_to_dir_items(list_data), menu_data.get('content_type', G.CONTENT_SHOW),
title=get_title(menu_data, extra_data))
end_of_directory(self.dir_update_listing)
return menu_data.get('view')
|
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
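# Illustrative only (hypothetical contents): a bitcoin.conf such as
#   rpcuser=alice
#   rpcpassword=secret
#   testnet=1
# is parsed by prepending a fake "[all]" section header, yielding
# {'rpcuser': 'alice', 'rpcpassword': 'secret', 'testnet': '1'}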
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 18332 if testnet else 8332
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
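# Greedy selection: take unspent outputs in order until 'needed' is covered;
# returns (chosen inputs, overshoot), where the overshoot becomes the change amount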
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
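# For example (illustrative only): json.dumps(Decimal("0.001")) raises a TypeError
# with the standard encoder, whereas json.dumps(float(Decimal("0.001"))) succeeds.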
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
fee = total_in-total_out # the implied fee; previously referenced here without being defined
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# coding: utf-8
import itertools
import logging
import unittest
import urllib
import environment
import keyspace_util
import utils
from protocols_flavor import protocols_flavor
from vtdb import dbexceptions
from vtdb import vtgate_cursor
from vtdb import vtgate_client
shard_0_master = None
shard_1_master = None
lookup_master = None
keyspace_env = None
create_vt_user = '''create table vt_user (
id bigint,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
create_vt_user2 = '''create table vt_user2 (
id bigint,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
create_vt_user_extra = '''create table vt_user_extra (
user_id bigint,
email varchar(64),
primary key (user_id)
) Engine=InnoDB'''
create_vt_music = '''create table vt_music (
user_id bigint,
id bigint,
song varchar(64),
primary key (user_id, id)
) Engine=InnoDB'''
create_vt_music_extra = '''create table vt_music_extra (
music_id bigint,
user_id bigint,
artist varchar(64),
primary key (music_id)
) Engine=InnoDB'''
create_join_user = '''create table join_user (
id bigint,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
create_join_user_extra = '''create table join_user_extra (
user_id bigint,
email varchar(64),
primary key (user_id)
) Engine=InnoDB'''
create_join_name_info = '''create table join_name_info (
name varchar(128),
info varchar(128),
primary key (name)
) Engine=InnoDB'''
create_vt_user_seq = '''create table vt_user_seq (
id int,
next_id bigint,
cache bigint,
increment bigint,
primary key(id)
) comment 'vitess_sequence' Engine=InnoDB'''
init_vt_user_seq = 'insert into vt_user_seq values(0, 1, 2, 1)'
create_vt_music_seq = '''create table vt_music_seq (
id int,
next_id bigint,
cache bigint,
increment bigint,
primary key(id)
) comment 'vitess_sequence' Engine=InnoDB'''
init_vt_music_seq = 'insert into vt_music_seq values(0, 1, 2, 1)'
create_name_user2_map = '''create table name_user2_map (
name varchar(64),
user2_id bigint,
primary key (name, user2_id)
) Engine=InnoDB'''
create_music_user_map = '''create table music_user_map (
music_id bigint,
user_id bigint,
primary key (music_id)
) Engine=InnoDB'''
vschema = {
'user': '''{
"sharded": true,
"vindexes": {
"user_index": {
"type": "hash"
},
"unicode_hash": {
"type": "unicode_loose_md5"
},
"name_user2_map": {
"type": "lookup_hash",
"params": {
"table": "name_user2_map",
"from": "name",
"to": "user2_id"
},
"owner": "vt_user2"
},
"music_user_map": {
"type": "lookup_hash_unique",
"params": {
"table": "music_user_map",
"from": "music_id",
"to": "user_id"
},
"owner": "vt_music"
}
},
"tables": {
"vt_user": {
"column_vindexes": [
{
"column": "id",
"name": "user_index"
}
],
"auto_increment": {
"column": "id",
"sequence": "vt_user_seq"
}
},
"vt_user2": {
"column_vindexes": [
{
"column": "id",
"name": "user_index"
},
{
"column": "name",
"name": "name_user2_map"
}
]
},
"vt_user_extra": {
"column_vindexes": [
{
"column": "user_id",
"name": "user_index"
}
]
},
"vt_music": {
"column_vindexes": [
{
"column": "user_id",
"name": "user_index"
},
{
"column": "id",
"name": "music_user_map"
}
],
"auto_increment": {
"column": "id",
"sequence": "vt_music_seq"
}
},
"vt_music_extra": {
"column_vindexes": [
{
"column": "music_id",
"name": "music_user_map"
},
{
"column": "user_id",
"name": "user_index"
}
]
},
"join_user": {
"column_vindexes": [
{
"column": "id",
"name": "user_index"
}
]
},
"join_user_extra": {
"column_vindexes": [
{
"column": "user_id",
"name": "user_index"
}
]
},
"join_name_info": {
"column_vindexes": [
{
"column": "name",
"name": "unicode_hash"
}
]
}
}
}''',
'lookup': '''{
"sharded": false,
"tables": {
"vt_user_seq": {
"type": "sequence"
},
"vt_music_seq": {
"type": "sequence"
},
"music_user_map": {},
"name_user2_map": {}
}
}''',
}
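# Summary of the vschema above: the 'user' keyspace is sharded, with rows routed
# by hashing id/user_id (user_index) or, for join_name_info, by unicode_loose_md5
# on name. vt_user2.name and vt_music.id resolve through lookup tables
# (name_user2_map, music_user_map) that live in the unsharded 'lookup' keyspace,
# which also hosts the vt_user_seq/vt_music_seq sequence tables used for
# auto_increment.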
def setUpModule():
global keyspace_env
global shard_0_master
global shard_1_master
global lookup_master
logging.debug('in setUpModule')
try:
environment.topo_server().setup()
logging.debug('Setting up tablets')
keyspace_env = keyspace_util.TestEnv()
keyspace_env.launch(
'user',
shards=['-80', '80-'],
ddls=[
create_vt_user,
create_vt_user2,
create_vt_user_extra,
create_vt_music,
create_vt_music_extra,
create_join_user,
create_join_user_extra,
create_join_name_info,
],
rdonly_count=1, # to test SplitQuery
)
keyspace_env.launch(
'lookup',
ddls=[
create_vt_user_seq,
create_vt_music_seq,
create_music_user_map,
create_name_user2_map,
],
)
shard_0_master = keyspace_env.tablet_map['user.-80.master']
shard_1_master = keyspace_env.tablet_map['user.80-.master']
lookup_master = keyspace_env.tablet_map['lookup.0.master']
utils.apply_vschema(vschema)
utils.VtGate().start(
tablets=[shard_0_master, shard_1_master, lookup_master])
utils.vtgate.wait_for_endpoints('user.-80.master', 1)
utils.vtgate.wait_for_endpoints('user.80-.master', 1)
utils.vtgate.wait_for_endpoints('lookup.0.master', 1)
except:
tearDownModule()
raise
def tearDownModule():
logging.debug('in tearDownModule')
utils.required_teardown()
if utils.options.skip_teardown:
return
logging.debug('Tearing down the servers and setup')
if keyspace_env:
keyspace_env.teardown()
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
def get_connection(timeout=10.0):
protocol, endpoint = utils.vtgate.rpc_endpoint(python=True)
try:
return vtgate_client.connect(protocol, endpoint, timeout)
except Exception:
logging.exception('Connection to vtgate (timeout=%s) failed.', timeout)
raise
class TestVTGateFunctions(unittest.TestCase):
def setUp(self):
self.master_tablet = shard_1_master
if protocols_flavor().vtgate_python_types() == 'proto3':
self.int_type = 265
self.string_type = 6165
else:
self.int_type = 8L
self.string_type = 253L
def execute_on_master(self, vtgate_conn, sql, bind_vars):
return vtgate_conn._execute(
sql, bind_vars, tablet_type='master', keyspace_name=None)
def test_health(self):
f = urllib.urlopen('http://localhost:%d/debug/health' % utils.vtgate.port)
response = f.read()
f.close()
self.assertEqual(response, 'ok')
def test_srv_vschema(self):
"""Makes sure the SrvVSchema object is properly built."""
v = utils.run_vtctl_json(['GetSrvVSchema', 'test_nj'])
self.assertEqual(len(v['keyspaces']), 2, 'wrong vschema: %s' % str(v))
self.assertIn('user', v['keyspaces'])
self.assertIn('lookup', v['keyspaces'])
def test_user(self):
count = 4
vtgate_conn = get_connection()
cursor = vtgate_conn.cursor(
tablet_type='master', keyspace=None, writable=True)
# Initialize the sequence.
# TODO(sougou): Use DDL when ready.
cursor.begin()
cursor.execute(init_vt_user_seq, {})
cursor.commit()
# Test insert
for x in xrange(count):
i = x+1
cursor.begin()
cursor.execute(
'insert into vt_user (name) values (:name)',
{'name': 'test %s' % i})
self.assertEqual(
(cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
cursor.description),
([], 1L, i, []))
cursor.commit()
# Test select equal
for x in xrange(count):
i = x+1
cursor.execute('select id, name from vt_user where id = :id', {'id': i})
self.assertEqual(
(cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
cursor.description),
([(i, 'test %s' % i)], 1L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test case sensitivity
cursor.execute('select Id, Name from vt_user where iD = :id', {'id': 1})
self.assertEqual(
(cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
cursor.description),
([(1, 'test 1')], 1L, 0,
[('Id', self.int_type), ('Name', self.string_type)]))
# Test insert with no auto-inc
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user (id, name) values (:id, :name)',
{'id': 6, 'name': 'test 6'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
# Verify values in db
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((4L, 'test 4'), (6L, 'test 6')))
# Test IN clause
result = self.execute_on_master(
vtgate_conn,
'select id, name from vt_user where id in (:a, :b)', {'a': 1, 'b': 4})
result[0].sort()
self.assertEqual(
result,
([(1L, 'test 1'), (4L, 'test 4')], 2L, 0,
[('id', self.int_type), ('name', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select id, name from vt_user where id in (:a, :b)', {'a': 1, 'b': 2})
result[0].sort()
self.assertEqual(
result,
([(1L, 'test 1'), (2L, 'test 2')], 2L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test scatter
result = vtgate_conn._execute(
'select id, name from vt_user',
{}, tablet_type='master', keyspace_name=None)
result[0].sort()
self.assertEqual(
result,
([(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'),
(6L, 'test 6')], 5L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test stream over scatter
stream_cursor_1 = vtgate_conn.cursor(
tablet_type='master', keyspace=None,
cursorclass=vtgate_cursor.StreamVTGateCursor)
stream_cursor_1.execute('select id, name from vt_user', {})
stream_cursor_2 = vtgate_conn.cursor(
tablet_type='master', keyspace=None,
cursorclass=vtgate_cursor.StreamVTGateCursor)
stream_cursor_2.execute('select id, name from vt_user', {})
self.assertEqual(stream_cursor_1.description,
[('id', self.int_type), ('name', self.string_type)])
self.assertEqual(stream_cursor_2.description,
[('id', self.int_type), ('name', self.string_type)])
rows_1 = []
rows_2 = []
for row_1, row_2 in itertools.izip(stream_cursor_1, stream_cursor_2):
rows_1.append(row_1)
rows_2.append(row_2)
self.assertEqual(
sorted(rows_1),
[(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'),
(6L, 'test 6')])
self.assertEqual(
sorted(rows_2),
[(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'),
(6L, 'test 6')])
# Test updates
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_user set name = :name where id = :id',
{'id': 1, 'name': 'test one'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'update vt_user set name = :name where id = :id',
{'id': 4, 'name': 'test four'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(
result, ((1L, 'test one'), (2L, 'test 2'), (3L, 'test 3')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(
result, ((4L, 'test four'), (6L, 'test 6')))
# Test deletes
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user where id = :id',
{'id': 1})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user where id = :id',
{'id': 4})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((2L, 'test 2'), (3L, 'test 3')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((6L, 'test 6'),))
# test passing in the keyspace in the cursor
lcursor = vtgate_conn.cursor(
tablet_type='master', keyspace='lookup', writable=True)
with self.assertRaisesRegexp(
dbexceptions.DatabaseError, '.*table vt_user not found in schema.*'):
lcursor.execute('select id, name from vt_user', {})
def test_user2(self):
# user2 is for testing non-unique vindexes
vtgate_conn = get_connection()
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user2 (id, name) values (:id, :name)',
{'id': 1, 'name': 'name1'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user2 (id, name) values (:id, :name)',
{'id': 7, 'name': 'name1'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user2 (id, name) values (:id, :name)',
{'id': 2, 'name': 'name2'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((1L, 'name1'), (2L, 'name2')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((7L, 'name1'),))
result = lookup_master.mquery(
'vt_lookup', 'select name, user2_id from name_user2_map')
self.assertEqual(result, (('name1', 1L), ('name1', 7L), ('name2', 2L)))
# Test select by id
result = self.execute_on_master(
vtgate_conn,
'select id, name from vt_user2 where id = :id', {'id': 1})
self.assertEqual(
result, ([(1, 'name1')], 1L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test select by lookup
result = self.execute_on_master(
vtgate_conn,
'select id, name from vt_user2 where name = :name', {'name': 'name1'})
result[0].sort()
self.assertEqual(
result,
([(1, 'name1'), (7, 'name1')], 2L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test IN clause using non-unique vindex
result = self.execute_on_master(
vtgate_conn,
"select id, name from vt_user2 where name in ('name1', 'name2')", {})
result[0].sort()
self.assertEqual(
result,
([(1, 'name1'), (2, 'name2'), (7, 'name1')], 3L, 0,
[('id', self.int_type), ('name', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
"select id, name from vt_user2 where name in ('name1')", {})
result[0].sort()
self.assertEqual(
result,
([(1, 'name1'), (7, 'name1')], 2L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test delete
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user2 where id = :id',
{'id': 1})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user2 where id = :id',
{'id': 2})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ())
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((7L, 'name1'),))
result = lookup_master.mquery(
'vt_lookup', 'select name, user2_id from name_user2_map')
self.assertEqual(result, (('name1', 7L),))
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'delete from vt_user2 where id = :id',
{'id': 7})
vtgate_conn.commit()
def test_user_extra(self):
# user_extra is for testing unowned functional vindex
count = 4
vtgate_conn = get_connection()
for x in xrange(count):
i = x+1
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user_extra (user_id, email) '
'values (:user_id, :email)',
{'user_id': i, 'email': 'test %s' % i})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
for x in xrange(count):
i = x+1
result = self.execute_on_master(
vtgate_conn,
'select user_id, email from vt_user_extra where user_id = :user_id',
{'user_id': i})
self.assertEqual(
result,
([(i, 'test %s' % i)], 1L, 0,
[('user_id', self.int_type), ('email', self.string_type)]))
result = shard_0_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((4L, 'test 4'),))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_user_extra set email = :email where user_id = :user_id',
{'user_id': 1, 'email': 'test one'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'update vt_user_extra set email = :email where user_id = :user_id',
{'user_id': 4, 'email': 'test four'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((1L, 'test one'), (2L, 'test 2'), (3L, 'test 3')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((4L, 'test four'),))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user_extra where user_id = :user_id',
{'user_id': 1})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user_extra where user_id = :user_id',
{'user_id': 4})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((2L, 'test 2'), (3L, 'test 3')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ())
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'delete from vt_user_extra where user_id = :user_id',
{'user_id': 2})
self.execute_on_master(
vtgate_conn,
'delete from vt_user_extra where user_id = :user_id',
{'user_id': 3})
vtgate_conn.commit()
def test_music(self):
# music is for testing owned lookup index
vtgate_conn = get_connection()
# Initialize the sequence.
# TODO(sougou): Use DDL when ready.
vtgate_conn.begin()
self.execute_on_master(vtgate_conn, init_vt_music_seq, {})
vtgate_conn.commit()
count = 4
for x in xrange(count):
i = x+1
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_music (user_id, song) values (:user_id, :song)',
{'user_id': i, 'song': 'test %s' % i})
self.assertEqual(result, ([], 1L, i, []))
vtgate_conn.commit()
for x in xrange(count):
i = x+1
result = self.execute_on_master(
vtgate_conn,
'select user_id, id, song from vt_music where id = :id', {'id': i})
self.assertEqual(
result,
([(i, i, 'test %s' % i)], 1, 0,
[('user_id', self.int_type),
('id', self.int_type),
('song', self.string_type)]))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_music (user_id, id, song) '
'values (:user_id, :id, :song)',
{'user_id': 5, 'id': 6, 'song': 'test 6'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result,
((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (3L, 3L, 'test 3'),
(5L, 6L, 'test 6')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result, ((4L, 4L, 'test 4'),))
result = lookup_master.mquery(
'vt_lookup', 'select music_id, user_id from music_user_map')
self.assertEqual(
result,
((1L, 1L), (2L, 2L), (3L, 3L), (4L, 4L), (6L, 5L)))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_music set song = :song where id = :id',
{'id': 6, 'song': 'test six'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'update vt_music set song = :song where id = :id',
{'id': 4, 'song': 'test four'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result, ((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (3L, 3L, 'test 3'),
(5L, 6L, 'test six')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result, ((4L, 4L, 'test four'),))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_music where id = :id',
{'id': 3})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_music where user_id = :user_id',
{'user_id': 4})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result, ((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (5L, 6L, 'test six')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(result, ())
result = lookup_master.mquery(
'vt_lookup', 'select music_id, user_id from music_user_map')
self.assertEqual(result, ((1L, 1L), (2L, 2L), (6L, 5L)))
def test_music_extra(self):
# music_extra is for testing unowned lookup index
vtgate_conn = get_connection()
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_music_extra (music_id, user_id, artist) '
'values (:music_id, :user_id, :artist)',
{'music_id': 1, 'user_id': 1, 'artist': 'test 1'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'insert into vt_music_extra (music_id, artist) '
'values (:music_id, :artist)',
{'music_id': 6, 'artist': 'test 6'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn,
'select music_id, user_id, artist '
'from vt_music_extra where music_id = :music_id',
{'music_id': 6})
self.assertEqual(
result, ([(6L, 5L, 'test 6')], 1, 0,
[('music_id', self.int_type),
('user_id', self.int_type),
('artist', self.string_type)]))
result = shard_0_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ((1L, 1L, 'test 1'), (6L, 5L, 'test 6')))
result = shard_1_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ())
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_music_extra set artist = :artist '
'where music_id = :music_id',
{'music_id': 6, 'artist': 'test six'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'update vt_music_extra set artist = :artist '
'where music_id = :music_id',
{'music_id': 7, 'artist': 'test seven'})
self.assertEqual(result, ([], 0L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ((1L, 1L, 'test 1'), (6L, 5L, 'test six')))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_music_extra where music_id = :music_id',
{'music_id': 6})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_music_extra where music_id = :music_id',
{'music_id': 7})
self.assertEqual(result, ([], 0L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ((1L, 1L, 'test 1'),))
def test_joins(self):
vtgate_conn = get_connection()
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'insert into join_user (id, name) values (:id, :name)',
{'id': 1, 'name': 'name1'})
self.execute_on_master(
vtgate_conn,
'insert into join_user_extra (user_id, email) '
'values (:user_id, :email)',
{'user_id': 1, 'email': 'email1'})
self.execute_on_master(
vtgate_conn,
'insert into join_user_extra (user_id, email) '
'values (:user_id, :email)',
{'user_id': 2, 'email': 'email2'})
self.execute_on_master(
vtgate_conn,
'insert into join_name_info (name, info) '
'values (:name, :info)',
{'name': 'name1', 'info': 'name test'})
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u join join_user_extra e where e.user_id = u.id',
{})
self.assertEqual(
result,
([(1L, 'name1', 1L, 'email1')],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u join join_user_extra e where e.user_id = u.id+1',
{})
self.assertEqual(
result,
([(1L, 'name1', 2L, 'email2')],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u left join join_user_extra e on e.user_id = u.id+1',
{})
self.assertEqual(
result,
([(1L, 'name1', 2L, 'email2')],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u left join join_user_extra e on e.user_id = u.id+2',
{})
self.assertEqual(
result,
([(1L, 'name1', None, None)],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u join join_user_extra e on e.user_id = u.id+2 '
'where u.id = 2',
{})
self.assertEqual(
result,
([],
0,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, n.info '
'from join_user u join join_name_info n on u.name = n.name '
'where u.id = 1',
{})
self.assertEqual(
result,
([(1L, 'name1', 'name test')],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('info', self.string_type)]))
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'delete from join_user where id = :id',
{'id': 1})
self.execute_on_master(
vtgate_conn,
'delete from join_user_extra where user_id = :user_id',
{'user_id': 1})
self.execute_on_master(
vtgate_conn,
'delete from join_user_extra where user_id = :user_id',
{'user_id': 2})
vtgate_conn.commit()
def test_insert_value_required(self):
vtgate_conn = get_connection()
try:
vtgate_conn.begin()
with self.assertRaisesRegexp(
dbexceptions.DatabaseError, '.*value must be supplied.*'):
self.execute_on_master(
vtgate_conn,
'insert into vt_user_extra (email) values (:email)',
{'email': 'test 10'})
finally:
vtgate_conn.rollback()
def test_vtclient(self):
"""This test uses vtclient to send and receive various queries.
"""
# specify a good default keyspace for the connection here.
utils.vtgate.vtclient(
'insert into vt_user_extra(user_id, email) values (:v1, :v2)',
keyspace='user',
bindvars=[10, 'test 10'])
out, _ = utils.vtgate.vtclient(
'select user_id, email from vt_user_extra where user_id = :v1',
bindvars=[10], json_output=True)
self.assertEqual(out, {
u'fields': [u'user_id', u'email'],
u'rows': [[u'10', u'test 10']],
})
utils.vtgate.vtclient(
'update vt_user_extra set email=:v2 where user_id = :v1',
bindvars=[10, 'test 1000'])
out, _ = utils.vtgate.vtclient(
'select user_id, email from vt_user_extra where user_id = :v1',
bindvars=[10], streaming=True, json_output=True)
self.assertEqual(out, {
u'fields': [u'user_id', u'email'],
u'rows': [[u'10', u'test 1000']],
})
utils.vtgate.vtclient(
'delete from vt_user_extra where user_id = :v1', bindvars=[10])
out, _ = utils.vtgate.vtclient(
'select user_id, email from vt_user_extra where user_id = :v1',
bindvars=[10], json_output=True)
self.assertEqual(out, {
u'fields': [u'user_id', u'email'],
u'rows': None,
})
# check that specifying an invalid keyspace is propagated and triggers an
# error
_, err = utils.vtgate.vtclient(
'insert into vt_user_extra(user_id, email) values (:v1, :v2)',
keyspace='invalid',
bindvars=[10, 'test 10'],
raise_on_error=False)
self.assertIn('keyspace invalid not found in vschema', err)
def test_vtctl_vtgate_execute(self):
"""This test uses 'vtctl VtGateExecute' to send and receive various queries.
"""
utils.vtgate.execute(
'insert into vt_user_extra(user_id, email) values (:user_id, :email)',
bindvars={'user_id': 11, 'email': 'test 11'})
qr = utils.vtgate.execute(
'select user_id, email from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
logging.debug('Original row: %s', str(qr))
self.assertEqual(len(qr['rows']), 1)
v = qr['rows'][0][1]
self.assertEqual(v, 'test 11')
utils.vtgate.execute(
'update vt_user_extra set email=:email where user_id = :user_id',
bindvars={'user_id': 11, 'email': 'test 1100'})
qr = utils.vtgate.execute(
'select user_id, email from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
logging.debug('Modified row: %s', str(qr))
self.assertEqual(len(qr['rows']), 1)
v = qr['rows'][0][1]
self.assertEqual(v, 'test 1100')
utils.vtgate.execute(
'delete from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
qr = utils.vtgate.execute(
'select user_id, email from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
self.assertEqual(len(qr['rows'] or []), 0)
def test_split_query(self):
"""This test uses 'vtctl VtGateSplitQuery' to validate the Map-Reduce APIs.
We want to return KeyRange queries.
"""
sql = 'select id, name from vt_user'
s = utils.vtgate.split_query(sql, 'user', 2)
self.assertEqual(len(s), 2)
first_half_queries = 0
second_half_queries = 0
for q in s:
self.assertEqual(q['query']['sql'], sql)
self.assertIn('key_range_part', q)
self.assertEqual(len(q['key_range_part']['key_ranges']), 1)
kr = q['key_range_part']['key_ranges'][0]
eighty_in_base64 = 'gA=='
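# 'gA==' is base64 for the single byte 0x80, the keyspace id at the -80/80-
# shard boundary this keyspace was launched with.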
is_first_half = 'start' not in kr and kr['end'] == eighty_in_base64
is_second_half = 'end' not in kr and kr['start'] == eighty_in_base64
self.assertTrue(is_first_half or is_second_half,
'invalid keyrange %s' % str(kr))
if is_first_half:
first_half_queries += 1
else:
second_half_queries += 1
self.assertEqual(first_half_queries, 1, 'invalid split %s' % str(s))
self.assertEqual(second_half_queries, 1, 'invalid split %s' % str(s))
if __name__ == '__main__':
utils.main()
|
|
# Copyright 2015 Michael DeHaan <michael.dehaan/gmail>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import strider.utils.logger
from strider.common.commands import invoke
from strider.common.instance_data import InstanceData
import os
import socket
import time
from jinja2 import Template
SSH_CANARY = "_ssh_availability_test_" # string used in SSH connectivity checks
SSH_RETRY = 10 # seconds to wait between availability checks
SSH_CONNECT_TIMEOUT = 10 # ssh ConnectTimeout (seconds) used while polling for availability
class Shell(object):
def __init__(self, copy_from=None, copy_to=None, commands=None):
self.log = strider.utils.logger.get_logger('SHELL')
self.copy_from = copy_from
self.copy_to = copy_to
self.commands = commands
# track whether we've waited for SSH yet
self.waited = False
if self.commands is None:
self.commands = []
# backwards compatibility
if self.copy_from and self.copy_to:
log("WARNING: copy_from and copy_to are deprecated, see the new example on github")
self.commands.insert(0, dict(
type = "copy",
copy_from = self.copy_from,
copy_to = self.copy_to
))
# --------------------------------------------------------------------------
# PROVISIONER PUBLIC API
# --------------------------------------------------------------------------
def ssh(self, instance_data):
""" open a shell into a box """
# if writing a new provisioner, it may be helpful to subclass this
# class rather than reimplementing this method. Then just override
# converge
self._wait_for_ready(instance_data)
return invoke(self._build_ssh_cmd(instance_data,""))
# --------------------------------------------------------------------------
def converge(self, instance_data):
""" run all convergence operations in the commands list """
for item in self.commands:
if isinstance(item, basestring):
item = dict(type="ssh", command=item)
self._dispatch(instance_data, item)
# --------------------------------------------------------------------------
# PRIVATE FUNCTIONS
# --------------------------------------------------------------------------
def _dispatch(self, instance_data, item):
"""
Handle an item in the commands array, examples:
"echo foo" # SSH (shorthand)
{ type: "ssh", command: "echo foo"} # also SSH
{ type: "copy", from: x, to: y } # scp
"""
what = item.get('type', None)
if what == 'ssh':
# wait for SSH and then launch a command
self._wait_for_ready(instance_data)
return invoke(self._build_ssh_cmd(
instance_data, item.get('command', None)
))
elif what == 'copy' or what == 'rsync':
# wait for SSH and then launch an scp
copy_from = item['copy_from']
copy_to = item['copy_to']
self._wait_for_ready(instance_data)
if what == 'rsync':
return invoke(self._build_rsync_cmd(
instance_data,
copy_from = copy_from,
copy_to = copy_to
))
elif what == 'copy':
invoke(self._build_ssh_cmd(instance_data, "mkdir -p %s" % copy_to))
return invoke(self._build_copy_cmd(
instance_data,
copy_from = copy_from,
copy_to = copy_to
))
elif what == 'command':
command = item['command']
# wait for SSH and then run a local command
self._wait_for_ready(instance_data)
return invoke(self._build_local_cmd(
instance_data,
command = command
))
# add any other operation types here
else:
raise Exception("unknown type in commands list: %s, %s" % (item, what))
# --------------------------------------------------------------------------
def _wait_for_ready(self, instance_data):
""" wait for SSH availability """
if self.waited:
return True
(host, port) = (instance_data.ssh.host, instance_data.ssh.port)
self.log("checking for SSH availability on %s:%s" % (host, port))
while True:
cmd = self._build_ssh_cmd(
instance_data,
"echo %s" % SSH_CANARY,
connect_timeout=SSH_CONNECT_TIMEOUT)
output = invoke(cmd, check_output=True).strip()
if output.endswith(SSH_CANARY):
self.waited = True
return True
self.log("retrying SSH availability in %s seconds..." % SSH_RETRY)
time.sleep(SSH_RETRY)
# --------------------------------------------------------------------------
def _ssh_params(self, instance_data):
""" builds common SSH params used by all operations"""
return "-o Port=%s -o LogLevel=quiet -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" % instance_data.ssh.port
# --------------------------------------------------------------------------
def _build_ssh_cmd(self, instance_data, what, connect_timeout=30):
""" builds a shell command line """
assert instance_data is not None
assert what is not None
base = "ssh %s -o ConnectTimeout=%s -i %s %s@%s -p %s" % (
self._ssh_params(instance_data),
connect_timeout,
instance_data.ssh.keyfile,
instance_data.ssh.user,
instance_data.ssh.host,
instance_data.ssh.port
)
if what is None or what == "":
return base
return "%s %s" % (base, what)
# --------------------------------------------------------------------------
def _build_copy_cmd(self, instance_data, copy_from, copy_to):
""" builds a remote copy command line """
# scp is provided because rsync is sometimes unreliable
# on AWS free tier, even on large enough instance sizes
assert instance_data is not None
assert copy_from is not None
assert copy_to is not None
return "scp -r %s -i %s -P %s %s %s@%s:%s" % (
self._ssh_params(instance_data),
instance_data.ssh.keyfile,
instance_data.ssh.port,
copy_from,
instance_data.ssh.user,
instance_data.ssh.host,
copy_to
)
# --------------------------------------------------------------------------
def _build_rsync_cmd(self, instance_data, copy_from, copy_to):
""" builds a remote copy command line """
return "rsync -avze 'ssh %s -i %s' --delete %s %s@%s:%s" % (
self._ssh_params(instance_data),
instance_data.ssh.keyfile,
copy_from,
instance_data.ssh.user,
instance_data.ssh.host,
copy_to
)
# --------------------------------------------------------------------------
def _build_local_cmd(self, instance_data, command):
""" builds a local shell command line """
assert instance_data is not None
params = {
'ssh_host': instance_data.ssh.host,
'ssh_keyfile': instance_data.ssh.keyfile,
'ssh_user': instance_data.ssh.user,
'ssh_port': instance_data.ssh.port
}
command = Template(command).render(**params)
return command
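# Illustrative only (not part of the original module): how a commands list might
# combine the item types handled by _dispatch. The paths, package name and the
# instance_data value are made-up placeholders; 'command' items are rendered
# locally with Jinja2, so the ssh_* parameters can be interpolated.
#
# shell = Shell(commands=[
#     "sudo yum -y install git",                                    # shorthand -> ssh
#     {"type": "rsync", "copy_from": "./app/", "copy_to": "/opt/app"},
#     {"type": "command",
#      "command": "scp -i {{ ssh_keyfile }} extra.tar {{ ssh_user }}@{{ ssh_host }}:/tmp/"},
# ])
# shell.converge(instance_data)  # instance_data is supplied by the provisioner driving Shell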
|
|
# Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from src.main.HmcRestClient import *
from src.virtual_io_server import ListVirtualIOServer, \
VirtualSCSIMapping, \
VirtualFibreChannelMapping
from src.virtual_io_server.volume_group import CreateVolumeGroup, \
ListVolumeGroup, \
ModifyVolumeGroup
from src.partition_operation_util import PowerOnPartition,\
PowerOffPartition,\
ModifyPartition, \
CreatePartition
from src.logical_partition_profile import ListLogicalPartitionProfile,\
CreateLogicalPartitionProfile,\
ModifyLogicalPartitionProfile
from src.logical_partition import ListLogicalPartition
import sys
import os
#####################
# VIRTUAL IO SERVER
####################
directory = os.path.dirname(os.path.dirname(__file__))
def virtualioserver_children(n1, managedsystem_uuid, ip, x_api_session):
"""
This function provides a detailed view of the virtualIOserver
Args:
n1 : variable for client selected choices
managedsystem_uuid : The unique id of the Managed system
ip : ip address of hmc
x_api_session : session to be used
"""
os.system("cls")
st = 'y'
n = n1
if n == 1:
while True:
print ("\n\n","VirtualIOServer operations".center(50))
print_list = ['List','Create','Modify',
'Poweron','Poweroff','Return to vios menu',
'Return to ManagedSystem menu','Return to MainMenu','Help','Exit' ]
#select any VirtualIOServer operation
x = int(print_obj.print_on_screen(print_list) )
if x in [1,3,6]:
print("\nAvailable VirtualIOServers : ")
virtualioserver_object = ListVirtualIOServer.ListVirtualIOServer()
object_list = virtualioserver_object.\
list_VirtualIOServer(ip, managedsystem_uuid,
x_api_session)
selected_vios = get_selectedobject(object_list)
try:
if x == 1:
#object creation and method call to list the details of the selected vios
if selected_vios != None:
virtualioserver_object.print_virtualioserver_attributes(selected_vios)
elif x == 2:
#object creation and method call to create vios
try:
print("\nVirtualIOServer will be created with Following configruations,\n maximum,mimimum and desired memory = 256",
"\nShared processors,Minimum,Desired and maximum processing units = 0.5")
logicalpartition_object = CreatePartition.CreatePartition("VirtualIOServer")
created_logicalpartition_object = logicalpartition_object.create_Partition(ip,managedsystem_uuid,x_api_session)
print("\nPartition %s Created Successfully\n"%(created_logicalpartition_object.PartitionName.value()))
except (TypeError,AttributeError) :
log_object.log_error("Error in VIOS creation")
elif x == 3:
#object creation and method call to modify existing vios memory attributes
if selected_vios != None:
print("\nVIOS memory attributes are modified as maximum ,minimum ,desired memory = 512")
modify_logicalpartition_object = ModifyPartition.ModifyPartition("VirtualIOServer")
result = modify_logicalpartition_object.modify_Partition(ip,selected_vios,x_api_session)
if result:
print("\nModifications are updated successfully")
else:
log_object.log_error("Error occured while updating the modifications.Verify \
whether the vios is in running or not activated state before updating it")
elif x == 4:
#object creation and method call to poweron inactive vios
virtualioserver_object = ListVirtualIOServer.ListVirtualIOServer()
object_list = virtualioserver_object.\
list_VirtualIOServer(ip, managedsystem_uuid,
x_api_session)
print("\nList of VIOS in inactive state")
k = 0
inactive_object_list = []
for i in range(0,len(object_list)):
if object_list[i].PartitionState.value() == "not activated":
k = k+1
print("%s.%s" % (k,object_list[i].PartitionName.value()))
inactive_object_list.append(object_list[i])
if k>0:
try:
c = int(input("\nSelect any partition index the operation to be performed:"))
if c > 0:
ch = c-1
selected_vios = inactive_object_list[ch]
logicalpartition_object = PowerOnPartition.PowerOnPartition("VirtualIOServer")
logicalpartition_object.poweron_Partition(ip,selected_vios,x_api_session)
else :
print("\nTry again using valid option")
except IndexError :
print("\nTry again using valid option")
else:
log_object.log_warn("No Partitions are in inactive state")
elif x == 5:
#object creation and method call to poweroff selected active state vios
virtualioserver_object = ListVirtualIOServer.ListVirtualIOServer()
object_list = virtualioserver_object.\
list_VirtualIOServer(ip, managedsystem_uuid,
x_api_session)
print("\nList of VIOS in active state")
k = 0
active_object_list = []
for i in range(0,len(object_list)):
if object_list[i].PartitionState.value() == "open firmware" or object_list[i].PartitionState.value() == "running":
k = k+1
print("%s.%s" % (k,object_list[i].PartitionName.value()))
active_object_list.append(object_list[i])
if k>0 :
try:
c = int(input("\nSelect any VIOS index the operation to be performed:"))
if c > 0:
ch = c-1
selected_vios = active_object_list[ch]
logicalpartition_object = PowerOffPartition.PowerOffPartition("VirtualIOServer")
logicalpartition_object.poweroff_Partition(ip,selected_vios,x_api_session)
else:
print("\nTry again using valid option")
except IndexError :
print("\nTry again using valid option")
elif x == 6:
os.system("cls")
return 1
elif x == 7:
os.system("cls")
return 2
elif x == 8:
os.system("cls")
return 3
elif x == 9:
print(open(directory+"/help/VirtualIOServer/VirtualIOServerOperations.txt").read())
elif x == 10:
sys.exit(1)
else:
print("\nTry again using valid option")
back_to_menu()
except TypeError :
log_object.log_warn("No VirtualIOServers Available")
back_to_menu()
elif n == 2:
while True:
print ("\n\n","LogicalPartitionProfile".center(50))
print_list = ['List','Create','Modify','Return to vios menu',
'Return to ManagedSystem menu','Return to MainMenu',
'Help','Exit']
#select any LogicalPartitionProfile operation
x1 = int(print_obj.print_on_screen(print_list))
if x1 > 0 and x1 < 4:
print("\nAvailable VirtualIOServers :")
virtualioserver_object = ListVirtualIOServer.ListVirtualIOServer()
object_list = virtualioserver_object.\
list_VirtualIOServer(ip, managedsystem_uuid,
x_api_session)
selected_vios = get_selectedobject(object_list)
list_logicalpartitionprofile_object=ListLogicalPartitionProfile.ListLogicalPartitionProfile("VirtualIOServer")
if x1 == 1:
#object creation and method call to list the details of the selected vios profiles
if selected_vios != None:
partition_id = selected_vios.PartitionUUID.value()
profile_object_list = list_logicalpartitionprofile_object.\
list_LogicalPartitionProfile(ip,partition_id,x_api_session)
for i in range(0,len(profile_object_list)):
list_logicalpartitionprofile_object.\
print_logicalpartitionprofile_attributes(profile_object_list[i])
elif x1 == 2:
#object creation and method call to create profile in the selected vios
if selected_vios != None:
print("\nLogical Partition profile is created with Following configruations,\n maximum,mimimum and desired memory = 256",
"\nprofile type = REG_LPAR_PROFILE_TYPE")
create_logicalpartitionprofile_object = CreateLogicalPartitionProfile.\
CreateLogicalPartitionProfile("VirtualIOServer")
created_logicalpartitionprofile_object = create_logicalpartitionprofile_object.\
create_LogicalPartitionProfile(ip,selected_vios,x_api_session)
if created_logicalpartitionprofile_object != None:
print("\nProfile %s Created Successfully\n"%(created_logicalpartitionprofile_object.\
ProfileName.value()))
list_logicalpartitionprofile_object.\
print_logicalpartitionprofile_attributes(created_logicalpartitionprofile_object)
elif x1 == 3:
#object creation and method call to modify selected profiles memory attributes
if selected_vios != None:
partition_id = selected_vios.PartitionUUID.value()
profile_object_list = list_logicalpartitionprofile_object.\
list_LogicalPartitionProfile(ip,partition_id, x_api_session)
print("\nAvailable LogicalPartitionProfile:")
for i in range(0,len(profile_object_list)):
print("%s.%s"%(i+1,profile_object_list[i].ProfileName.value()))
try:
ch = int(input("\nSelect any profile index to modify :"))
print("\nLogical partition profile memory attributes are modified as maximum, minimum, desired memory = 512")
modify_logicalpartitionprofile_object = ModifyLogicalPartitionProfile.\
ModifyLogicalPartitionProfile("VirtualIOServer")
modify_bool = modify_logicalpartitionprofile_object.\
modify_LogicalPartitionProfile(ip,
partition_id,
profile_object_list[ch-1],
x_api_session)
if modify_bool:
print("\nUpdations to the profile are made Successfully")
else:
log_object.log_error("\nError occured while updating")
except IndexError :
print("\nTry again using valid option")
elif x1 == 4:
os.system("cls")
return 1
elif x1 == 5:
os.system("cls")
return 2
elif x1 == 6:
os.system("cls")
return 3
elif x1 == 7:
print(open(directory+"/help/LogicalPartitionProfile.txt").read())
elif x1 == 8:
sys.exit(1)
else:
print("\nTry again using valid option")
back_to_menu()
elif n == 3:
while True:
print ("\n\n","VolumeGroup".center(50))
print_list = ['List','Create','Modify','Return to vios menu',
'Return to ManagedSystem menu',
'Return to MainMenu','Help','Exit']
x1 = int(print_obj.print_on_screen(print_list))
if x1 > 0 and x1 < 4:
print("\nAvailable VirtualIOServers : ")
virtualioserver_object = ListVirtualIOServer.ListVirtualIOServer()
object_list = virtualioserver_object.\
list_VirtualIOServer(ip, managedsystem_uuid,
x_api_session)
selected_vios = get_selectedobject(object_list)
#select any VolumeGroup operation
if x1 == 1:
#object creation and method call to list the details of the volume group
if selected_vios != None:
vios_id = selected_vios.Metadata.Atom.AtomID.value()
print("\nAvailable VolumeGroups :")
volumegroup_list_object = ListVolumeGroup.ListVolumeGroup()
volumegroup_list = volumegroup_list_object.list_volumegroup(ip, vios_id,
x_api_session)
try:
for i in range(0,len(volumegroup_list)):
print("%s.%s"%(i+1,volumegroup_list[i].GroupName.value()))
ch = int(input("\nEnter your choice :"))
volumegroup_list_object.print_volumegroup_attributes(volumegroup_list[ch-1])
except TypeError:
log_object.log_warn("No VolumeGroups are available")
except IndexError:
print("\nTry using vaild option")
back_to_menu()
elif x1 == 2:
#object creation and method call to create a volume group
vios_uuid = selected_vios.Metadata.Atom.AtomID.value()
print("\nVolumeGroup will be created with one PhysicalVolume,the name of the disk is hardcoded")
volumegroup_object = CreateVolumeGroup.CreateVolumeGroup()
volumegroup_object.create_volumegroup(ip, vios_uuid, x_api_session)
back_to_menu()
elif x1 == 3:
#object creation and method call to modify volume group
#by adding a physical volume or creating a virtual disk
vios_id = selected_vios.Metadata.Atom.AtomID.value()
volumegroup_modify_object = ModifyVolumeGroup.ModifyVolumeGroup()
print("\nAvailable VolumeGroups :")
volumegroup_list_object = ListVolumeGroup.ListVolumeGroup()
volumegroup_list = volumegroup_list_object.list_volumegroup(ip, vios_id,
x_api_session)
if volumegroup_list != None:
for i in range(0,len(volumegroup_list)):
print("%s.%s"%(i+1,volumegroup_list[i].GroupName.value()))
choice = int(input("\nEnter your choice :"))
selected_volumegroup = volumegroup_list[choice-1]
ch = int(input("\n1.Add PhysicalVolume to VolumeGroup\n2.Create VirtualDisk in VolumeGroup\nEnter your choice :"))
if ch == 1:
volumegroup_modify_object.add_physicalvolume(ip, vios_id, x_api_session, selected_volumegroup)
elif ch == 2:
print("\nVirtualDisk of size VolumeGroup_size/2 will be created")
volumegroup_modify_object.add_virtualdisk(ip, vios_id, x_api_session, selected_volumegroup)
else:
print("\nTry again using valid option")
else :
log_object.log_warn("No VolumeGroups are available")
back_to_menu()
elif x1 == 4:
os.system("cls")
return 1
elif x1 == 5:
os.system("cls")
return 2
elif x1 == 6:
os.system("cls")
return 3
elif x1 == 7:
print(open(directory+"/help/VirtualIOServer/VolumeGroup.txt").read())
back_to_menu()
elif x1 == 8:
sys.exit(1)
else:
print("\nTry again using valid option")
back_to_menu()
elif n == 4:
#creates a vscsi mapping from selected vios to selected lpar
print("\nAvailable VirtualIOServers : ")
virtualioserver_object = ListVirtualIOServer.ListVirtualIOServer()
object_list = virtualioserver_object.\
list_VirtualIOServer(ip, managedsystem_uuid,
x_api_session)
selected_vios = get_selectedobject(object_list)
print("\nAvailable LogicalPartitions : ")
logicalpartition_object = ListLogicalPartition.ListLogicalPartition()
logicalpartition_object_list = logicalpartition_object.\
list_LogicalPartition(ip, managedsystem_uuid,
x_api_session)
selected_lpar = get_selectedobject(logicalpartition_object_list)
if selected_vios != None and selected_lpar !=None:
vscsi_object = VirtualSCSIMapping.VirtualSCSIMapping()
lpar_id = selected_lpar.Metadata.Atom.AtomID.value()
vscsi_object.create_vscsi_mapping(ip,managedsystem_uuid,x_api_session,selected_vios,lpar_id)
back_to_menu()
return 1
elif n == 5:
#creates a virtual fibre channel mapping from selected vios to selected lpar
print("\nAvailable VirtualIOServers : ")
virtualioserver_object = ListVirtualIOServer.ListVirtualIOServer()
object_list = virtualioserver_object.\
list_VirtualIOServer(ip, managedsystem_uuid,
x_api_session)
selected_vios = get_selectedobject(object_list)
print("\nAvailable LogicalPartitions : ")
logicalpartition_object = ListLogicalPartition.ListLogicalPartition()
logicalpartition_object_list = logicalpartition_object.\
list_LogicalPartition(ip, managedsystem_uuid,
x_api_session)
selected_lpar = get_selectedobject(logicalpartition_object_list)
if selected_vios != None and selected_lpar !=None:
vfc_object = VirtualFibreChannelMapping.VirtualFibreChannelMapping()
lpar_id = selected_lpar.Metadata.Atom.AtomID.value()
vfc_object.create_virtualfibrechannel_mapping(ip, managedsystem_uuid, x_api_session, selected_vios, lpar_id)
back_to_menu()
return 1
|
|
<<<<<<< HEAD
<<<<<<< HEAD
"""Word completion for GNU readline.
The completer completes keywords, built-ins and globals in a selectable
namespace (which defaults to __main__); when completing NAME.NAME..., it
evaluates (!) the expression up to the last dot and completes its attributes.
It's very cool to do "import sys" type "sys.", hit the completion key (twice),
and see the list of names defined by the sys module!
Tip: to use the tab key as the completion key, call
readline.parse_and_bind("tab: complete")
Notes:
- Exceptions raised by the completer function are *ignored* (and generally cause
the completion to fail). This is a feature -- since readline sets the tty
device in raw (or cbreak) mode, printing a traceback wouldn't work well
without some complicated hoopla to save, reset and restore the tty state.
- The evaluation of the NAME.NAME... form may cause arbitrary application
defined code to be executed if an object with a __getattr__ hook is found.
Since it is the responsibility of the application (or the user) to enable this
feature, I consider this an acceptable risk. More complicated expressions
(e.g. function calls or indexing operations) are *not* evaluated.
- When the original stdin is not a tty device, GNU readline is never
used, and this module (and the readline module) are silently inactive.
"""
import atexit
import builtins
import __main__
__all__ = ["Completer"]
class Completer:
def __init__(self, namespace = None):
"""Create a new completer for the command line.
Completer([namespace]) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)
"""
if namespace and not isinstance(namespace, dict):
raise TypeError('namespace must be a dictionary')
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = 1
else:
self.use_main_ns = 0
self.namespace = namespace
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
self.namespace = __main__.__dict__
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
def _callable_postfix(self, val, word):
if callable(val):
word = word + "("
return word
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
"""
import keyword
matches = []
n = len(text)
for word in keyword.kwlist:
if word[:n] == text:
matches.append(word)
for nspace in [builtins.__dict__, self.namespace]:
for word, val in nspace.items():
if word[:n] == text and word != "__builtins__":
matches.append(self._callable_postfix(val, word))
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return []
expr, attr = m.group(1, 3)
try:
thisobject = eval(expr, self.namespace)
except Exception:
return []
# get the content of the object, except __builtins__
words = dir(thisobject)
if "__builtins__" in words:
words.remove("__builtins__")
if hasattr(thisobject, '__class__'):
words.append('__class__')
words.extend(get_class_members(thisobject.__class__))
matches = []
n = len(attr)
for word in words:
if word[:n] == attr and hasattr(thisobject, word):
val = getattr(thisobject, word)
word = self._callable_postfix(val, "%s.%s" % (expr, word))
matches.append(word)
return matches
def get_class_members(klass):
ret = dir(klass)
if hasattr(klass,'__bases__'):
for base in klass.__bases__:
ret = ret + get_class_members(base)
return ret
try:
import readline
except ImportError:
pass
else:
readline.set_completer(Completer().complete)
# Release references early at shutdown (the readline module's
# contents are quasi-immortal, and the completer function holds a
# reference to globals).
atexit.register(lambda: readline.set_completer(None))
=======
"""Word completion for GNU readline.
The completer completes keywords, built-ins and globals in a selectable
namespace (which defaults to __main__); when completing NAME.NAME..., it
evaluates (!) the expression up to the last dot and completes its attributes.
It's very cool to do "import sys" type "sys.", hit the completion key (twice),
and see the list of names defined by the sys module!
Tip: to use the tab key as the completion key, call
readline.parse_and_bind("tab: complete")
Notes:
- Exceptions raised by the completer function are *ignored* (and generally cause
the completion to fail). This is a feature -- since readline sets the tty
device in raw (or cbreak) mode, printing a traceback wouldn't work well
without some complicated hoopla to save, reset and restore the tty state.
- The evaluation of the NAME.NAME... form may cause arbitrary application
defined code to be executed if an object with a __getattr__ hook is found.
Since it is the responsibility of the application (or the user) to enable this
feature, I consider this an acceptable risk. More complicated expressions
(e.g. function calls or indexing operations) are *not* evaluated.
- When the original stdin is not a tty device, GNU readline is never
used, and this module (and the readline module) are silently inactive.
"""
import atexit
import builtins
import __main__
__all__ = ["Completer"]
class Completer:
def __init__(self, namespace = None):
"""Create a new completer for the command line.
Completer([namespace]) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)
"""
if namespace and not isinstance(namespace, dict):
raise TypeError('namespace must be a dictionary')
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = 1
else:
self.use_main_ns = 0
self.namespace = namespace
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
self.namespace = __main__.__dict__
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
def _callable_postfix(self, val, word):
if callable(val):
word = word + "("
return word
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
"""
import keyword
matches = []
n = len(text)
for word in keyword.kwlist:
if word[:n] == text:
matches.append(word)
for nspace in [builtins.__dict__, self.namespace]:
for word, val in nspace.items():
if word[:n] == text and word != "__builtins__":
matches.append(self._callable_postfix(val, word))
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return []
expr, attr = m.group(1, 3)
try:
thisobject = eval(expr, self.namespace)
except Exception:
return []
# get the content of the object, except __builtins__
words = dir(thisobject)
if "__builtins__" in words:
words.remove("__builtins__")
if hasattr(thisobject, '__class__'):
words.append('__class__')
words.extend(get_class_members(thisobject.__class__))
matches = []
n = len(attr)
for word in words:
if word[:n] == attr and hasattr(thisobject, word):
val = getattr(thisobject, word)
word = self._callable_postfix(val, "%s.%s" % (expr, word))
matches.append(word)
return matches
def get_class_members(klass):
ret = dir(klass)
if hasattr(klass,'__bases__'):
for base in klass.__bases__:
ret = ret + get_class_members(base)
return ret
try:
import readline
except ImportError:
pass
else:
readline.set_completer(Completer().complete)
# Release references early at shutdown (the readline module's
# contents are quasi-immortal, and the completer function holds a
# reference to globals).
atexit.register(lambda: readline.set_completer(None))
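# Illustrative usage (comments only): to enable tab completion with a custom
# namespace in an interactive session, install a Completer and bind the tab
# key, e.g.:
#
#     my_namespace = {"answer": 42}
#     readline.set_completer(Completer(my_namespace).complete)
#     readline.parse_and_bind("tab: complete")
#
# after which typing "ans" followed by Tab completes to "answer".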
|
|
"""Provides functionality to interact with image processing services."""
import asyncio
from datetime import timedelta
import logging
from typing import Tuple
from PIL import ImageDraw
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID, ATTR_NAME, CONF_ENTITY_ID, CONF_NAME
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ENTITY_SERVICE_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.util.async_ import run_callback_threadsafe
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DOMAIN = "image_processing"
SCAN_INTERVAL = timedelta(seconds=10)
DEVICE_CLASSES = [
"alpr", # Automatic license plate recognition
"face", # Face
"ocr", # OCR
]
SERVICE_SCAN = "scan"
EVENT_DETECT_FACE = "image_processing.detect_face"
ATTR_AGE = "age"
ATTR_CONFIDENCE = "confidence"
ATTR_FACES = "faces"
ATTR_GENDER = "gender"
ATTR_GLASSES = "glasses"
ATTR_MOTION = "motion"
ATTR_TOTAL_FACES = "total_faces"
CONF_SOURCE = "source"
CONF_CONFIDENCE = "confidence"
DEFAULT_TIMEOUT = 10
DEFAULT_CONFIDENCE = 80
SOURCE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): cv.entity_domain("camera"),
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_SOURCE): vol.All(cv.ensure_list, [SOURCE_SCHEMA]),
vol.Optional(CONF_CONFIDENCE, default=DEFAULT_CONFIDENCE): vol.All(
vol.Coerce(float), vol.Range(min=0, max=100)
),
}
)
PLATFORM_SCHEMA_BASE = cv.PLATFORM_SCHEMA_BASE.extend(PLATFORM_SCHEMA.schema)
def draw_box(
draw: ImageDraw,
box: Tuple[float, float, float, float],
img_width: int,
img_height: int,
text: str = "",
color: Tuple[int, int, int] = (255, 255, 0),
) -> None:
"""
Draw a bounding box on an image.
The bounding box is defined by the tuple (y_min, x_min, y_max, x_max)
where the coordinates are floats in the range [0.0, 1.0] and
relative to the width and height of the image.
For example, if an image is 100 x 200 pixels (height x width) and the bounding
box is `(0.1, 0.2, 0.5, 0.9)`, the upper-left and bottom-right coordinates of
the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
"""
line_width = 3
font_height = 8
y_min, x_min, y_max, x_max = box
(left, right, top, bottom) = (
x_min * img_width,
x_max * img_width,
y_min * img_height,
y_max * img_height,
)
draw.line(
[(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
width=line_width,
fill=color,
)
if text:
draw.text(
(left + line_width, abs(top - line_width - font_height)), text, fill=color
)
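# Hypothetical usage sketch (not part of this component; the helper name and
# its arguments are assumptions): it shows how draw_box() is meant to be
# called, loading a picture with PIL.Image and obtaining the ImageDraw
# context the function expects.
def _example_annotate(image_path, box, label=""):
    """Open an image and draw one normalized (y_min, x_min, y_max, x_max) box."""
    from PIL import Image  # PIL is already a dependency of this module

    img = Image.open(image_path)
    draw = ImageDraw.Draw(img)
    draw_box(draw, box, img.width, img.height, text=label)
    return img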
async def async_setup(hass, config):
"""Set up the image processing."""
component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)
await component.async_setup(config)
async def async_scan_service(service):
"""Service handler for scan."""
image_entities = await component.async_extract_from_service(service)
update_tasks = []
for entity in image_entities:
entity.async_set_context(service.context)
update_tasks.append(entity.async_update_ha_state(True))
if update_tasks:
await asyncio.wait(update_tasks)
hass.services.async_register(
DOMAIN, SERVICE_SCAN, async_scan_service, schema=ENTITY_SERVICE_SCHEMA
)
return True
class ImageProcessingEntity(Entity):
"""Base entity class for image processing."""
timeout = DEFAULT_TIMEOUT
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return None
@property
def confidence(self):
"""Return minimum confidence for do some things."""
return None
def process_image(self, image):
"""Process image."""
raise NotImplementedError()
def async_process_image(self, image):
"""Process image.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(self.process_image, image)
async def async_update(self):
"""Update image and process it.
This method is a coroutine.
"""
camera = self.hass.components.camera
image = None
try:
image = await camera.async_get_image(
self.camera_entity, timeout=self.timeout
)
except HomeAssistantError as err:
_LOGGER.error("Error on receive image from entity: %s", err)
return
# process image data
await self.async_process_image(image.content)
class ImageProcessingFaceEntity(ImageProcessingEntity):
"""Base entity class for face image processing."""
def __init__(self):
"""Initialize base face identify/verify entity."""
self.faces = []
self.total_faces = 0
@property
def state(self):
"""Return the state of the entity."""
confidence = 0
state = None
# No confidence support
if not self.confidence:
return self.total_faces
# Search high confidence
for face in self.faces:
if ATTR_CONFIDENCE not in face:
continue
f_co = face[ATTR_CONFIDENCE]
if f_co > confidence:
confidence = f_co
for attr in [ATTR_NAME, ATTR_MOTION]:
if attr in face:
state = face[attr]
break
return state
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return "face"
@property
def state_attributes(self):
"""Return device specific state attributes."""
attr = {ATTR_FACES: self.faces, ATTR_TOTAL_FACES: self.total_faces}
return attr
def process_faces(self, faces, total):
"""Send event with detected faces and store data."""
run_callback_threadsafe(
self.hass.loop, self.async_process_faces, faces, total
).result()
@callback
def async_process_faces(self, faces, total):
"""Send event with detected faces and store data.
Faces are given as a list of dicts in the following format:
[
{
ATTR_CONFIDENCE: 80,
ATTR_NAME: 'Name',
ATTR_AGE: 12.0,
ATTR_GENDER: 'man',
ATTR_MOTION: 'smile',
ATTR_GLASSES: 'sunglasses'
},
]
This method must be run in the event loop.
"""
# Send events
for face in faces:
if ATTR_CONFIDENCE in face and self.confidence:
if face[ATTR_CONFIDENCE] < self.confidence:
continue
face.update({ATTR_ENTITY_ID: self.entity_id})
self.hass.async_add_job(self.hass.bus.async_fire, EVENT_DETECT_FACE, face)
# Update entity store
self.faces = faces
self.total_faces = total
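# Hypothetical minimal platform sketch (the class name and the hard-coded
# detection result are assumptions for illustration only): a real platform
# subclasses ImageProcessingFaceEntity, implements process_image() and
# reports its results through process_faces() / async_process_faces().
class DemoFaceEntity(ImageProcessingFaceEntity):
    """Illustrative face entity that pretends to find one face per image."""

    def __init__(self, camera_entity, name):
        """Store the source camera and a display name."""
        super().__init__()
        self._camera_entity = camera_entity
        self._name = name

    @property
    def camera_entity(self):
        """Return the camera entity id pictures are processed from."""
        return self._camera_entity

    @property
    def name(self):
        """Return the name of the entity."""
        return self._name

    def process_image(self, image):
        """Pretend to analyse the image and report a single dummy face."""
        faces = [{ATTR_CONFIDENCE: 99.0, ATTR_NAME: "demo"}]
        self.process_faces(faces, total=1)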
|
|
from sqlalchemy.orm import relationship
from app.database import Base, engine0
# from app import db
from sqlalchemy import Table, Integer, Column, PrimaryKeyConstraint
# Base.metadata.reflect(db.engine)
# Base.metadata.bind=engine
class Hd(Base):
__tablename__ = 'hd'
__table_args__ = {'autoload':True}
def to_json(self):
json_comment = {'nbuf': self.nbuf,
'crc': self.crc,
'now': self.now,
'time': self.time,
'us': self.us,
'ns': self.ns,
'evid': self.evid,
'evnum': self.evnum,
'surfmask': self.surfmask,
'calib': self.calib,
'priority': self.priority,
'turfword': self.turfword,
'l1mask': self.l1mask,
'l1maskh': self.l1maskh,
'peakthetabin': self.peakthetabin,
'imagepeak': self.imagepeak,
'coherentsumpeak': self.coherentsumpeak,
'prioritizerstuff': self.prioritizerstuff,
'trigtype': self.trigtype,
'trignum': self.trignum,
'l3cnt': self.l3cnt,
'pps': self.pps,
'trigtime': self.trigtime,
'c3po': self.c3po,
'deadtime': self.deadtime,
'l3trigpat': self.l3trigpat,
'l3trigpath': self.l3trigpath,
'phimask': self.phimask,
'phimaskh': self.phimaskh
}
return json_comment
class Rf(Base):
__tablename__ = 'rf'
__table_args__ = (Column('nbuf', Integer, primary_key=True), {'autoload':True})
def to_json(self):
json_comment = {'nbuf': self.nbuf,
'crc': self.crc,
'now': self.now,
'time': self.time,
'us': self.us,
'ns': self.ns,
'evid': self.evid,
'evnum': self.evnum,
'surfmask': self.surfmask,
'calib': self.calib,
'priority': self.priority,
'turfword': self.turfword,
'l1mask': self.l1mask,
'l1maskh': self.l1maskh,
'peakthetabin': self.peakthetabin,
'imagepeak': self.imagepeak,
'coherentsumpeak': self.coherentsumpeak,
'prioritizerstuff': self.prioritizerstuff,
'trigtype': self.trigtype,
'trignum': self.trignum,
'l3cnt': self.l3cnt,
'pps': self.pps,
'trigtime': self.trigtime,
'c3po': self.c3po,
'deadtime': self.deadtime,
'l3trigpat': self.l3trigpat,
'l3trigpath': self.l3trigpath,
'phimask': self.phimask,
'phimaskh': self.phimaskh
}
return json_comment
class Hk(Base):
__tablename__ = 'hk'
__table_args__ = {'autoload':True}
def to_json(self):
json_comment = {'nbuf': self.nbuf,
'crc': self.crc,
'now': self.now,
'time': self.time,
'us': self.us,
'code': self.code,
'cal': self.cal,
'avz': self.avz,
'bd1': self.bd1,
'bd2': self.bd2,
'bd3': self.bd3,
'calb1': self.calb1,
'calb2': self.calb2,
'calb3': self.calb3,
'accx': self.accx,
'accy': self.accy,
'accz': self.accz,
'acct': self.acct,
'ssx': self.ssx,
'ssy': self.ssy,
'ssi': self.ssi,
'ssflag': self.ssflag,
'ssel': self.ssel,
'ssaz': self.ssaz,
'sst': self.sst,
'pressh': self.pressh,
'pressl': self.pressl,
'p1_5v': self.p1_5v,
'p3_3v': self.p3_3v,
'p5v': self.p5v,
'p5sbv': self.p5sbv,
'p12v': self.p12v,
'p24v': self.p24v,
'ppvv': self.ppvv,
'n5v': self.n5v,
'n12v': self.n12v,
'iprf1v': self.iprf1v,
'iprf2v': self.iprf2v,
'p1_5i': self.p1_5i,
'p3_3i': self.p3_3i,
'p5i': self.p5i,
'p5sbi': self.p5sbi,
'p12i': self.p12i,
'p24i': self.p24i,
'ppvi': self.ppvi,
'n5i': self.n5i,
'n12i': self.n12i,
'iprf1i': self.iprf1i,
'iprf2i': self.iprf2i,
'bati': self.bati,
'p5vip': self.p5vip,
'it': self.it,
'et': self.et,
'sbst1': self.sbst1,
'sbst2': self.sbst2,
'core1': self.core1,
'core2': self.core2,
'sbst5': self.sbst5,
'sbst6': self.sbst6,
'magx': self.magx,
'magy': self.magy,
'magz': self.magz
}
return json_comment
class Wv(Base):
# __table__ = Table('wv', Base.metadata, PrimaryKeyConstraint("evnum", "id"),extend_existing=True)
__tablename__ = 'wv'
__table_args__ = ( PrimaryKeyConstraint("evnum","id"), {'autoload':True})
def to_json(self):
json_comment = {
'nbuf': self.nbuf,
# 'crc': self.crc,
'now': self.now,
'evnum': self.evnum,
'id': self.id,
# 'chip': self.chip,
# 'rcobit': self.rcobit,
# 'hbwrap': self.hbwrap,
# 'hbstart': self.hbstart,
# 'hbend': self.hbend,
# 'peds': self.peds,
# 'raw': self.raw,
# 'cal': [int(cal*100) for cal in self.cal]
'cal': self.cal
}
return json_comment
class Slow(Base):
# __table__ = Table('slow', Base.metadata, Column('time', Integer, primary_key=True), extend_existing=True)
__tablename__ = 'slow'
__table_args__ = (Column('time', Integer, primary_key=True), {'autoload':True})
# time = Column(Integer, primary_key=True)
def to_json(self):
json_comment = {
#'nbuf': self.nbuf, #nbuf is all 0s. useless
'crc': self.crc,
'now': self.now,
'time': self.time,
'evnum': self.evnum,
'latitude': self.latitude,
'longitude': self.longitude,
'altitude': self.altitude,
'rate1': self.rate1,
'rate10': self.rate10,
'tempraw': self.tempraw,
'powerraw': self.powerraw,
'tempv': self.tempv,
'powerv': self.powerv,
'temp': self.temp,
'ppvv': self.ppvv,
'p24v': self.p24v,
'bati': self.bati,
'p24i': self.p24i,
'avgl3': self.avgl3,
'avgscaler': self.avgscaler,
'avgrfpow': self.avgrfpow
}
return json_comment
class Sshk(Base):
__tablename__ = 'sshk'
__table_args__ = {'autoload':True}
def to_json(self):
json_comment = {
'nbuf': self.nbuf,
'crc': self.crc,
'time': self.time,
'us': self.us,
'now': self.now,
'code': self.code,
'cal': self.cal,
'avz': self.avz,
# 'bdl': self.bdl,
# 'calb1': self.calb1,
'ssx': self.ssx,
'ssy': self.ssy,
'ssi': self.ssi,
'ssflag': self.ssflag,
'ssel': self.ssel,
'ssaz': self.ssaz,
'sst': self.sst
}
return json_comment
class Hk_surf(Base):
__tablename__ = 'hk_surf'
__table_args__ = {'autoload':True}
def to_json(self):
json_comment = {
'nbuf': self.nbuf,
'crc': self.crc,
'now': self.now,
'time': self.time,
'us': self.us,
#'global': self.global,
'error': self.error,
'scalergoals': self.scalergoals,
#'nadirgoals': self.nadirgoals,
'upper': self.upper,
'scaler': self.scaler,
'thresh': self.thresh,
'threshset': self.threshset,
'rfpow': self.rfpow,
'l1scaler': self.l1scaler,
'surfmask': self.surfmask
}
return json_comment
class Turf(Base):
__tablename__ = 'turf'
__table_args__ = {'autoload':True}
def to_json(self):
json_comment = {
'nbuf': self.nbuf,
'crc': self.crc,
'now': self.now,
'time': self.time,
'deadtime': self.deadtime,
'l2trigmask': self.l2trigmask,
# 'l1trigmaskh': self.l1trigmaskh,
'phitrigmask': self.phitrigmask,
# 'phitrigmaskh': self.phitrigmaskh,
'l2': self.l2,
# 'l1h': self.l1h,
'l3': self.l3,
'l3gated': self.l3gated,
# 'l3h': self.l3h
}
return json_comment
class Mon(Base):
__tablename__ = 'mon'
__table_args__ = {'autoload':True}
def to_json(self):
json_comment = {
'nbuf': self.nbuf,
'crc': self.crc,
'now': self.now,
'time': self.time,
'disk': self.disk,
'blade': self.blade,
'usbint': self.usbint,
'usbext': self.usbext,
'linkev': self.linkev,
'linkcmdlos': self.linkcmdlos,
'linkcmdsip': self.linkcmdsip,
'linkgps': self.linkgps,
'linkhk': self.linkhk,
'linkmon': self.linkmon,
'linkhd': self.linkhd,
'linksurf': self.linksurf,
'linkturf': self.linkturf,
'linkped': self.linkped
}
return json_comment
class Adu5_pat(Base):
__tablename__ = 'adu5_pat'
__table_args__ = {'autoload':True}
def to_json(self):
json_comment = {
'gpstype': self.gpstype,
'nbuf': self.nbuf,
'crc': self.crc,
'now': self.now,
'code': self.code,
'time': self.time,
'us': self.us,
'tod': self.tod,
'heading': self.heading,
'pitch': self.pitch,
'roll': self.roll,
'mrms': self.mrms,
'brms': self.brms,
'flag': self.flag,
'latitude': self.latitude,
'longitude': self.longitude,
'altitude': self.altitude
}
return json_comment
def to_czml(self):
czml_comment = {
'id': 'myObject',
'availability': '2014-01-15T00:00Z/2014-01-01T24:00Z',
'point': {
'color': {
'rgba': [255, 255, 0, 255]
},
'outlineWidth': 2.0,
'pixelSize': 3.0,
'show': True
},
"position" : {
"epoch" : "2012-08-04T10:00:00Z",
"cartographicDegrees" : [0,self.latitude,self.longitude,self.altitude]
}
}
return czml_comment
class Adu5_vtg(Base):
__tablename__ = 'adu5_vtg'
__table_args__ = {'autoload':True}
def to_json(self):
json_comment = {
'gpstype': self.gpstype,
'nbuf': self.nbuf,
'crc': self.crc,
'now': self.now,
'code': self.code,
'time': self.time,
'us': self.us,
'course': self.course,
'mcourse': self.mcourse,
'vkt': self.vkt,
'vkph': self.vkph
}
return json_comment
class Adu5_sat(Base):
__tablename__ = 'adu5_sat'
__table_args__ = {'autoload':True}
def to_json(self):
json_comment = {
'gpstype': self.gpstype,
'nbuf': self.nbuf,
'crc': self.crc,
'now': self.now,
'code': self.code,
'time': self.time,
'numsats': self.numsats,
'prn': self.prn,
'elevation': self.elevation,
'snr': self.snr,
'flag': self.flag,
'azimuth': self.azimuth
}
return json_comment
class G12_pos(Base):
__tablename__ = 'g12_pos'
__table_args__ = {'autoload':True}
def to_json(self):
json_comment = {
'nbuf': self.nbuf,
'crc': self.crc,
'now': self.now,
'time': self.time,
'us': self.us,
'tod': self.tod,
'numsats': self.numsats,
'latitude': self.latitude,
'longitude': self.longitude,
'altitude': self.altitude,
'course': self.course,
'upv': self.upv,
'vkt': self.vkt,
'pdop': self.pdop,
'hdop': self.hdop,
'vdop': self.vdop,
'tdop': self.tdop,
'unit': self.unit
}
return json_comment
class G12_sat(Base):
# __table__ = Table('wv', Base.metadata, PrimaryKeyConstraint("evnum", "id"),extend_existing=True)
__tablename__ = 'g12_sat'
__table_args__ = {'autoload':True}
def to_json(self):
json_comment = {
'nbuf': self.nbuf,
'crc': self.crc,
'now': self.now,
'time': self.time,
'numsats': self.numsats,
'prn': self.prn,
'elevation': self.elevation,
'snr': self.snr,
'flag': self.flag,
'azimuth': self.azimuth
}
return json_comment
class Cmd(Base):
# __table__ = Table('wv', Base.metadata, PrimaryKeyConstraint("evnum", "id"),extend_existing=True)
__tablename__ = 'cmd'
__table_args__ = {'autoload':True}
def to_json(self):
json_comment = {
'nbuf': self.nbuf,
'crc': self.crc,
'now': self.now,
'time': self.time,
'flag': self.flag,
'bytes': self.bytes,
'cmd': self.cmd
}
return json_comment
class Wakeup(Base):
# __table__ = Table('wv', Base.metadata, PrimaryKeyConstraint("evnum", "id"),extend_existing=True)
__tablename__ = 'wakeup'
__table_args__ = {'autoload':True}
def to_json(self):
json_comment = {
'nbuf': self.nbuf,
'crc': self.crc,
'now': self.now,
'type': self.type
}
return json_comment
class File(Base):
# __table__ = Table('wv', Base.metadata, PrimaryKeyConstraint("evnum", "id"),extend_existing=True)
__tablename__ = 'file'
__table_args__ = {'autoload':True}
def to_json(self):
json_comment = {
'nbuf': self.nbuf,
'crc': self.crc,
'now': self.now,
'time': self.time,
'filename': self.filename,
'length': self.length,
'content': self.content,
'hbwrap': self.hbwrap,
'hbstart': self.hbstart
}
return json_comment
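# Minimal usage sketch (assumptions: Base.metadata is bound to engine0 so the
# reflected 'autoload' tables above can resolve, and a plain sessionmaker is
# acceptable for this app). It shows the intended use of the to_json()
# helpers: fetch a row and serialize it for an API response.
def example_latest_header_json():
    """Return the most recent 'hd' row as a JSON-serializable dict, or None."""
    from sqlalchemy.orm import sessionmaker

    Session = sessionmaker(bind=engine0)
    session = Session()
    try:
        latest = session.query(Hd).order_by(Hd.evnum.desc()).first()
        return latest.to_json() if latest is not None else None
    finally:
        session.close()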
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from fancypages.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'HorizontalSeparatorBlock'
db.create_table('fancypages_horizontalseparatorblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
))
db.send_create_signal('fancypages', ['HorizontalSeparatorBlock'])
# Adding unique constraint on 'Container', fields ['name', 'content_type', 'object_id']
db.create_unique('fancypages_container', ['name', 'content_type_id', 'object_id'])
def backwards(self, orm):
# Removing unique constraint on 'Container', fields ['name', 'content_type', 'object_id']
db.delete_unique('fancypages_container', ['name', 'content_type_id', 'object_id'])
# Deleting model 'HorizontalSeparatorBlock'
db.delete_table('fancypages_horizontalseparatorblock')
models = {
'assets.imageasset': {
'Meta': {'object_name': 'ImageAsset'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{}']".format(AUTH_USER_MODEL)}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'height': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
AUTH_USER_MODEL: {
'Meta': {'object_name': AUTH_USER_MODEL_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'fancypages.carouselblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'CarouselBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'image_1': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_10': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_2': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_3': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_4': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_5': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_6': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_7': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_8': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_9': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'link_url_1': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_10': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_2': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_3': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_4': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_5': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_6': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_7': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_8': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_9': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'fancypages.container': {
'Meta': {'unique_together': "(('name', 'content_type', 'object_id'),)", 'object_name': 'Container'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'fancypages.contentblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'ContentBlock'},
'container': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blocks'", 'to': "orm['fancypages.Container']"}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'fancypages.fancypage': {
'Meta': {'object_name': 'FancyPage'},
'date_visible_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_visible_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'page_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pages'", 'null': 'True', 'to': "orm['fancypages.PageType']"}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '15'}),
'visibility_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['fancypages.VisibilityType']", 'symmetrical': 'False'})
},
'fancypages.fourcolumnlayoutblock': {
'Meta': {'object_name': 'FourColumnLayoutBlock'},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.horizontalseparatorblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'HorizontalSeparatorBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.imageandtextblock': {
'Meta': {'object_name': 'ImageAndTextBlock', '_ormbases': ['fancypages.ContentBlock']},
'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'image_asset': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'image_text_blocks'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'default': "'Your text goes here.'", 'max_length': '2000'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'fancypages.imageblock': {
'Meta': {'object_name': 'ImageBlock', '_ormbases': ['fancypages.ContentBlock']},
'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'image_asset': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'image_blocks'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'fancypages.orderedcontainer': {
'Meta': {'object_name': 'OrderedContainer', '_ormbases': ['fancypages.Container']},
'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.Container']", 'unique': 'True', 'primary_key': 'True'}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'fancypages.pagenavigationblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'PageNavigationBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.pagetype': {
'Meta': {'object_name': 'PageType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'fancypages.primarynavigationblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'PrimaryNavigationBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.tabblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TabBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.textblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TextBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "'Your text goes here.'"})
},
'fancypages.threecolumnlayoutblock': {
'Meta': {'object_name': 'ThreeColumnLayoutBlock'},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.titletextblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TitleTextBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "'Your text goes here.'"}),
'title': ('django.db.models.fields.CharField', [], {'default': "'Your title goes here.'", 'max_length': '100'})
},
'fancypages.twitterblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TwitterBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'max_tweets': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'fancypages.twocolumnlayoutblock': {
'Meta': {'object_name': 'TwoColumnLayoutBlock'},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'left_width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '6', 'max_length': '3'})
},
'fancypages.videoblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'VideoBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'video_code': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'fancypages.visibilitytype': {
'Meta': {'object_name': 'VisibilityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['fancypages']
|
|
"""Configuration management setup
Some terminology:
- name
As written in config files.
- value
Value associated with a name
- key
Name combined with its section (section.name)
- variant
A single word describing where the configuration key-value pair came from
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
import locale
import logging
import os
import sys
from pip._vendor.six.moves import configparser
from pip._internal.exceptions import (
ConfigurationError,
ConfigurationFileCouldNotBeLoaded,
)
from pip._internal.utils import appdirs
from pip._internal.utils.compat import WINDOWS, expanduser
from pip._internal.utils.misc import ensure_dir, enum
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import (
Any, Dict, Iterable, List, NewType, Optional, Tuple
)
RawConfigParser = configparser.RawConfigParser # Shorthand
Kind = NewType("Kind", str)
logger = logging.getLogger(__name__)
# NOTE: Maybe use the optionx attribute to normalize keynames.
def _normalize_name(name):
# type: (str) -> str
"""Make a name consistent regardless of source (environment or file)
"""
name = name.lower().replace('_', '-')
if name.startswith('--'):
name = name[2:] # only prefer long opts
return name
def _disassemble_key(name):
# type: (str) -> List[str]
if "." not in name:
error_message = (
"Key does not contain dot separated section and key. "
"Perhaps you wanted to use 'global.{}' instead?"
).format(name)
raise ConfigurationError(error_message)
return name.split(".", 1)
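# Worked examples for the two helpers above (illustrative comments only):
#
#   _normalize_name("--TIMEOUT")       -> "timeout"    (lower-cased, long-opt prefix stripped)
#   _normalize_name("index_url")       -> "index-url"  (underscores become dashes)
#   _disassemble_key("global.timeout") -> ["global", "timeout"]
#   _disassemble_key("timeout")        -> raises ConfigurationError (no section given)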
# The kinds of configurations there are.
kinds = enum(
USER="user", # User Specific
GLOBAL="global", # System Wide
SITE="site", # [Virtual] Environment Specific
ENV="env", # from PIP_CONFIG_FILE
ENV_VAR="env-var", # from Environment Variables
)
CONFIG_BASENAME = 'pip.ini' if WINDOWS else 'pip.conf'
def get_configuration_files():
# type: () -> Dict[Kind, List[str]]
global_config_files = [
os.path.join(path, CONFIG_BASENAME)
for path in appdirs.site_config_dirs('pip')
]
site_config_file = os.path.join(sys.prefix, CONFIG_BASENAME)
legacy_config_file = os.path.join(
expanduser('~'),
'pip' if WINDOWS else '.pip',
CONFIG_BASENAME,
)
new_config_file = os.path.join(
appdirs.user_config_dir("pip"), CONFIG_BASENAME
)
return {
kinds.GLOBAL: global_config_files,
kinds.SITE: [site_config_file],
kinds.USER: [legacy_config_file, new_config_file],
}
class Configuration(object):
"""Handles management of configuration.
Provides an interface to accessing and managing configuration files.
This class provides an API that takes "section.key-name" style
keys and stores the value associated with it as "key-name" under the
section "section".
This allows for a clean interface wherein both the section and the
key-name are preserved in an easy to manage form in the configuration
files, and the stored data stays readable.
"""
def __init__(self, isolated, load_only=None):
# type: (bool, Kind) -> None
super(Configuration, self).__init__()
_valid_load_only = [kinds.USER, kinds.GLOBAL, kinds.SITE, None]
if load_only not in _valid_load_only:
raise ConfigurationError(
"Got invalid value for load_only - should be one of {}".format(
", ".join(map(repr, _valid_load_only[:-1]))
)
)
self.isolated = isolated # type: bool
self.load_only = load_only # type: Optional[Kind]
# The order here determines the override order.
self._override_order = [
kinds.GLOBAL, kinds.USER, kinds.SITE, kinds.ENV, kinds.ENV_VAR
]
self._ignore_env_names = ["version", "help"]
# Because we keep track of where we got the data from
self._parsers = {
variant: [] for variant in self._override_order
} # type: Dict[Kind, List[Tuple[str, RawConfigParser]]]
self._config = {
variant: {} for variant in self._override_order
} # type: Dict[Kind, Dict[str, Any]]
self._modified_parsers = [] # type: List[Tuple[str, RawConfigParser]]
def load(self):
# type: () -> None
"""Loads configuration from configuration files and environment
"""
self._load_config_files()
if not self.isolated:
self._load_environment_vars()
def get_file_to_edit(self):
# type: () -> Optional[str]
"""Returns the file with highest priority in configuration
"""
assert self.load_only is not None, \
"Need to be specified a file to be editing"
try:
return self._get_parser_to_modify()[0]
except IndexError:
return None
def items(self):
# type: () -> Iterable[Tuple[str, Any]]
"""Returns key-value pairs like dict.items() representing the loaded
configuration
"""
return self._dictionary.items()
def get_value(self, key):
# type: (str) -> Any
"""Get a value from the configuration.
"""
try:
return self._dictionary[key]
except KeyError:
raise ConfigurationError("No such key - {}".format(key))
def set_value(self, key, value):
# type: (str, Any) -> None
"""Modify a value in the configuration.
"""
self._ensure_have_load_only()
fname, parser = self._get_parser_to_modify()
if parser is not None:
section, name = _disassemble_key(key)
# Modify the parser and the configuration
if not parser.has_section(section):
parser.add_section(section)
parser.set(section, name, value)
self._config[self.load_only][key] = value
self._mark_as_modified(fname, parser)
def unset_value(self, key):
# type: (str) -> None
"""Unset a value in the configuration.
"""
self._ensure_have_load_only()
if key not in self._config[self.load_only]:
raise ConfigurationError("No such key - {}".format(key))
fname, parser = self._get_parser_to_modify()
if parser is not None:
section, name = _disassemble_key(key)
# Remove the key in the parser
modified_something = False
if parser.has_section(section):
# Returns whether the option was removed or not
modified_something = parser.remove_option(section, name)
if modified_something:
# name removed from parser, section may now be empty
section_iter = iter(parser.items(section))
try:
val = next(section_iter)
except StopIteration:
val = None
if val is None:
parser.remove_section(section)
self._mark_as_modified(fname, parser)
else:
raise ConfigurationError(
"Fatal Internal error [id=1]. Please report as a bug."
)
del self._config[self.load_only][key]
def save(self):
# type: () -> None
"""Save the current in-memory state.
"""
self._ensure_have_load_only()
for fname, parser in self._modified_parsers:
logger.info("Writing to %s", fname)
# Ensure directory exists.
ensure_dir(os.path.dirname(fname))
with open(fname, "w") as f:
parser.write(f)
#
# Private routines
#
def _ensure_have_load_only(self):
# type: () -> None
if self.load_only is None:
raise ConfigurationError("Needed a specific file to be modifying.")
logger.debug("Will be working with %s variant only", self.load_only)
@property
def _dictionary(self):
# type: () -> Dict[str, Any]
"""A dictionary representing the loaded configuration.
"""
# NOTE: Dictionaries are not populated if not loaded. So, conditionals
# are not needed here.
retval = {}
for variant in self._override_order:
retval.update(self._config[variant])
return retval
def _load_config_files(self):
# type: () -> None
"""Loads configuration from configuration files
"""
config_files = dict(self._iter_config_files())
if config_files[kinds.ENV][0:1] == [os.devnull]:
logger.debug(
"Skipping loading configuration files due to "
"environment's PIP_CONFIG_FILE being os.devnull"
)
return
for variant, files in config_files.items():
for fname in files:
# If there's a specific variant set in `load_only`, load only
# that variant, not the others.
if self.load_only is not None and variant != self.load_only:
logger.debug(
"Skipping file '%s' (variant: %s)", fname, variant
)
continue
parser = self._load_file(variant, fname)
# Keeping track of the parsers used
self._parsers[variant].append((fname, parser))
def _load_file(self, variant, fname):
# type: (Kind, str) -> RawConfigParser
logger.debug("For variant '%s', will try loading '%s'", variant, fname)
parser = self._construct_parser(fname)
for section in parser.sections():
items = parser.items(section)
self._config[variant].update(self._normalized_keys(section, items))
return parser
def _construct_parser(self, fname):
# type: (str) -> RawConfigParser
parser = configparser.RawConfigParser()
# If there is no such file, don't bother reading it but create the
# parser anyway, to hold the data.
# Doing this is useful when modifying and saving files, where we don't
# need to construct a parser.
if os.path.exists(fname):
try:
parser.read(fname)
except UnicodeDecodeError:
# See https://github.com/pypa/pip/issues/4963
raise ConfigurationFileCouldNotBeLoaded(
reason="contains invalid {} characters".format(
locale.getpreferredencoding(False)
),
fname=fname,
)
except configparser.Error as error:
# See https://github.com/pypa/pip/issues/4893
raise ConfigurationFileCouldNotBeLoaded(error=error)
return parser
def _load_environment_vars(self):
# type: () -> None
"""Loads configuration from environment variables
"""
self._config[kinds.ENV_VAR].update(
self._normalized_keys(":env:", self._get_environ_vars())
)
def _normalized_keys(self, section, items):
# type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any]
"""Normalizes items to construct a dictionary with normalized keys.
This routine is where the names become keys and are made the same
regardless of source - configuration files or environment.
"""
normalized = {}
for name, val in items:
key = section + "." + _normalize_name(name)
normalized[key] = val
return normalized
def _get_environ_vars(self):
# type: () -> Iterable[Tuple[str, str]]
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
should_be_yielded = (
key.startswith("PIP_") and
key[4:].lower() not in self._ignore_env_names
)
if should_be_yielded:
yield key[4:].lower(), val
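# For example (illustrative): with PIP_TIMEOUT=60 and PIP_HELP=1 set in the
# environment, this generator yields ("timeout", "60") but skips PIP_HELP,
# because "help" is listed in self._ignore_env_names.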
# XXX: This is patched in the tests.
def _iter_config_files(self):
# type: () -> Iterable[Tuple[Kind, List[str]]]
"""Yields variant and configuration files associated with it.
This should be treated like items of a dictionary.
"""
# SMELL: Move the conditions out of this function
# environment variables have the lowest priority
config_file = os.environ.get('PIP_CONFIG_FILE', None)
if config_file is not None:
yield kinds.ENV, [config_file]
else:
yield kinds.ENV, []
config_files = get_configuration_files()
# at the base we have any global configuration
yield kinds.GLOBAL, config_files[kinds.GLOBAL]
# per-user configuration next
should_load_user_config = not self.isolated and not (
config_file and os.path.exists(config_file)
)
if should_load_user_config:
# The legacy config file is overridden by the new config file
yield kinds.USER, config_files[kinds.USER]
# finally virtualenv configuration first trumping others
yield kinds.SITE, config_files[kinds.SITE]
def _get_parser_to_modify(self):
# type: () -> Tuple[str, RawConfigParser]
# Determine which parser to modify
parsers = self._parsers[self.load_only]
if not parsers:
# This should not happen if everything works correctly.
raise ConfigurationError(
"Fatal Internal error [id=2]. Please report as a bug."
)
# Use the highest priority parser.
return parsers[-1]
# XXX: This is patched in the tests.
def _mark_as_modified(self, fname, parser):
# type: (str, RawConfigParser) -> None
file_parser_tuple = (fname, parser)
if file_parser_tuple not in self._modified_parsers:
self._modified_parsers.append(file_parser_tuple)
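# Minimal usage sketch (illustrative; "global.index-url" and the example URL
# are assumptions used only as sample values): load every configuration
# source, read one value, then modify and save the user-level file.
def example_configuration_roundtrip():
    """Show the intended get/set/save flow of the Configuration class."""
    config = Configuration(isolated=False, load_only=kinds.USER)
    config.load()

    try:
        previous = config.get_value("global.index-url")
    except ConfigurationError:
        previous = None  # key not set in any loaded source

    # Write (or overwrite) the value in the user configuration file.
    config.set_value("global.index-url", "https://pypi.org/simple")
    config.save()
    return previous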
|
|
#!/usr/bin/env python
# Copyright (c) 2011, 2012 Walter Bender
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
Gst.init(None)
from fcntl import ioctl
import os
from time import time
from gettext import gettext as _
from plugins.camera_sensor.tacamera import Camera
from plugins.camera_sensor.v4l2 import v4l2_control, V4L2_CID_AUTOGAIN, \
VIDIOC_G_CTRL, VIDIOC_S_CTRL
from plugins.plugin import Plugin
from TurtleArt.tapalette import make_palette
from TurtleArt.talogo import media_blocks_dictionary
from TurtleArt.tautils import get_path, debug_output, power_manager_off
from TurtleArt.taconstants import MEDIA_SHAPES, NO_IMPORT, SKIN_PATHS, \
BLOCKS_WITH_SKIN
from TurtleArt.taprimitive import (ConstantArg, Primitive)
from TurtleArt.tatype import TYPE_NUMBER
class Camera_sensor(Plugin):
def __init__(self, parent):
Plugin.__init__(self)
''' Make sure there is a camera device '''
self._parent = parent
self._status = False
self._ag_control = None
self.devices = []
self.cameras = []
self.luminance = 0
if os.path.exists('/dev/video0'):
self.devices.append('/dev/video0')
if os.path.exists('/dev/video1'):
self.devices.append('/dev/video1')
if len(self.devices) > 0:
self._status = True
else:
self._status = False
def setup(self):
''' Set up the palettes '''
'''
sensors_palette = make_palette('sensor',
colors=["#FF6060", "#A06060"],
help_string=_(
'Palette of sensor blocks'),
position=6)
'''
media_palette = make_palette('media',
colors=["#A0FF00", "#80A000"],
help_string=_('Palette of media objects'),
position=7)
sensors_palette = media_palette
# set up camera-specific blocks
media_blocks_dictionary['camera'] = self.prim_take_picture0
media_blocks_dictionary['camera1'] = self.prim_take_picture1
SKIN_PATHS.append('plugins/camera_sensor/images')
hidden = True
second_cam = False
if self._status:
hidden = False
if len(self.devices) > 1:
second_cam = True
# ++ Turtle Confusion
hidden = True
second_cam = False
# -- Turtle Confusion
sensors_palette.add_block('luminance',
hidden=hidden,
style='box-style',
label=_('brightness'),
help_string=_(
'light level detected by camera'),
value_block=True,
prim_name='luminance')
self._parent.lc.def_prim(
'luminance', 0,
Primitive(self.prim_read_camera,
return_type=TYPE_NUMBER,
kwarg_descs={'luminance_only': ConstantArg(True)},
call_afterwards=self.after_luminance))
media_palette.add_block('camera',
hidden=hidden,
style='box-style-media',
label=' ',
default='CAMERA',
help_string=_('camera output'),
content_block=True)
media_palette.add_block('camera1',
hidden=not(second_cam),
style='box-style-media',
label=' ',
default='CAMERA',
help_string=_('camera output'),
content_block=True)
# Deprecated block
sensors_palette.add_block(
'read_camera',
hidden=True,
style='box-style',
label=_('brightness'),
help_string=_('Average RGB color from camera is pushed to the stack'),
value_block=True,
prim_name='read_camera')
self._parent.lc.def_prim(
'read_camera', 0,
Primitive(self.prim_read_camera,
return_type=TYPE_NUMBER,
kwarg_descs={'luminance_only': ConstantArg(False)}))
NO_IMPORT.append('camera')
BLOCKS_WITH_SKIN.append('camera')
NO_IMPORT.append('camera1')
BLOCKS_WITH_SKIN.append('camera1')
MEDIA_SHAPES.append('camerasmall')
MEDIA_SHAPES.append('cameraoff')
MEDIA_SHAPES.append('camera1small')
MEDIA_SHAPES.append('camera1off')
def start(self):
''' Initialize the camera if there is an camera block in use '''
camera_blocks = len(self._parent.block_list.get_similar_blocks(
'block', ['camera', 'camera1', 'read_camera', 'luminance']))
if not self._parent.running_turtleart or camera_blocks > 0:
if self._status and len(self.cameras) == 0:
for device in self.devices:
self.cameras.append(Camera(device))
power_manager_off(True)
def quit(self):
''' This gets called when the activity quits '''
self._reset_the_camera()
def stop(self):
''' This gets called by the stop button '''
self._reset_the_camera()
def clear(self):
''' This gets called by the clean button and erase button '''
self._reset_the_camera()
def _reset_the_camera(self):
if self._status and len(self.cameras) > 0:
for i, camera in enumerate(self.cameras):
camera.stop_camera_input()
self._set_autogain(1, camera=i) # enable AUTOGAIN
power_manager_off(False)
def _status_report(self):
debug_output('Reporting camera status: %s' % (str(self._status)),
self._parent.running_sugar)
return self._status
# Block primitives used in talogo
def prim_take_picture0(self):
self._take_picture(camera=0)
def prim_take_picture1(self):
self._take_picture(camera=1)
def _take_picture(self, camera=0):
''' method called by media block '''
self._set_autogain(1, camera) # enable AUTOGAIN
self._get_pixbuf_from_camera(camera)
self._parent.lc.pixbuf = self.cameras[camera].pixbuf
def prim_read_camera(self, luminance_only=False, camera=0):
""" Read average pixel from camera and push b, g, r to the stack """
self.luminance_only = luminance_only
if not self._status:
if self.luminance_only:
return -1
else:
self._parent.lc.heap.append(-1)
self._parent.lc.heap.append(-1)
self._parent.lc.heap.append(-1)
return
array = None
self._set_autogain(0, camera=camera) # disable AUTOGAIN
self._get_pixbuf_from_camera(camera=camera)
self.calc_luminance(camera=camera)
if self.luminance_only:
return int(self.luminance)
else:
self._parent.lc.heap.append(self.b)
self._parent.lc.heap.append(self.g)
self._parent.lc.heap.append(self.r)
return
def calc_luminance(self, camera=0):
array = self.cameras[camera].pixbuf.get_pixels()
width = self.cameras[camera].pixbuf.get_width()
height = self.cameras[camera].pixbuf.get_height()
if array is not None:
length = int(len(array) / 3)
if length != width * height:
debug_output('array length != width x height (%d != %dx%d)' %
(length, width, height),
self._parent.running_sugar)
# Average the 100 pixels in the center of the screen
r, g, b = 0, 0, 0
row_offset = int((height / 2 - 5) * width * 3)
column_offset = int(width / 2 - 5) * 3
for y in range(10):
i = row_offset + column_offset
for x in range(10):
r += ord(array[i])
i += 1
g += ord(array[i])
i += 1
b += ord(array[i])
i += 1
row_offset += width * 3
if self.luminance_only:
self.luminance = int((r * 0.3 + g * 0.6 + b * 0.1) / 100)
else:
self.r = int(r / 100)
self.g = int(g / 100)
self.b = int(b / 100)
else:
if self.luminance_only:
self.luminance = -1
else:
self.r = -1
self.g = -1
self.b = -1
def after_luminance(self, luminance_only=False):
if self._parent.lc.update_values and luminance_only:
self._parent.lc.update_label_value('luminance', self.luminance)
def _set_autogain(self, state, camera=0):
''' 0 is off; 1 is on '''
if self._ag_control is not None and self._ag_control.value == state:
return
try:
video_capture_device = open(self.devices[camera], 'rb+')
except BaseException:
video_capture_device = None
debug_output('video capture device not available',
self._parent.running_sugar)
return
self._ag_control = v4l2_control(V4L2_CID_AUTOGAIN)
try:
ioctl(video_capture_device, VIDIOC_G_CTRL, self._ag_control)
self._ag_control.value = state
ioctl(video_capture_device, VIDIOC_S_CTRL, self._ag_control)
except BaseException:
pass
video_capture_device.close()
def _get_pixbuf_from_camera(self, camera):
''' Regardless of how we get it, we want to return a pixbuf '''
self._parent.lc.pixbuf = None
if self._status:
self.cameras[camera].start_camera_input()
|
|
from datetime import timedelta
try:
from django.utils.timezone import now
except ImportError:
from datetime import datetime
now = datetime.now
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db import models
from django.conf import settings
from django.http import HttpResponseRedirect
from django.utils import six
from django.utils.http import urlencode
from django.utils.http import int_to_base36, base36_to_int
from django.core.exceptions import ValidationError
from allauth.compat import OrderedDict
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
from ..exceptions import ImmediateHttpResponse
from ..utils import (import_callable, valid_email_or_none,
get_user_model, get_request_param)
from . import signals
from .app_settings import EmailVerificationMethod
from . import app_settings
from .adapter import get_adapter
def get_next_redirect_url(request, redirect_field_name="next"):
"""
Returns the next URL to redirect to, if it was explicitly passed
via the request.
"""
redirect_to = get_request_param(request, redirect_field_name)
if not get_adapter().is_safe_url(redirect_to):
redirect_to = None
return redirect_to
def get_login_redirect_url(request, url=None, redirect_field_name="next"):
if url and callable(url):
# In order to be able to pass url getters around that depend
# on e.g. the authenticated state.
url = url()
redirect_url \
= (url
or get_next_redirect_url(request,
redirect_field_name=redirect_field_name)
or get_adapter().get_login_redirect_url(request))
return redirect_url
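# Resolution order sketch (illustrative, not part of the original module): an
# explicit ``url`` argument wins, then the "next" request parameter (if the
# adapter considers it safe), then the adapter's default.
#
#     get_login_redirect_url(request)                      # "next" param or adapter default
#     get_login_redirect_url(request, url=lambda: "/tos")  # callable urls are invoked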
_user_display_callable = None
def default_user_display(user):
if app_settings.USER_MODEL_USERNAME_FIELD:
return getattr(user, app_settings.USER_MODEL_USERNAME_FIELD)
else:
return force_text(user)
def user_display(user):
global _user_display_callable
if not _user_display_callable:
f = getattr(settings, "ACCOUNT_USER_DISPLAY",
default_user_display)
_user_display_callable = import_callable(f)
return _user_display_callable(user)
def user_field(user, field, *args):
"""
Gets or sets (optional) user model fields. No-op if fields do not exist.
"""
if field and hasattr(user, field):
if args:
# Setter
v = args[0]
if v:
User = get_user_model()
v = v[0:User._meta.get_field(field).max_length]
setattr(user, field, v)
else:
# Getter
return getattr(user, field)
def user_username(user, *args):
return user_field(user, app_settings.USER_MODEL_USERNAME_FIELD, *args)
def user_email(user, *args):
return user_field(user, app_settings.USER_MODEL_EMAIL_FIELD, *args)
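# Minimal sketch of the field helpers above (illustrative only; ``user`` is
# assumed to be a user-model instance):
#
#     user_username(user, "alice")  # setter: value is truncated to the field's max_length
#     user_username(user)           # getter -> "alice"
#     user_email(user)              # getter for the configured e-mail field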
def perform_login(request, user, email_verification,
redirect_url=None, signal_kwargs=None,
signup=False):
"""
Keyword arguments:
signup -- Indicates whether or not sending the
email is essential (during signup), or if it can be skipped (e.g. in
case email verification is optional and we are only logging in).
"""
# Local users are stopped due to form validation checking
# is_active, yet, adapter methods could toy with is_active in a
# `user_signed_up` signal. Furthermore, social users should be
# stopped anyway.
if not user.is_active:
return HttpResponseRedirect(reverse('account_inactive'))
from .models import EmailAddress
has_verified_email = EmailAddress.objects.filter(user=user,
verified=True).exists()
if email_verification == EmailVerificationMethod.NONE:
pass
elif email_verification == EmailVerificationMethod.OPTIONAL:
# In case of OPTIONAL verification: send on signup.
if not has_verified_email and signup:
send_email_confirmation(request, user, signup=signup)
elif email_verification == EmailVerificationMethod.MANDATORY:
if not has_verified_email:
send_email_confirmation(request, user, signup=signup)
return HttpResponseRedirect(
reverse('account_email_verification_sent'))
try:
get_adapter().login(request, user)
response = HttpResponseRedirect(
get_login_redirect_url(request, redirect_url))
if signal_kwargs is None:
signal_kwargs = {}
signals.user_logged_in.send(sender=user.__class__,
request=request,
response=response,
user=user,
**signal_kwargs)
get_adapter().add_message(request,
messages.SUCCESS,
'account/messages/logged_in.txt',
{'user': user})
except ImmediateHttpResponse as e:
response = e.response
return response
def complete_signup(request, user, email_verification, success_url,
signal_kwargs=None):
if signal_kwargs is None:
signal_kwargs = {}
signals.user_signed_up.send(sender=user.__class__,
request=request,
user=user,
**signal_kwargs)
return perform_login(request, user,
email_verification=email_verification,
signup=True,
redirect_url=success_url,
signal_kwargs=signal_kwargs)
def cleanup_email_addresses(request, addresses):
"""
Takes a list of EmailAddress instances and cleans it up, making
sure only valid ones remain, without multiple primaries etc.
Order is important: e.g. if multiple primary e-mail addresses
exist, the first one encountered will be kept as primary.
"""
from .models import EmailAddress
adapter = get_adapter()
# Let's group by `email`
e2a = OrderedDict() # maps email to EmailAddress
primary_addresses = []
verified_addresses = []
primary_verified_addresses = []
for address in addresses:
# Pick up only valid ones...
email = valid_email_or_none(address.email)
if not email:
continue
# ... and non-conflicting ones...
if (app_settings.UNIQUE_EMAIL
and EmailAddress.objects
.filter(email__iexact=email)
.exists()):
continue
a = e2a.get(email.lower())
if a:
a.primary = a.primary or address.primary
a.verified = a.verified or address.verified
else:
a = address
a.verified = a.verified or adapter.is_email_verified(request,
a.email)
e2a[email.lower()] = a
if a.primary:
primary_addresses.append(a)
if a.verified:
primary_verified_addresses.append(a)
if a.verified:
verified_addresses.append(a)
# Now that we got things sorted out, let's assign a primary
if primary_verified_addresses:
primary_address = primary_verified_addresses[0]
elif verified_addresses:
# Pick any verified as primary
primary_address = verified_addresses[0]
elif primary_addresses:
# Okay, let's pick primary then, even if unverified
primary_address = primary_addresses[0]
elif e2a:
# Pick the first
        primary_address = list(e2a.values())[0]
else:
# Empty
primary_address = None
# There can only be one primary
for a in e2a.values():
a.primary = primary_address.email.lower() == a.email.lower()
return list(e2a.values()), primary_address
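# Behaviour sketch (illustrative, hypothetical data): two records for the same
# address are merged into one, and the merged record keeps the strongest
# primary/verified flags seen among them.
#
#     cleaned, primary = cleanup_email_addresses(
#         request,
#         [EmailAddress(email="A@x.org", primary=True, verified=False),
#          EmailAddress(email="a@x.org", primary=False, verified=True)])
#     # len(cleaned) == 1; cleaned[0].verified is True; primary is cleaned[0]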
def setup_user_email(request, user, addresses):
"""
Creates proper EmailAddress for the user that was just signed
up. Only sets up, doesn't do any other handling such as sending
out email confirmation mails etc.
"""
from .models import EmailAddress
assert EmailAddress.objects.filter(user=user).count() == 0
priority_addresses = []
# Is there a stashed e-mail?
adapter = get_adapter()
stashed_email = adapter.unstash_verified_email(request)
if stashed_email:
priority_addresses.append(EmailAddress(user=user,
email=stashed_email,
primary=True,
verified=True))
email = user_email(user)
if email:
priority_addresses.append(EmailAddress(user=user,
email=email,
primary=True,
verified=False))
addresses, primary = cleanup_email_addresses(request,
priority_addresses
+ addresses)
for a in addresses:
a.user = user
a.save()
EmailAddress.objects.fill_cache_for_user(user, addresses)
if (primary
and email
and email.lower() != primary.email.lower()):
user_email(user, primary.email)
user.save()
return primary
def send_email_confirmation(request, user, signup=False):
"""
E-mail verification mails are sent:
a) Explicitly: when a user signs up
b) Implicitly: when a user attempts to log in using an unverified
e-mail while EMAIL_VERIFICATION is mandatory.
Especially in case of b), we want to limit the number of mails
sent (consider a user retrying a few times), which is why there is
a cooldown period before sending a new mail.
"""
from .models import EmailAddress, EmailConfirmation
COOLDOWN_PERIOD = timedelta(minutes=3)
email = user_email(user)
if email:
try:
email_address = EmailAddress.objects.get_for_user(user, email)
if not email_address.verified:
send_email = not EmailConfirmation.objects \
.filter(sent__gt=now() - COOLDOWN_PERIOD,
email_address=email_address) \
.exists()
if send_email:
email_address.send_confirmation(request,
signup=signup)
else:
send_email = False
except EmailAddress.DoesNotExist:
send_email = True
email_address = EmailAddress.objects.add_email(request,
user,
email,
signup=signup,
confirm=True)
assert email_address
# At this point, if we were supposed to send an email we have sent it.
if send_email:
get_adapter().add_message(request,
messages.INFO,
'account/messages/'
'email_confirmation_sent.txt',
{'email': email})
if signup:
get_adapter().stash_user(request, user_pk_to_url_str(user))
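# Rate-limiting sketch (illustrative): a second call for the same unverified
# address within COOLDOWN_PERIOD finds a recent EmailConfirmation and skips
# sending, so a user retrying a login is not flooded with mails.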
def sync_user_email_addresses(user):
"""
Keep user.email in sync with user.emailaddress_set.
Under some circumstances the user.email may not have ended up as
an EmailAddress record, e.g. in the case of manually created admin
users.
"""
from .models import EmailAddress
email = user_email(user)
if email and not EmailAddress.objects.filter(user=user,
email__iexact=email).exists():
if app_settings.UNIQUE_EMAIL \
and EmailAddress.objects.filter(email__iexact=email).exists():
# Bail out
return
EmailAddress.objects.create(user=user,
email=email,
primary=False,
verified=False)
def filter_users_by_email(email):
"""Return list of users by email address
Typically one, at most just a few in length. First we look through
    EmailAddress table, then customisable User model table. Add results
together avoiding SQL joins and deduplicate.
"""
from .models import EmailAddress
User = get_user_model()
mails = EmailAddress.objects.filter(email__iexact=email)
users = [e.user for e in mails.prefetch_related('user')]
if app_settings.USER_MODEL_EMAIL_FIELD:
q_dict = {app_settings.USER_MODEL_EMAIL_FIELD + '__iexact': email}
users += list(User.objects.filter(**q_dict))
return list(set(users))
def passthrough_next_redirect_url(request, url, redirect_field_name):
assert url.find("?") < 0 # TODO: Handle this case properly
next_url = get_next_redirect_url(request, redirect_field_name)
if next_url:
url = url + '?' + urlencode({redirect_field_name: next_url})
return url
def user_pk_to_url_str(user):
"""
This should return a string.
"""
User = get_user_model()
if (hasattr(models, 'UUIDField')
and issubclass(type(User._meta.pk), models.UUIDField)):
if isinstance(user.pk, six.string_types):
return user.pk
return user.pk.hex
ret = user.pk
if isinstance(ret, six.integer_types):
ret = int_to_base36(user.pk)
return str(ret)
def url_str_to_user_pk(s):
User = get_user_model()
# TODO: Ugh, isn't there a cleaner way to determine whether or not
# the PK is a str-like field?
if getattr(User._meta.pk, 'rel', None):
pk_field = User._meta.pk.rel.to._meta.pk
else:
pk_field = User._meta.pk
if (hasattr(models, 'UUIDField')
and issubclass(type(pk_field), models.UUIDField)):
return s
try:
pk_field.to_python('a')
pk = s
except ValidationError:
pk = base36_to_int(s)
return pk
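# Round-trip sketch (illustrative; assumes the default integer primary key):
#
#     s = user_pk_to_url_str(user)   # e.g. pk 123 -> base36 "3f"
#     pk = url_str_to_user_pk(s)     # integer pk fields fail the to_python('a')
#                                    # probe, so s is base36-decoded -> 123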
|
|
from __future__ import absolute_import
import logging
import sys
import threading
from Queue import Full, PriorityQueue
from concurrent.futures import Future
from concurrent.futures._base import RUNNING, FINISHED
from time import time
from six.moves import xrange
logger = logging.getLogger(__name__)
def execute(function, daemon=True):
future = Future()
def run():
if not future.set_running_or_notify_cancel():
return
try:
result = function()
except Exception:
future.set_exception_info(*sys.exc_info()[1:])
else:
future.set_result(result)
t = threading.Thread(target=run)
t.daemon = daemon
t.start()
return future
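# Minimal usage sketch (illustrative only): run a bound callable on a daemon
# thread and block on the returned future for its result.
#
#     future = execute(lambda: 40 + 2)
#     assert future.result() == 42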
class TimedFuture(Future):
def __init__(self, *args, **kwargs):
self.__timing = [None, None] # [started, finished/cancelled]
super(TimedFuture, self).__init__(*args, **kwargs)
def get_timing(self):
"""\
Return the timing data for this future in the form ``(started, finished)``.
The ``started`` value can be either a timestamp or ``None`` (if the
future has not been started.) The ``finished`` value can also be either
a timestamp or ``None`` (if the future has not been either completed or
cancelled.)
        There are some idiosyncrasies with the way the timings are recorded:
- The ``started`` value will generally not be ``None`` if the
``finished`` value is also not ``None``. However, for a future that
was marked as cancelled and has yet to be attempted to be executed,
the ``finished`` value may be set while the ``started`` value is
``None``.
- Similarly, the ``started`` value will generally be equal to or less
than the ``finished`` value (ignoring non-monotonic clock phenomena.)
          However, for a future that is marked as cancelled prior to execution,
the ``finished`` time (when the future was cancelled) may be before
the ``started`` time (when the future was attempted to be executed.)
"""
return tuple(self.__timing)
def set_running_or_notify_cancel(self, *args, **kwargs):
result = super(TimedFuture, self).set_running_or_notify_cancel(*args, **kwargs)
# This method can only be called once (the second invocation will raise
# a ``RuntimeError``) so if we've gotten this far we can be reasonably
# confident that the start time hasn't been set.
self.__timing[0] = time()
return result
def cancel(self, *args, **kwargs):
with self._condition:
# Futures can only be marked as cancelled if they are neither
            # running nor finished (we have to duplicate this check that is also
# performed in the superclass to ensure the timing is set before
# callbacks are invoked.) As long as the future is in the correct
# state, this call is guaranteed to succeed. This method can be
# called multiple times, but we only record the first time the
# future was cancelled.
if self._state not in [RUNNING, FINISHED] and self.__timing[1] is None:
self.__timing[1] = time()
return super(TimedFuture, self).cancel(*args, **kwargs)
def set_result(self, *args, **kwargs):
with self._condition:
# This method always overwrites the result, so we always overwrite
# the timing, even if another timing was already recorded.
self.__timing[1] = time()
return super(TimedFuture, self).set_result(*args, **kwargs)
def set_exception_info(self, *args, **kwargs):
# XXX: This makes the potentially unsafe assumption that
# ``set_exception`` will always continue to call this function.
with self._condition:
# This method always overwrites the result, so we always overwrite
# the timing, even if another timing was already recorded.
self.__timing[1] = time()
return super(TimedFuture, self).set_exception_info(*args, **kwargs)
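# Timing sketch (illustrative): once a TimedFuture has started and finished,
# both entries of ``get_timing()`` are timestamps.
#
#     f = TimedFuture()
#     f.set_running_or_notify_cancel()  # records the start time
#     f.set_result(None)                # records the finish time
#     started, finished = f.get_timing()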
class Executor(object):
"""
This class provides an API for executing tasks in different contexts
(immediately, or asynchronously.)
NOTE: This is *not* compatible with the ``concurrent.futures.Executor``
API! Rather than ``submit`` accepting the function arguments, the function
must already have the argument values bound (via ``functools.partial`` or
similar), and ``submit`` passes all additional arguments to ``queue.put``
to allow controlling whether or not queue insertion should be blocking.
"""
Future = TimedFuture
def submit(self, callable, priority=0, block=True, timeout=None):
"""
Enqueue a task to be executed, returning a ``TimedFuture``.
All implementations *must* accept the ``callable`` parameter, but other
parameters may or may not be implemented, depending on the specific
implementation used.
"""
raise NotImplementedError
class SynchronousExecutor(Executor):
"""
This executor synchronously executes callables in the current thread.
    This primarily exists to provide API compatibility with
``ThreadedExecutor`` for calls that do not do significant I/O.
"""
# TODO: The ``Future`` implementation here could be replaced with a
# lock-free future for efficiency.
def submit(self, callable, *args, **kwargs):
"""
Immediately execute a callable, returning a ``TimedFuture``.
"""
future = self.Future()
assert future.set_running_or_notify_cancel()
try:
result = callable()
except Exception:
future.set_exception_info(*sys.exc_info()[1:])
else:
future.set_result(result)
return future
class ThreadedExecutor(Executor):
"""\
This executor provides a method of executing callables in a threaded worker
pool. The number of outstanding requests can be limited by the ``maxsize``
parameter, which has the same behavior as the parameter of the same name
for the ``PriorityQueue`` constructor.
All threads are daemon threads and will remain alive until the main thread
exits. Any items remaining in the queue at this point may not be executed!
"""
def __init__(self, worker_count=1, maxsize=0):
self.__worker_count = worker_count
self.__workers = set([])
self.__started = False
self.__queue = PriorityQueue(maxsize)
self.__lock = threading.Lock()
def __worker(self):
queue = self.__queue
while True:
priority, (function, future) = queue.get(True)
if not future.set_running_or_notify_cancel():
continue
try:
result = function()
except Exception:
future.set_exception_info(*sys.exc_info()[1:])
else:
future.set_result(result)
queue.task_done()
def start(self):
with self.__lock:
if self.__started:
return
for i in xrange(self.__worker_count):
t = threading.Thread(target=self.__worker)
t.daemon = True
t.start()
self.__workers.add(t)
self.__started = True
def submit(self, callable, priority=0, block=True, timeout=None):
"""\
Enqueue a task to be executed, returning a ``TimedFuture``.
Tasks can be prioritized by providing a value for the ``priority``
argument, which follows the same specification as the standard library
``Queue.PriorityQueue`` (lowest valued entries are retrieved first.)
If the worker pool has not already been started, calling this method
will cause all of the worker threads to start running.
"""
if not self.__started:
self.start()
future = self.Future()
task = (priority, (callable, future))
try:
self.__queue.put(task, block=block, timeout=timeout)
except Full as error:
if future.set_running_or_notify_cancel():
future.set_exception(error)
return future
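# Usage sketch (illustrative): arguments must already be bound, e.g. with
# ``functools.partial``; lower ``priority`` values are executed first.
#
#     executor = ThreadedExecutor(worker_count=2)
#     future = executor.submit(functools.partial(pow, 2, 10), priority=0)
#     assert future.result() == 1024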
class FutureSet(object):
"""\
Coordinates a set of ``Future`` objects (either from
``concurrent.futures``, or otherwise API compatible), and allows for
attaching a callback when all futures have completed execution.
"""
def __init__(self, futures):
self.__pending = set(futures)
self.__completed = set()
self.__callbacks = []
self.__lock = threading.Lock()
for future in futures:
future.add_done_callback(self.__mark_completed)
def __iter__(self):
with self.__lock:
futures = self.__pending | self.__completed
return iter(futures)
def __execute_callback(self, callback):
try:
callback(self)
except Exception as error:
logger.warning("Error when calling callback %r: %s", callback, error, exc_info=True)
def __mark_completed(self, future):
with self.__lock:
self.__pending.remove(future)
self.__completed.add(future)
remaining = len(self.__pending)
if remaining == 0:
for callback in self.__callbacks:
self.__execute_callback(callback)
def add_done_callback(self, callback):
with self.__lock:
remaining = len(self.__pending)
if remaining > 0:
self.__callbacks.append(callback)
if remaining == 0:
self.__execute_callback(callback)
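# Sketch (illustrative; ``task`` is a hypothetical bound callable): fire a
# callback once every future in the set has completed.
#
#     futures = [executor.submit(functools.partial(task, i)) for i in range(3)]
#     FutureSet(futures).add_done_callback(lambda fs: logger.info("all done"))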
|
|
import albow
from albow.dialogs import Dialog
from config import config
import pygame
from albow.translate import _, buildTemplate
import sys
import os
import logging
import traceback
import directories
old_lang = None
old_fprop = None
class OptionsPanel(Dialog):
anchor = 'wh'
def __init__(self, mcedit):
Dialog.__init__(self)
self.mcedit = mcedit
self.langs = {}
self.sgnal = {}
self.portableVar = albow.AttrRef(self, 'portableLabelText')
self.saveOldPortable = self.portableVar.get()
self.saveOldConfig = {
config.controls.autobrake: config.controls.autobrake.get(),
config.controls.swapAxes: config.controls.swapAxes.get(),
config.controls.cameraAccel: config.controls.cameraAccel.get(),
config.controls.cameraDrag: config.controls.cameraDrag.get(),
config.controls.cameraMaxSpeed: config.controls.cameraMaxSpeed.get(),
config.controls.cameraBrakingSpeed: config.controls.cameraBrakingSpeed.get(),
config.controls.mouseSpeed: config.controls.mouseSpeed.get(),
config.settings.undoLimit: config.settings.undoLimit.get(),
config.settings.maxCopies: config.settings.maxCopies.get(),
config.controls.invertMousePitch: config.controls.invertMousePitch.get(),
config.settings.spaceHeight: config.settings.spaceHeight.get(),
albow.AttrRef(self, 'blockBuffer'): albow.AttrRef(self, 'blockBuffer').get(),
config.settings.setWindowPlacement: config.settings.setWindowPlacement.get(),
config.settings.rotateBlockBrush: config.settings.rotateBlockBrush.get(),
config.settings.shouldResizeAlert: config.settings.shouldResizeAlert.get(),
config.settings.superSecretSettings: config.settings.superSecretSettings.get(),
config.settings.longDistanceMode: config.settings.longDistanceMode.get(),
config.settings.flyMode: config.settings.flyMode.get(),
config.settings.langCode: config.settings.langCode.get(),
config.settings.compassToggle: config.settings.compassToggle.get(),
config.settings.compassSize: config.settings.compassSize.get(),
config.settings.fontProportion: config.settings.fontProportion.get(),
config.settings.fogIntensity: config.settings.fogIntensity.get(),
config.schematicCopying.cancelCommandBlockOffset: config.schematicCopying.cancelCommandBlockOffset.get()
}
global old_lang
if old_lang == None:
old_lang = config.settings.langCode.get()
global old_fprop
if old_fprop == None:
old_fprop = config.settings.fontProportion.get()
def initComponents(self):
"""Initilize the window components. Call this after translation hs been loaded."""
autoBrakeRow = albow.CheckBoxLabel("Autobrake",
ref=config.controls.autobrake,
tooltipText="Apply brake when not pressing movement keys")
swapAxesRow = albow.CheckBoxLabel("Swap Axes Looking Down",
ref=config.controls.swapAxes,
tooltipText="Change the direction of the Forward and Backward keys when looking down")
cameraAccelRow = albow.FloatInputRow("Camera Acceleration: ",
ref=config.controls.cameraAccel, width=100, min=5.0)
cameraDragRow = albow.FloatInputRow("Camera Drag: ",
ref=config.controls.cameraDrag, width=100, min=1.0)
cameraMaxSpeedRow = albow.FloatInputRow("Camera Max Speed: ",
ref=config.controls.cameraMaxSpeed, width=100, min=1.0)
cameraBrakeSpeedRow = albow.FloatInputRow("Camera Braking Speed: ",
ref=config.controls.cameraBrakingSpeed, width=100,
min=1.0)
mouseSpeedRow = albow.FloatInputRow("Mouse Speed: ",
ref=config.controls.mouseSpeed, width=100, min=0.1,
max=20.0)
undoLimitRow = albow.IntInputRow("Undo Limit: ",
ref=config.settings.undoLimit, width=100, min=0)
maxCopiesRow = albow.IntInputRow("Copy Stack Size: ",
ref=config.settings.maxCopies, width=100, min=0,
tooltipText="Maximum number of copied objects.")
compassSizeRow = albow.IntInputRow("Compass Size (%): ",
ref=config.settings.compassSize, width=100, min=0, max=100)
fontProportion = albow.IntInputRow("Fonts Proportion (%): ",
ref=config.settings.fontProportion, width=100, min=0,
tooltipText="Fonts sizing proportion. The number is a percentage.\nRestart needed!")
albow.resource.font_proportion = config.settings.fontProportion.get()
fogIntensityRow = albow.IntInputRow("Fog Intensity (%): ",
ref=config.settings.fogIntensity, width=100, min=0, max=100)
invertRow = albow.CheckBoxLabel("Invert Mouse",
ref=config.controls.invertMousePitch,
tooltipText="Reverse the up and down motion of the mouse.")
spaceHeightRow = albow.IntInputRow("Low Detail Height",
ref=config.settings.spaceHeight,
tooltipText="When you are this far above the top of the world, move fast and use low-detail mode.")
blockBufferRow = albow.IntInputRow("Block Buffer (MB):",
ref=albow.AttrRef(self, 'blockBuffer'), min=1,
tooltipText="Amount of memory used for temporary storage. When more than this is needed, the disk is used instead.")
setWindowPlacementRow = albow.CheckBoxLabel("Set Window Placement",
ref=config.settings.setWindowPlacement,
tooltipText="Try to save and restore the window position.")
        rotateBlockBrushRow = albow.CheckBoxLabel("Rotate block with brush",
                                                  ref=config.settings.rotateBlockBrush,
                                                  tooltipText="When rotating your brush, also rotate the orientation of the block you're brushing with")
        compassToggleRow = albow.CheckBoxLabel("Toggle compass",
ref=config.settings.compassToggle)
windowSizeRow = albow.CheckBoxLabel("Window Resize Alert",
ref=config.settings.shouldResizeAlert,
tooltipText="Reminds you that the cursor won't work correctly after resizing the window.")
        superSecretSettingsRow = albow.CheckBoxLabel("Super Secret Settings",
                                                     ref=config.settings.superSecretSettings,
                                                     tooltipText="Weird stuff happens!")
longDistanceRow = albow.CheckBoxLabel("Long-Distance Mode",
ref=config.settings.longDistanceMode,
tooltipText="Always target the farthest block under the cursor, even in mouselook mode.")
flyModeRow = albow.CheckBoxLabel("Fly Mode",
ref=config.settings.flyMode,
tooltipText="Moving forward and Backward will not change your altitude in Fly Mode.")
showCommandsRow = albow.CheckBoxLabel("Show Commands",
ref=config.settings.showCommands,
tooltipText="Show the command in a Command Block when hovering over it.")
        cancelCommandBlockOffset = albow.CheckBoxLabel("Cancel Command Block Offset",
                                                       ref=config.schematicCopying.cancelCommandBlockOffset,
                                                       tooltipText="Cancels the command block coordinate offset when copied.")
lng = config.settings.langCode.get()
langs = sorted(self.getLanguageChoices().items())
langNames = [k for k, v in langs]
self.languageButton = albow.ChoiceButton(langNames, choose=self.changeLanguage, doNotTranslate=True)
if self.sgnal[lng] in self.languageButton.choices:
self.languageButton.selectedChoice = self.sgnal[lng]
langButtonRow = albow.Row((albow.Label("Language", tooltipText="Choose your language."), self.languageButton))
portableList = ["Portable", "Fixed"]
self.goPortableButton = goPortableButton = albow.ChoiceButton(portableList, choose=self.togglePortable)
goPortableButton.selectedChoice = self.saveOldPortable
goPortableButton.tooltipText = self.portableButtonTooltip()
goPortableRow = albow.Row((albow.Label("Install Mode"), goPortableButton))
# Disabled Crash Reporting Option
# reportRow = albow.CheckBoxLabel("Report Errors",
# ref=config.settings.reportCrashes,
# tooltipText="Automatically report errors to the developer.")
self.inputs = (
spaceHeightRow,
cameraAccelRow,
cameraDragRow,
cameraMaxSpeedRow,
cameraBrakeSpeedRow,
blockBufferRow,
mouseSpeedRow,
undoLimitRow,
maxCopiesRow,
compassSizeRow,
fontProportion,
fogIntensityRow,
)
options = (
longDistanceRow,
flyModeRow,
autoBrakeRow,
swapAxesRow,
invertRow,
superSecretSettingsRow,
rotateBlockBrushRow,
compassToggleRow,
showCommandsRow,
cancelCommandBlockOffset,
langButtonRow,
) + (
((sys.platform == "win32" and pygame.version.vernum == (1, 9, 1)) and (windowSizeRow,) or ())
) + (
(sys.platform == "win32") and (setWindowPlacementRow,) or ()
) + (
(not sys.platform == "darwin") and (goPortableRow,) or ()
)
rightcol = albow.Column(options, align='r')
leftcol = albow.Column(self.inputs, align='r')
optionsColumn = albow.Column((albow.Label("Options"),
albow.Row((leftcol, rightcol), align="t")))
settingsRow = albow.Row((optionsColumn,))
buttonsRow = albow.Row((albow.Button("OK", action=self.dismiss), albow.Button("Cancel", action=self.cancel)))
resetToDefaultRow = albow.Row((albow.Button("Reset to default", action=self.resetDefault),))
optionsColumn = albow.Column((settingsRow, buttonsRow, resetToDefaultRow))
optionsColumn.key_down = self.key_down
self.add(optionsColumn)
self.shrink_wrap()
@property
def blockBuffer(self):
return config.settings.blockBuffer.get() / 1048576
@blockBuffer.setter
def blockBuffer(self, val):
config.settings.blockBuffer.set(int(val * 1048576))
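    # The config value is stored in bytes while the input row edits megabytes,
    # hence the 1048576 (1024 * 1024) conversion in both directions.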
def getLanguageChoices(self, current=None):
files = os.listdir(albow.translate.langPath)
langs = {}
sgnal = {}
for file in files:
name, ext = os.path.splitext(file)
if ext == ".trn" and len(name) == 5 and name[2] == "_":
langName = albow.translate.getLangName(file)
langs[langName] = name
sgnal[name] = langName
if "English (US)" not in langs.keys():
langs[u"English (US)"] = "en_US"
sgnal["en_US"] = u"English (US)"
self.langs = langs
self.sgnal = sgnal
logging.debug("Detected languages: %s"%self.langs)
return langs
def changeLanguage(self):
if albow.translate.buildTemplate:
self.languageButton.selectedChoice = 'English (US)'
return
langName = self.languageButton.selectedChoice
if langName not in self.langs:
lng = "en_US"
else:
lng = self.langs[langName]
config.settings.langCode.set(lng)
#-# Translation live update preparation
logging.debug('*** Language change detected.')
logging.debug(' Former language: %s.'%albow.translate.getLang())
logging.debug(' New language: %s.'%lng)
albow.translate.langPath = os.sep.join((directories.getDataDir(), "lang"))
update = albow.translate.setLang(lng)[2]
logging.debug(' Update done? %s (Magic %s)'%(update, update or lng == 'en_US'))
self.mcedit.root.set_update_ui(update or lng == 'en_US')
self.mcedit.root.set_update_ui(False)
self.mcedit.editor.set_update_ui(update or lng == 'en_US')
self.mcedit.editor.set_update_ui(False)
#-#
@staticmethod
def portableButtonTooltip():
return (
"Click to make your MCEdit install self-contained by moving the settings and schematics into the program folder",
"Click to make your MCEdit install persistent by moving the settings and schematics into your Documents folder")[
directories.portable]
@property
def portableLabelText(self):
return ("Portable", "Fixed")[1 - directories.portable]
@portableLabelText.setter
def portableLabelText(self, *args, **kwargs):
pass
def togglePortable(self):
if sys.platform == "darwin":
return False
textChoices = [
_("This will make your MCEdit \"portable\" by moving your settings and schematics into the same folder as {0}. Continue?").format(
(sys.platform == "darwin" and _("the MCEdit application") or _("MCEditData"))),
_("This will move your settings and schematics to your Documents folder. Continue?"),
]
useExisting = False
alertText = textChoices[directories.portable]
if albow.ask(alertText) == "OK":
if [directories.hasPreviousPortableInstallation, directories.hasPreviousFixedInstallation][directories.portable]():
asked = albow.ask("Found a previous %s installation"%["portable", "fixed"][directories.portable], responses=["Use", "Overwrite", "Cancel"])
if asked == "Use":
useExisting = True
elif asked == "Overwrite":
useExisting = False
elif asked == "Cancel":
return False
try:
[directories.goPortable, directories.goFixed][directories.portable](useExisting)
except Exception, e:
traceback.print_exc()
albow.alert(_(u"Error while moving files: {0}").format(repr(e)))
else:
self.goPortableButton.selectedChoice = self.saveOldPortable
self.goPortableButton.tooltipText = self.portableButtonTooltip()
return True
def dismiss(self, *args, **kwargs):
"""Used to change the font proportion."""
# If font proportion setting has changed, update the UI.
if config.settings.fontProportion.get() != self.saveOldConfig[config.settings.fontProportion]:
albow.resource.reload_fonts(proportion=config.settings.fontProportion.get())
self.mcedit.root.set_update_ui(True)
self.mcedit.root.set_update_ui(False)
self.mcedit.editor.set_update_ui(True)
self.mcedit.editor.set_update_ui(False)
self.reshowNumberFields()
for key in self.saveOldConfig.keys():
self.saveOldConfig[key] = key.get()
config.save()
Dialog.dismiss(self, *args, **kwargs)
def cancel(self, *args, **kwargs):
Changes = False
self.reshowNumberFields()
for key in self.saveOldConfig.keys():
if key.get() != self.saveOldConfig[key]:
Changes = True
oldLanguage = self.saveOldConfig[config.settings.langCode]
if config.settings.langCode.get() != oldLanguage:
Changes = True
newPortable = self.portableVar.get()
if newPortable != self.saveOldPortable:
Changes = True
if not Changes:
Dialog.dismiss(self, *args, **kwargs)
return
result = albow.ask("Do you want to save your changes?", ["Save", "Don't Save", "Cancel"])
if result == "Cancel":
return
if result == "Save":
self.dismiss(*args, **kwargs)
return
if config.settings.langCode.get() != oldLanguage:
self.languageButton.selectedChoice = self.sgnal[oldLanguage]
self.changeLanguage()
if _(newPortable) != _(self.saveOldPortable):
self.portableVar.set(newPortable)
self.togglePortable()
for key in self.saveOldConfig.keys():
key.set(self.saveOldConfig[key])
config.save()
Dialog.dismiss(self, *args, **kwargs)
def resetDefault(self):
self.reshowNumberFields()
for key in self.saveOldConfig.keys():
if "AttrRef" in str(key):
key.set(config.settings.blockBuffer.default / 1048576)
elif "lang" not in str(key):
key.set(key.default)
if config.settings.langCode.get() != "en_US":
config.settings.langCode.set("en_US")
self.changeLanguage()
if "Fixed" != self.portableVar.get():
self.portableVar.set("Fixed")
self.togglePortable()
config.save()
def reshowNumberFields(self):
for key in self.inputs:
key.subwidgets[1].editing = False
def dispatch_key(self, name, evt):
super(OptionsPanel, self).dispatch_key(name, evt)
if name == "key_down":
keyname = self.get_root().getKey(evt)
if keyname == 'Escape':
self.cancel()
|
|
from __future__ import print_function, division
import scipy
from keras.datasets import mnist
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
import datetime
import matplotlib.pyplot as plt
import sys
from data_loader import DataLoader
import numpy as np
import os
class CycleGAN():
def __init__(self):
# Input shape
self.img_rows = 128
self.img_cols = 128
self.channels = 3
self.img_shape = (self.img_rows, self.img_cols, self.channels)
# Configure data loader
self.dataset_name = 'apple2orange'
self.data_loader = DataLoader(dataset_name=self.dataset_name,
img_res=(self.img_rows, self.img_cols))
# Calculate output shape of D (PatchGAN)
patch = int(self.img_rows / 2**4)
self.disc_patch = (patch, patch, 1)
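        # With 128x128 inputs and four stride-2 discriminator layers, the
        # PatchGAN produces an 8x8x1 grid of real/fake scores (128 / 2**4 == 8).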
# Number of filters in the first layer of G and D
self.gf = 32
self.df = 64
# Loss weights
self.lambda_cycle = 10.0 # Cycle-consistency loss
self.lambda_id = 0.1 * self.lambda_cycle # Identity loss
optimizer = Adam(0.0002, 0.5)
# Build and compile the discriminators
self.d_A = self.build_discriminator()
self.d_B = self.build_discriminator()
self.d_A.compile(loss='mse',
optimizer=optimizer,
metrics=['accuracy'])
self.d_B.compile(loss='mse',
optimizer=optimizer,
metrics=['accuracy'])
#-------------------------
# Construct Computational
# Graph of Generators
#-------------------------
# Build the generators
self.g_AB = self.build_generator()
self.g_BA = self.build_generator()
# Input images from both domains
img_A = Input(shape=self.img_shape)
img_B = Input(shape=self.img_shape)
# Translate images to the other domain
fake_B = self.g_AB(img_A)
fake_A = self.g_BA(img_B)
# Translate images back to original domain
reconstr_A = self.g_BA(fake_B)
reconstr_B = self.g_AB(fake_A)
# Identity mapping of images
img_A_id = self.g_BA(img_A)
img_B_id = self.g_AB(img_B)
# For the combined model we will only train the generators
self.d_A.trainable = False
self.d_B.trainable = False
# Discriminators determines validity of translated images
valid_A = self.d_A(fake_A)
valid_B = self.d_B(fake_B)
# Combined model trains generators to fool discriminators
self.combined = Model(inputs=[img_A, img_B],
outputs=[ valid_A, valid_B,
reconstr_A, reconstr_B,
img_A_id, img_B_id ])
self.combined.compile(loss=['mse', 'mse',
'mae', 'mae',
'mae', 'mae'],
loss_weights=[ 1, 1,
self.lambda_cycle, self.lambda_cycle,
self.lambda_id, self.lambda_id ],
optimizer=optimizer)
def build_generator(self):
"""U-Net Generator"""
def conv2d(layer_input, filters, f_size=4):
"""Layers used during downsampling"""
d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
d = LeakyReLU(alpha=0.2)(d)
d = InstanceNormalization()(d)
return d
def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
"""Layers used during upsampling"""
u = UpSampling2D(size=2)(layer_input)
u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
if dropout_rate:
u = Dropout(dropout_rate)(u)
u = InstanceNormalization()(u)
u = Concatenate()([u, skip_input])
return u
# Image input
d0 = Input(shape=self.img_shape)
# Downsampling
d1 = conv2d(d0, self.gf)
d2 = conv2d(d1, self.gf*2)
d3 = conv2d(d2, self.gf*4)
d4 = conv2d(d3, self.gf*8)
# Upsampling
u1 = deconv2d(d4, d3, self.gf*4)
u2 = deconv2d(u1, d2, self.gf*2)
u3 = deconv2d(u2, d1, self.gf)
u4 = UpSampling2D(size=2)(u3)
output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u4)
return Model(d0, output_img)
def build_discriminator(self):
def d_layer(layer_input, filters, f_size=4, normalization=True):
"""Discriminator layer"""
d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
d = LeakyReLU(alpha=0.2)(d)
if normalization:
d = InstanceNormalization()(d)
return d
img = Input(shape=self.img_shape)
d1 = d_layer(img, self.df, normalization=False)
d2 = d_layer(d1, self.df*2)
d3 = d_layer(d2, self.df*4)
d4 = d_layer(d3, self.df*8)
validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)
return Model(img, validity)
def train(self, epochs, batch_size=1, sample_interval=50):
start_time = datetime.datetime.now()
# Adversarial loss ground truths
valid = np.ones((batch_size,) + self.disc_patch)
fake = np.zeros((batch_size,) + self.disc_patch)
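        # One label per PatchGAN cell: arrays of shape (batch_size,) + disc_patch,
        # i.e. (batch_size, 8, 8, 1) for 128x128 inputs.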
for epoch in range(epochs):
for batch_i, (imgs_A, imgs_B) in enumerate(self.data_loader.load_batch(batch_size)):
# ----------------------
# Train Discriminators
# ----------------------
# Translate images to opposite domain
fake_B = self.g_AB.predict(imgs_A)
fake_A = self.g_BA.predict(imgs_B)
# Train the discriminators (original images = real / translated = Fake)
dA_loss_real = self.d_A.train_on_batch(imgs_A, valid)
dA_loss_fake = self.d_A.train_on_batch(fake_A, fake)
dA_loss = 0.5 * np.add(dA_loss_real, dA_loss_fake)
dB_loss_real = self.d_B.train_on_batch(imgs_B, valid)
dB_loss_fake = self.d_B.train_on_batch(fake_B, fake)
dB_loss = 0.5 * np.add(dB_loss_real, dB_loss_fake)
                # Total discriminator loss
d_loss = 0.5 * np.add(dA_loss, dB_loss)
# ------------------
# Train Generators
# ------------------
# Train the generators
g_loss = self.combined.train_on_batch([imgs_A, imgs_B],
[valid, valid,
imgs_A, imgs_B,
imgs_A, imgs_B])
elapsed_time = datetime.datetime.now() - start_time
# Plot the progress
print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %05f, adv: %05f, recon: %05f, id: %05f] time: %s " \
% ( epoch, epochs,
batch_i, self.data_loader.n_batches,
d_loss[0], 100*d_loss[1],
g_loss[0],
np.mean(g_loss[1:3]),
np.mean(g_loss[3:5]),
np.mean(g_loss[5:6]),
elapsed_time))
# If at save interval => save generated image samples
if batch_i % sample_interval == 0:
self.sample_images(epoch, batch_i)
def sample_images(self, epoch, batch_i):
os.makedirs('images/%s' % self.dataset_name, exist_ok=True)
r, c = 2, 3
imgs_A = self.data_loader.load_data(domain="A", batch_size=1, is_testing=True)
imgs_B = self.data_loader.load_data(domain="B", batch_size=1, is_testing=True)
# Demo (for GIF)
#imgs_A = self.data_loader.load_img('datasets/apple2orange/testA/n07740461_1541.jpg')
#imgs_B = self.data_loader.load_img('datasets/apple2orange/testB/n07749192_4241.jpg')
# Translate images to the other domain
fake_B = self.g_AB.predict(imgs_A)
fake_A = self.g_BA.predict(imgs_B)
# Translate back to original domain
reconstr_A = self.g_BA.predict(fake_B)
reconstr_B = self.g_AB.predict(fake_A)
gen_imgs = np.concatenate([imgs_A, fake_B, reconstr_A, imgs_B, fake_A, reconstr_B])
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
titles = ['Original', 'Translated', 'Reconstructed']
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(gen_imgs[cnt])
axs[i, j].set_title(titles[j])
axs[i,j].axis('off')
cnt += 1
fig.savefig("images/%s/%d_%d.png" % (self.dataset_name, epoch, batch_i))
plt.close()
if __name__ == '__main__':
gan = CycleGAN()
gan.train(epochs=200, batch_size=1, sample_interval=200)
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Virtual Me2Me implementation. This script runs and manages the processes
# required for a Virtual Me2Me desktop, which are: X server, X desktop
# session, and Host process.
# This script is intended to run continuously as a background daemon
# process, running under an ordinary (non-root) user account.
import atexit
import base64
import errno
import getpass
import hashlib
import hmac
import json
import logging
import optparse
import os
import random
import signal
import socket
import subprocess
import sys
import tempfile
import time
import urllib2
import uuid
# Local modules
import gaia_auth
import keygen
REMOTING_COMMAND = "remoting_me2me_host"
# Command-line switch for passing the config path to remoting_me2me_host.
HOST_CONFIG_SWITCH_NAME = "host-config"
# Needs to be an absolute path, since the current working directory is changed
# when this process self-daemonizes.
SCRIPT_PATH = os.path.dirname(sys.argv[0])
if SCRIPT_PATH:
SCRIPT_PATH = os.path.abspath(SCRIPT_PATH)
else:
SCRIPT_PATH = os.getcwd()
# These are relative to SCRIPT_PATH.
EXE_PATHS_TO_TRY = [
".",
"../../out/Debug",
"../../out/Release"
]
CONFIG_DIR = os.path.expanduser("~/.config/chrome-remote-desktop")
HOME_DIR = os.environ["HOME"]
X_LOCK_FILE_TEMPLATE = "/tmp/.X%d-lock"
FIRST_X_DISPLAY_NUMBER = 20
X_AUTH_FILE = os.path.expanduser("~/.Xauthority")
os.environ["XAUTHORITY"] = X_AUTH_FILE
# Globals needed by the atexit cleanup() handler.
g_desktops = []
g_pidfile = None
class Authentication:
"""Manage authentication tokens for Chromoting/xmpp"""
def __init__(self, config_file):
self.config_file = config_file
def generate_tokens(self):
"""Prompt for username/password and use them to generate new authentication
tokens.
Raises:
Exception: Failed to get new authentication tokens.
"""
print "Email:",
self.login = raw_input()
password = getpass.getpass("Password: ")
chromoting_auth = gaia_auth.GaiaAuthenticator('chromoting')
self.chromoting_auth_token = chromoting_auth.authenticate(self.login,
password)
xmpp_authenticator = gaia_auth.GaiaAuthenticator('chromiumsync')
self.xmpp_auth_token = xmpp_authenticator.authenticate(self.login,
password)
def load_config(self):
try:
settings_file = open(self.config_file, 'r')
data = json.load(settings_file)
settings_file.close()
self.login = data["xmpp_login"]
self.chromoting_auth_token = data["chromoting_auth_token"]
self.xmpp_auth_token = data["xmpp_auth_token"]
except:
return False
return True
def save_config(self):
data = {
"xmpp_login": self.login,
"chromoting_auth_token": self.chromoting_auth_token,
"xmpp_auth_token": self.xmpp_auth_token,
}
# File will contain private keys, so deny read/write access to others.
old_umask = os.umask(0066)
settings_file = open(self.config_file, 'w')
settings_file.write(json.dumps(data, indent=2))
settings_file.close()
os.umask(old_umask)
class Host:
"""This manages the configuration for a host.
Callers should instantiate a Host object (passing in a filename where the
config will be kept), then should call either of the methods:
* register(auth): Create a new Host configuration and register it
with the Directory Service (the "auth" parameter is used to
authenticate with the Service).
* load_config(): Load a config from disk, with details of an existing Host
registration.
After calling register() (or making any config changes) the method
save_config() should be called to save the details to disk.
"""
server = 'www.googleapis.com'
url = 'https://' + server + '/chromoting/v1/@me/hosts'
def __init__(self, config_file):
self.config_file = config_file
self.host_id = str(uuid.uuid1())
self.host_name = socket.gethostname()
self.host_secret_hash = None
self.private_key = None
def register(self, auth):
"""Generates a private key for the stored |host_id|, and registers it with
the Directory service.
Args:
auth: Authentication object with credentials for authenticating with the
Directory service.
Raises:
urllib2.HTTPError: An error occurred talking to the Directory server
(for example, if the |auth| credentials were rejected).
"""
logging.info("HostId: " + self.host_id)
logging.info("HostName: " + self.host_name)
logging.info("Generating RSA key pair...")
(self.private_key, public_key) = keygen.generateRSAKeyPair()
logging.info("Done")
json_data = {
"data": {
"hostId": self.host_id,
"hostName": self.host_name,
"publicKey": public_key,
}
}
params = json.dumps(json_data)
headers = {
"Authorization": "GoogleLogin auth=" + auth.chromoting_auth_token,
"Content-Type": "application/json",
}
request = urllib2.Request(self.url, params, headers)
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
logging.info("Registering host with directory service...")
res = urllib2.urlopen(request)
data = res.read()
logging.info("Done")
def ask_pin(self):
print \
"""Chromoting host supports PIN-based authentication, but it doesn't
work with Chrome 16 and Chrome 17 clients. Leave the PIN empty if you
need to use Chrome 16 or Chrome 17 clients. If you only use Chrome 18
or above, please set a non-empty PIN. You can change PIN later using
-p flag."""
while 1:
pin = getpass.getpass("Host PIN: ")
if len(pin) == 0:
print "Using empty PIN"
break
if len(pin) < 4:
print "PIN must be at least 4 characters long."
continue
pin2 = getpass.getpass("Confirm host PIN: ")
if pin2 != pin:
print "PINs didn't match. Please try again."
continue
break
self.set_pin(pin)
def set_pin(self, pin):
if pin == "":
self.host_secret_hash = "plain:"
else:
self.host_secret_hash = "hmac:" + base64.b64encode(
hmac.new(str(self.host_id), pin, hashlib.sha256).digest())
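  # The stored secret is either "plain:" (empty PIN) or "hmac:<base64>", where
  # the digest is HMAC-SHA256 keyed with the host_id over the PIN.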
def is_pin_set(self):
return self.host_secret_hash
def load_config(self):
try:
settings_file = open(self.config_file, 'r')
data = json.load(settings_file)
settings_file.close()
except:
logging.info("Failed to load: " + self.config_file)
return False
self.host_id = data["host_id"]
self.host_name = data["host_name"]
self.host_secret_hash = data.get("host_secret_hash")
self.private_key = data["private_key"]
return True
def save_config(self):
data = {
"host_id": self.host_id,
"host_name": self.host_name,
"host_secret_hash": self.host_secret_hash,
"private_key": self.private_key,
}
old_umask = os.umask(0066)
settings_file = open(self.config_file, 'w')
settings_file.write(json.dumps(data, indent=2))
settings_file.close()
os.umask(old_umask)
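# Registration sketch following the class docstring above (illustrative only;
# "host.json" is a placeholder filename):
#
#   auth = Authentication(os.path.join(CONFIG_DIR, "auth.json"))
#   auth.load_config()
#   host = Host(os.path.join(CONFIG_DIR, "host.json"))
#   host.register(auth)
#   host.save_config()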
class Desktop:
"""Manage a single virtual desktop"""
def __init__(self, width, height):
self.x_proc = None
self.session_proc = None
self.host_proc = None
self.width = width
self.height = height
g_desktops.append(self)
@staticmethod
def get_unused_display_number():
"""Return a candidate display number for which there is currently no
X Server lock file"""
display = FIRST_X_DISPLAY_NUMBER
while os.path.exists(X_LOCK_FILE_TEMPLATE % display):
display += 1
return display
def launch_x_server(self, extra_x_args):
display = self.get_unused_display_number()
ret_code = subprocess.call("xauth add :%d . `mcookie`" % display,
shell=True)
if ret_code != 0:
raise Exception("xauth failed with code %d" % ret_code)
logging.info("Starting Xvfb on display :%d" % display);
screen_option = "%dx%dx24" % (self.width, self.height)
self.x_proc = subprocess.Popen(["Xvfb", ":%d" % display,
"-noreset",
"-auth", X_AUTH_FILE,
"-nolisten", "tcp",
"-screen", "0", screen_option
] + extra_x_args)
if not self.x_proc.pid:
raise Exception("Could not start Xvfb.")
# Create clean environment for new session, so it is cleanly separated from
# the user's console X session.
self.child_env = {
"DISPLAY": ":%d" % display,
"REMOTING_ME2ME_SESSION": "1" }
for key in [
"HOME",
"LANG",
"LOGNAME",
"PATH",
"SHELL",
"USER",
"USERNAME"]:
if os.environ.has_key(key):
self.child_env[key] = os.environ[key]
# Wait for X to be active.
for test in range(5):
proc = subprocess.Popen("xdpyinfo > /dev/null", env=self.child_env,
shell=True)
pid, retcode = os.waitpid(proc.pid, 0)
if retcode == 0:
break
time.sleep(0.5)
if retcode != 0:
raise Exception("Could not connect to Xvfb.")
else:
logging.info("Xvfb is active.")
# The remoting host expects the server to use "evdev" keycodes, but Xvfb
# starts configured to use the "base" ruleset, resulting in XKB configuring
# for "xfree86" keycodes, and screwing up some keys. See crbug.com/119013.
# Reconfigure the X server to use "evdev" keymap rules. The X server must
# be started with -noreset otherwise it'll reset as soon as the command
# completes, since there are no other X clients running yet.
proc = subprocess.Popen("setxkbmap -rules evdev", env=self.child_env,
shell=True)
pid, retcode = os.waitpid(proc.pid, 0)
if retcode != 0:
logging.error("Failed to set XKB to 'evdev'")
def launch_x_session(self):
# Start desktop session
# The /dev/null input redirection is necessary to prevent Xsession from
# reading from stdin. If this code runs as a shell background job in a
# terminal, any reading from stdin causes the job to be suspended.
# Daemonization would solve this problem by separating the process from the
# controlling terminal.
#
# This assumes that GDM is installed and configured on the system.
self.session_proc = subprocess.Popen("/etc/gdm/Xsession",
stdin=open(os.devnull, "r"),
cwd=HOME_DIR,
env=self.child_env)
if not self.session_proc.pid:
raise Exception("Could not start X session")
def launch_host(self, host):
# Start remoting host
args = [locate_executable(REMOTING_COMMAND),
"--%s=%s" % (HOST_CONFIG_SWITCH_NAME, host.config_file)]
self.host_proc = subprocess.Popen(args, env=self.child_env)
if not self.host_proc.pid:
raise Exception("Could not start remoting host")
class PidFile:
"""Class to allow creating and deleting a file which holds the PID of the
running process. This is used to detect if a process is already running, and
inform the user of the PID. On process termination, the PID file is
deleted.
Note that PID files are not truly atomic or reliable, see
http://mywiki.wooledge.org/ProcessManagement for more discussion on this.
So this class is just to prevent the user from accidentally running two
instances of this script, and to report which PID may be the other running
instance.
"""
def __init__(self, filename):
"""Create an object to manage a PID file. This does not create the PID
file itself."""
self.filename = filename
self.created = False
def check(self):
"""Checks current status of the process.
Returns:
Tuple (running, pid):
|running| is True if the daemon is running.
|pid| holds the process ID of the running instance if |running| is True.
If the PID file exists but the PID couldn't be read from the file
(perhaps if the data hasn't been written yet), 0 is returned.
Raises:
IOError: Filesystem error occurred.
"""
if os.path.exists(self.filename):
pid_file = open(self.filename, 'r')
file_contents = pid_file.read()
pid_file.close()
try:
pid = int(file_contents)
except ValueError:
return True, 0
# Test to see if there's a process currently running with that PID.
# If there is no process running, the existing PID file is definitely
# stale and it is safe to overwrite it. Otherwise, report the PID as
# possibly a running instance of this script.
if os.path.exists("/proc/%d" % pid):
return True, pid
return False, 0
def create(self):
"""Creates an empty PID file."""
pid_file = open(self.filename, 'w')
pid_file.close()
self.created = True
def write_pid(self):
"""Write the current process's PID to the PID file.
This is done separately from create() as this needs to be called
after any daemonization, when the correct PID becomes known. But
    check() and create() have to happen before daemonization, so that
if another instance is already running, this fact can be reported
to the user's terminal session. This also avoids corrupting the
log file of the other process, since daemonize() would create a
new log file.
"""
pid_file = open(self.filename, 'w')
pid_file.write('%d\n' % os.getpid())
pid_file.close()
self.created = True
def delete_file(self):
"""Delete the PID file if it was created by this instance.
This is called on process termination.
"""
if self.created:
os.remove(self.filename)
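# Life-cycle sketch for PidFile (illustrative only):
#
#   pidfile = PidFile("/tmp/example.pid")
#   running, pid = pidfile.check()   # before daemonizing
#   if not running:
#     pidfile.create()
#     # ... daemonize() ...
#     pidfile.write_pid()            # record the post-fork PID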
def locate_executable(exe_name):
for path in EXE_PATHS_TO_TRY:
exe_path = os.path.join(SCRIPT_PATH, path, exe_name)
if os.path.exists(exe_path):
return exe_path
raise Exception("Could not locate executable '%s'" % exe_name)
def daemonize(log_filename):
"""Background this process and detach from controlling terminal, redirecting
stdout/stderr to |log_filename|."""
# TODO(lambroslambrou): Having stdout/stderr redirected to a log file is not
# ideal - it could create a filesystem DoS if the daemon or a child process
# were to write excessive amounts to stdout/stderr. Ideally, stdout/stderr
# should be redirected to a pipe or socket, and a process at the other end
# should consume the data and write it to a logging facility which can do
# data-capping or log-rotation. The 'logger' command-line utility could be
# used for this, but it might cause too much syslog spam.
# Create new (temporary) file-descriptors before forking, so any errors get
# reported to the main process and set the correct exit-code.
# The mode is provided, since Python otherwise sets a default mode of 0777,
# which would result in the new file having permissions of 0777 & ~umask,
# possibly leaving the executable bits set.
devnull_fd = os.open(os.devnull, os.O_RDONLY)
log_fd = os.open(log_filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0600)
pid = os.fork()
if pid == 0:
# Child process
os.setsid()
# The second fork ensures that the daemon isn't a session leader, so that
# it doesn't acquire a controlling terminal.
pid = os.fork()
if pid == 0:
# Grandchild process
pass
else:
# Child process
os._exit(0)
else:
# Parent process
os._exit(0)
logging.info("Daemon process running, logging to '%s'" % log_filename)
os.chdir(HOME_DIR)
# Copy the file-descriptors to create new stdin, stdout and stderr. Note
# that dup2(oldfd, newfd) closes newfd first, so this will close the current
# stdin, stdout and stderr, detaching from the terminal.
os.dup2(devnull_fd, sys.stdin.fileno())
os.dup2(log_fd, sys.stdout.fileno())
os.dup2(log_fd, sys.stderr.fileno())
# Close the temporary file-descriptors.
os.close(devnull_fd)
os.close(log_fd)
def cleanup():
logging.info("Cleanup.")
if g_pidfile:
try:
g_pidfile.delete_file()
except Exception, e:
logging.error("Unexpected error deleting PID file: " + str(e))
for desktop in g_desktops:
if desktop.x_proc:
logging.info("Terminating Xvfb")
desktop.x_proc.terminate()
def reload_config():
for desktop in g_desktops:
if desktop.host_proc:
# Terminating the Host will cause the main loop to spawn another
# instance, which will read any changes made to the Host config file.
desktop.host_proc.terminate()
def signal_handler(signum, stackframe):
if signum == signal.SIGUSR1:
logging.info("SIGUSR1 caught, reloading configuration.")
reload_config()
else:
# Exit cleanly so the atexit handler, cleanup(), gets called.
raise SystemExit
def main():
parser = optparse.OptionParser(
"Usage: %prog [options] [ -- [ X server options ] ]")
parser.add_option("-s", "--size", dest="size", default="1280x1024",
help="dimensions of virtual desktop (default: %default)")
parser.add_option("-f", "--foreground", dest="foreground", default=False,
action="store_true",
help="don't run as a background daemon")
parser.add_option("-k", "--stop", dest="stop", default=False,
action="store_true",
help="stop the daemon currently running")
parser.add_option("-p", "--new-pin", dest="new_pin", default=False,
action="store_true",
help="set new PIN before starting the host")
parser.add_option("", "--check-running", dest="check_running", default=False,
action="store_true",
help="return 0 if the daemon is running, or 1 otherwise")
parser.add_option("", "--explicit-config", dest="explicit_config",
help="explicitly specify content of the config")
(options, args) = parser.parse_args()
size_components = options.size.split("x")
if len(size_components) != 2:
parser.error("Incorrect size format, should be WIDTHxHEIGHT");
host_hash = hashlib.md5(socket.gethostname()).hexdigest()
pid_filename = os.path.join(CONFIG_DIR, "host#%s.pid" % host_hash)
if options.check_running:
running, pid = PidFile(pid_filename).check()
return 0 if (running and pid != 0) else 1
if options.stop:
running, pid = PidFile(pid_filename).check()
if not running:
print "The daemon currently is not running"
else:
print "Killing process %s" % pid
os.kill(pid, signal.SIGTERM)
return 0
try:
width = int(size_components[0])
height = int(size_components[1])
    # Enforce a minimum desktop size as a sanity-check. The limit of 100 catches
    # typos where only 2 of the intended 3 digits were entered.
if width < 100 or height < 100:
raise ValueError
except ValueError:
parser.error("Width and height should be 100 pixels or greater")
atexit.register(cleanup)
for s in [signal.SIGHUP, signal.SIGINT, signal.SIGTERM, signal.SIGUSR1]:
signal.signal(s, signal_handler)
# Ensure full path to config directory exists.
if not os.path.exists(CONFIG_DIR):
os.makedirs(CONFIG_DIR, mode=0700)
if options.explicit_config:
for file_name in ["auth.json", "host#%s.json" % host_hash]:
settings_file = open(os.path.join(CONFIG_DIR, file_name), 'w')
settings_file.write(options.explicit_config)
settings_file.close()
auth = Authentication(os.path.join(CONFIG_DIR, "auth.json"))
need_auth_tokens = not auth.load_config()
host = Host(os.path.join(CONFIG_DIR, "host#%s.json" % host_hash))
register_host = not host.load_config()
# Outside the loop so user doesn't get asked twice.
if register_host:
host.ask_pin()
elif options.new_pin or not host.is_pin_set():
host.ask_pin()
host.save_config()
running, pid = PidFile(pid_filename).check()
if running and pid != 0:
os.kill(pid, signal.SIGUSR1)
print "The running instance has been updated with the new PIN."
return 0
if not options.explicit_config:
# The loop is to deal with the case of registering a new Host with
# previously-saved auth tokens (from a previous run of this script), which
# may require re-prompting for username & password.
while True:
try:
if need_auth_tokens:
auth.generate_tokens()
auth.save_config()
need_auth_tokens = False
except Exception:
logging.error("Authentication failed")
return 1
try:
if register_host:
host.register(auth)
host.save_config()
except urllib2.HTTPError, err:
if err.getcode() == 401:
# Authentication failed - re-prompt for username & password.
need_auth_tokens = True
continue
else:
# Not an authentication error.
logging.error("Directory returned error: " + str(err))
logging.error(err.read())
return 1
# |auth| and |host| are both set up, so break out of the loop.
break
global g_pidfile
g_pidfile = PidFile(pid_filename)
running, pid = g_pidfile.check()
if running:
print "An instance of this script is already running."
print "Use the -k flag to terminate the running instance."
print "If this isn't the case, delete '%s' and try again." % pid_filename
return 1
g_pidfile.create()
# daemonize() must only be called after prompting for user/password, as the
# process will become detached from the controlling terminal.
if not options.foreground:
log_file = tempfile.NamedTemporaryFile(prefix="me2me_host_", delete=False)
daemonize(log_file.name)
g_pidfile.write_pid()
logging.info("Using host_id: " + host.host_id)
desktop = Desktop(width, height)
# Remember the time when the last session was launched, in order to enforce
# a minimum time between launches. This avoids spinning in case of a
# misconfigured system, or other error that prevents a session from starting
# properly.
last_launch_time = 0
while True:
# If the session process stops running (e.g. because the user logged out),
# the X server should be reset and the session restarted, to provide a
# completely clean new session.
if desktop.session_proc is None and desktop.x_proc is not None:
logging.info("Terminating X server")
desktop.x_proc.terminate()
if desktop.x_proc is None:
if desktop.session_proc is not None:
# The X session would probably die soon if the X server is not
# running (because of the loss of the X connection). Terminate it
# anyway, to be sure.
logging.info("Terminating X session")
desktop.session_proc.terminate()
else:
# Neither X server nor X session are running.
elapsed = time.time() - last_launch_time
if elapsed < 60:
logging.error("The session lasted less than 1 minute. Waiting " +
"before starting new session.")
time.sleep(60 - elapsed)
logging.info("Launching X server and X session")
last_launch_time = time.time()
desktop.launch_x_server(args)
desktop.launch_x_session()
if desktop.host_proc is None:
logging.info("Launching host process")
desktop.launch_host(host)
try:
pid, status = os.wait()
except OSError, e:
if e.errno == errno.EINTR:
# Retry on EINTR, which can happen if a signal such as SIGUSR1 is
# received.
continue
else:
# Anything else is an unexpected error.
raise
logging.info("wait() returned (%s,%s)" % (pid, status))
# When os.wait() notifies that a process has terminated, any Popen instance
# for that process is no longer valid. Reset any affected instance to
# None.
if desktop.x_proc is not None and pid == desktop.x_proc.pid:
logging.info("X server process terminated")
desktop.x_proc = None
if desktop.session_proc is not None and pid == desktop.session_proc.pid:
logging.info("Session process terminated")
desktop.session_proc = None
if desktop.host_proc is not None and pid == desktop.host_proc.pid:
logging.info("Host process terminated")
desktop.host_proc = None
# These exit-codes must match the ones used by the host.
# See remoting/host/constants.h.
# Delete the host or auth configuration depending on the returned error
# code, so the next time this script is run, a new configuration
# will be created and registered.
if os.WEXITSTATUS(status) == 2:
logging.info("Host configuration is invalid - exiting.")
os.remove(auth.config_file)
os.remove(host.config_file)
return 0
elif os.WEXITSTATUS(status) == 3:
logging.info("Host ID has been deleted - exiting.")
os.remove(host.config_file)
return 0
elif os.WEXITSTATUS(status) == 4:
logging.info("OAuth credentials are invalid - exiting.")
os.remove(auth.config_file)
return 0
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
sys.exit(main())
|
|
# Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for pypowervm.utils.transaction."""
import copy
import mock
import oslo_concurrency.lockutils as lock
import oslo_context.context as ctx
from taskflow import engines as tf_eng
from taskflow import exceptions as tf_ex
from taskflow.patterns import unordered_flow as tf_uf
from taskflow import task as tf_task
import unittest
import pypowervm.const as c
import pypowervm.exceptions as ex
import pypowervm.tests.test_fixtures as fx
import pypowervm.tests.test_utils.test_wrapper_abc as twrap
from pypowervm.utils import retry
import pypowervm.utils.transaction as tx
import pypowervm.wrappers.entry_wrapper as ewrap
import pypowervm.wrappers.logical_partition as lpar
class TestWrapperTask(twrap.TestWrapper):
file = 'lpar.txt'
wrapper_class_to_test = lpar.LPAR
def setUp(self):
super(TestWrapperTask, self).setUp()
self.useFixture(fx.SleepFx())
self.getter = lpar.LPAR.getter(self.adpt, 'getter_uuid')
# Set this up for getter.get()
self.adpt.read.return_value = self.dwrap.entry
self.tracker = mock.Mock(counter=0)
class LparNameAndMem(tx.Subtask):
"""Subtask modifying an LPAR's name and desired memory."""
def execute(self, lpar_wrapper, new_name, des_mem=None, logger=None):
"""Modify an LPAR's name and desired memory.
:param lpar_wrapper: The LPAR EntryWrapper to update.
:param new_name: The new name to give the LPAR.
:param des_mem: (Optional) The new desired memory value, an int.
:param logger: (Optional) If specified, "log" the class name for
test inspection purposes.
:return: The (possibly modified) lpar_wrapper.
"""
update_needed = False
if logger:
logger.log('LparNameAndMem_%s' % new_name)
old_name = lpar_wrapper.name
if old_name != new_name:
lpar_wrapper.name = new_name
update_needed = True
if des_mem is not None:
orig_mem = lpar_wrapper.mem_config.desired
if des_mem != orig_mem:
lpar_wrapper.mem_config.desired = des_mem
update_needed = True
return update_needed
@staticmethod
def retry_twice(wrapper, tracker, logger):
# Force a couple of retries
tracker.counter += 1
logger.log('update %d' % tracker.counter)
if tracker.counter < 3:
raise ex.HttpError(mock.Mock(status=c.HTTPStatus.ETAG_MISMATCH))
return wrapper
@mock.patch('oslo_concurrency.lockutils.Semaphores.get')
def test_synchronized_called_with_uuid(self, mock_semget):
"""Ensure the synchronizer is locking with the first arg's .uuid."""
@tx.entry_transaction
def blacklist_this(wrapper_or_getter):
pass
# At this point, the outer decorator has been invoked, but the
# synchronizing decorator has not.
self.assertEqual(0, mock_semget.call_count)
# If we call the decorated method with an EntryWrapper, synchronize
# should be invoked with the EntryWrapper's UUID
blacklist_this(self.dwrap)
self.assertEqual(1, mock_semget.call_count)
mock_semget.assert_called_with('089FFB20-5D19-4A8C-BB80-13650627D985')
# Calling with an EntryWrapperGetter should synchronize on the getter's
# registered UUID. (IRL, this will match the wrapper's UUID. Here we
# are making sure the right code path is being taken.)
mock_semget.reset_mock()
blacklist_this(self.getter)
self.assertEqual(1, mock_semget.call_count)
mock_semget.assert_called_with('getter_uuid')
def test_sequence(self):
"""Prove the sequence of events on a transaction-decorated method.
We expect it to look like:
lock
get the wrapper if necessary
invoke the method
while the method raises etag error, refresh the wrapper and re-invoke
unlock
"""
txfx = self.useFixture(fx.WrapperTaskFx(self.dwrap))
@tx.entry_transaction
def blacklist_this(wrapper_or_getter):
# Always converted by now
self.assertIsInstance(wrapper_or_getter, ewrap.EntryWrapper)
return self.retry_twice(wrapper_or_getter, self.tracker, txfx)
# With an EntryWrapperGetter, get() is invoked
self.assertEqual(self.dwrap, blacklist_this(self.getter))
self.assertEqual(['lock', 'get', 'update 1', 'refresh', 'update 2',
'refresh', 'update 3', 'unlock'], txfx.get_log())
# With an EntryWrapper, get() is not invoked
self.tracker.counter = 0
txfx.reset_log()
self.assertEqual(self.dwrap, blacklist_this(self.dwrap))
self.assertEqual(['lock', 'update 1', 'refresh', 'update 2', 'refresh',
'update 3', 'unlock'], txfx.get_log())
@mock.patch('pypowervm.utils.retry.retry')
def test_retry_args(self, mock_retry):
"""Ensure the correct arguments are passed to @retry."""
@tx.entry_transaction
def blacklist_this(wrapper_or_getter):
pass
blacklist_this(mock.Mock())
# Stepped random delay func was invoked
mock_retry.assert_called_once_with(
argmod_func=retry.refresh_wrapper, tries=60,
delay_func=retry.STEPPED_RANDOM_DELAY)
@staticmethod
def tx_subtask_invoke(tst, wrapper):
"""Simulates how Subtasks are invoked by WrapperTask.
:param tst: A Subtask
:param wrapper: The wrapper with which to invoke execute()
:return: The value returned by execute()
"""
return tst.execute(wrapper, *tst.save_args, **tst.save_kwargs)
def test_wrapper_task_subtask(self):
"""Tests around Subtask."""
# Same name, should result in no changes and no update_needed
txst1 = self.LparNameAndMem('z3-9-5-126-127-00000001')
self.assertFalse(self.tx_subtask_invoke(txst1, self.dwrap))
self.assertEqual('z3-9-5-126-127-00000001', self.dwrap.name)
self.assertEqual(512, self.dwrap.mem_config.desired)
# New name should prompt update_needed. Specified-but-same des_mem.
txst2 = self.LparNameAndMem('new-name', des_mem=512)
self.assertTrue(self.tx_subtask_invoke(txst2, self.dwrap))
self.assertEqual('new-name', self.dwrap.name)
self.assertEqual(512, self.dwrap.mem_config.desired)
# New name and mem should prompt update_needed
txst3 = self.LparNameAndMem('newer-name', des_mem=1024)
self.assertTrue(self.tx_subtask_invoke(txst3, self.dwrap))
self.assertEqual('newer-name', self.dwrap.name)
self.assertEqual(1024, self.dwrap.mem_config.desired)
# Same name and explicit same mem - no update_needed
txst4 = self.LparNameAndMem('newer-name', des_mem=1024)
self.assertFalse(self.tx_subtask_invoke(txst4, self.dwrap))
self.assertEqual('newer-name', self.dwrap.name)
self.assertEqual(1024, self.dwrap.mem_config.desired)
def test_wrapper_task_subtask_returns(self):
"""Test that execute methods' return values are processed properly."""
# Use internal _FunctorSubtask to make this easier. Bonus: testing
# _FunctorSubtask at the same time.
def returns_second_arg(wrapper, boolable):
"""Used to test various boolable single returns."""
return boolable
# Various valid 'False' boolables - update not needed
falseables = (0, '', [], {}, False)
for falseable in falseables:
txst = tx._FunctorSubtask(returns_second_arg, falseable)
self.assertFalse(self.tx_subtask_invoke(txst, self.dwrap))
# Various valid 'True' boolables - update needed
trueables = (1, 'string', [0], {'k': 'v'}, True)
for trueable in trueables:
txst = tx._FunctorSubtask(returns_second_arg, trueable)
self.assertTrue(self.tx_subtask_invoke(txst, self.dwrap))
def test_wrapper_task_allow_empty(self):
"""Test the allow_empty=True condition."""
# No mocks - no REST calls should be run.
tx1 = tx.WrapperTask('tx1', self.getter, allow_empty=True)
# Does not raise, returns None
self.assertIsNone(tx1.execute())
def test_wrapper_task1(self):
txfx = self.useFixture(fx.WrapperTaskFx(self.dwrap))
# Must supply a wrapper or getter to instantiate
self.assertRaises(ValueError, tx.WrapperTask, 'foo', 'bar')
# Create a valid WrapperTask
tx1 = tx.WrapperTask('tx1', self.getter)
self.assertEqual('tx1', tx1.name)
self.assertIn('wrapper_getter_uuid', tx1.provides)
self.assertIn('subtask_rets_getter_uuid', tx1.provides)
# Nothing has been run yet
self.assertEqual([], txfx.get_log())
# Try running with no subtasks
self.assertRaises(ex.WrapperTaskNoSubtasks, tx1.execute)
# Try adding something that isn't a Subtask
self.assertRaises(ValueError, tx1.add_subtask, 'Not a Subtask')
# Error paths don't run anything.
self.assertEqual([], txfx.get_log())
# Add a subtask that doesn't change anything
tx1.add_subtask(self.LparNameAndMem('z3-9-5-126-127-00000001',
logger=txfx))
# Adding a subtask does not run anything
self.assertEqual([], txfx.get_log())
# Get the wrapper - this should invoke GET, but *not* under lock
self.assertEqual(self.dwrap, tx1.wrapper)
self.assertEqual(['get'], txfx.get_log())
# Run the transaction
lwrap, subtask_rets = tx1.execute()
# The name should be unchanged
self.assertEqual('z3-9-5-126-127-00000001', lwrap.name)
# And update should not have been called, which should be reflected in
# the log. Note that 'get' is NOT called a second time.
self.assertEqual(['get', 'lock',
'LparNameAndMem_z3-9-5-126-127-00000001', 'unlock'],
txfx.get_log())
self.assertEqual({}, subtask_rets)
txfx.reset_log()
# These subtasks do change the name.
tx1.add_subtask(self.LparNameAndMem('new_name', logger=txfx))
tx1.add_subtask(self.LparNameAndMem('newer_name', logger=txfx))
# But this one doesn't. We're making sure the last 'no update needed'
# doesn't make the overall update_needed status False.
tx1.add_subtask(self.LparNameAndMem('newer_name', logger=txfx))
# Get the wrapper - this should *not* reinvoke GET
self.assertEqual(self.dwrap, tx1.wrapper)
self.assertEqual([], txfx.get_log())
# Now execute the transaction
lwrap, subtask_rets = tx1.execute()
# The last change should be the one that stuck
self.assertEqual('newer_name', lwrap.name)
# Check the overall order. Update was called.
self.assertEqual([
'lock', 'LparNameAndMem_z3-9-5-126-127-00000001',
'LparNameAndMem_new_name', 'LparNameAndMem_newer_name',
'LparNameAndMem_newer_name', 'update', 'unlock'], txfx.get_log())
self.assertEqual({}, subtask_rets)
# Test 'cloning' the subtask list
txfx.reset_log()
tx2 = tx.WrapperTask('tx2', self.getter, subtasks=tx1.subtasks)
# Add another one to make sure it goes at the end
tx2.add_subtask(self.LparNameAndMem('newest_name', logger=txfx))
# Add one to the original transaction to make sure it doesn't affect
# this one.
tx1.add_subtask(self.LparNameAndMem('bogus_name', logger=txfx))
lwrap, subtask_rets = tx2.execute()
# The last change should be the one that stuck
self.assertEqual('newest_name', lwrap.name)
# Check the overall order. This one GETs under lock. Update called.
self.assertEqual([
'lock', 'get', 'LparNameAndMem_z3-9-5-126-127-00000001',
'LparNameAndMem_new_name', 'LparNameAndMem_newer_name',
'LparNameAndMem_newer_name', 'LparNameAndMem_newest_name',
'update', 'unlock'], txfx.get_log())
self.assertEqual({}, subtask_rets)
def test_logspec(self):
txfx = self.useFixture(fx.WrapperTaskFx(self.dwrap))
tx1 = tx.WrapperTask('tx1', self.getter)
mock_log = mock.Mock()
mock_log.side_effect = lambda *args: txfx.log('log')
def functor(wrp):
txfx.log('functor')
# "False" logspec ignored
tx1.add_functor_subtask(functor, logspec=[])
# logspec must have at least two args
self.assertRaises(ValueError, tx1.add_functor_subtask, functor,
logspec=[1])
# First arg must be callable
self.assertRaises(ValueError, tx1.add_functor_subtask, functor,
logspec=[1, 2])
# Valid call with just a string
tx1.add_functor_subtask(functor, logspec=[mock_log, "string"])
# Valid call with a format string and args
tx1.add_functor_subtask(functor, logspec=[
mock_log, "one %s two %s", 1, 2])
# Valid call with named args
tx1.add_functor_subtask(functor, logspec=[
mock_log,
"three %(three)s four %(four)s", {'three': 3, 'four': 4}])
tx1.execute()
self.assertEqual([
'lock', 'get', 'functor', 'log', 'functor', 'log', 'functor',
'log', 'functor', 'unlock'], txfx.get_log())
mock_log.assert_has_calls([
mock.call("string"),
mock.call("one %s two %s", 1, 2),
mock.call("three %(three)s four %(four)s", {'three': 3, 'four': 4})
])
def test_flag_update(self):
"""flag_update=False avoids update even if Subtask returns True."""
txfx = self.useFixture(fx.WrapperTaskFx(self.dwrap))
tx1 = tx.WrapperTask('tx1', self.getter)
tx1.add_functor_subtask(lambda x: True, flag_update=False)
tx1.execute()
self.assertEqual(0, txfx.patchers['update'].mock.call_count)
# But if there's another Subtask that returns True without
# flag_update=False, it does trigger an update.
tx1.add_functor_subtask(lambda x: True)
tx1.execute()
self.assertEqual(1, txfx.patchers['update'].mock.call_count)
def test_wrapper_task2(self):
# Now:
# o Fake like update forces retry
# o Test add_functor_subtask, including chaining
# o Ensure GET is deferred when .wrapper() is not called ahead of time.
# o Make sure subtask args are getting to the subtask.
txfx = fx.WrapperTaskFx(self.dwrap)
def _update_retries_twice(timeout=-1):
self.assertEqual(123, timeout)
return self.retry_twice(self.dwrap, self.tracker, txfx)
txfx.patchers['update'].side_effect = _update_retries_twice
self.useFixture(txfx)
def functor(wrapper, arg1, arg2, kwarg3=None, kwarg4=None):
txfx.log('functor')
# Make sure args are getting here
self.assertEqual(['arg', 1], arg1)
self.assertEqual('arg2', arg2)
self.assertIsNone(kwarg3)
self.assertEqual('kwarg4', kwarg4)
return wrapper, True
# Instantiate-add-execute chain
tx.WrapperTask(
'tx2', self.getter,
update_timeout=123).add_functor_subtask(functor, ['arg', 1],
'arg2',
kwarg4='kwarg4').execute()
# Check the overall order. Update should have been called thrice (two
# retries)
self.assertEqual(3, txfx.patchers['update'].mock.call_count)
self.assertEqual(['lock', 'get', 'functor', 'update 1', 'refresh',
'functor', 'update 2', 'refresh', 'functor',
'update 3', 'unlock'], txfx.get_log())
def test_subtask_provides(self):
self.useFixture(fx.WrapperTaskFx(self.dwrap))
test_case = self
class ChainSubtask(tx.Subtask):
def __init__(self, val, *args, **kwargs):
self.val = val
super(ChainSubtask, self).__init__(*args, **kwargs)
def execute(self, *args, **kwargs):
test_case.assertEqual(test_case.dwrap, args[0])
# If execute accepts **kwargs, 'provided' is provided.
test_case.assertIn('provided', kwargs)
test_case.assertEqual(kwargs['expected_provided'],
kwargs['provided'])
return self.val
class ChainSubtask2(tx.Subtask):
def execute(self, wrp, provided, expected_provided):
test_case.assertEqual(test_case.dwrap, wrp)
# Able to get 'provided' as a named parameter
test_case.assertEqual(expected_provided, provided)
wtsk = tx.WrapperTask('name', self.getter)
wtsk.add_subtask(ChainSubtask(1, provides='one', expected_provided={}))
# Can't add another Subtask with the same 'provides'
self.assertRaises(ValueError, wtsk.add_subtask,
ChainSubtask(2, provides='one'))
# Next subtask should see the result from the first.
wtsk.add_subtask(ChainSubtask(2, provides='two', expected_provided={
'one': 1}))
# Add one that doesn't provide. Its return shouldn't show up in
# 'provided'.
wtsk.add_subtask(ChainSubtask(3, expected_provided={
'one': 1, 'two': 2}))
# 'provided' works implicitly when it's a named parameter on execute
wtsk.add_subtask(ChainSubtask2(expected_provided={'one': 1, 'two': 2}))
# Even when execute doesn't return anything, we 'provide' that None
wtsk.add_subtask(ChainSubtask2(provides='four', expected_provided={
'one': 1, 'two': 2}))
# Make sure the same stuff works for functors
def ret_val_kwargs(*args, **kwargs):
self.assertEqual(self.dwrap, args[0])
self.assertIn('provided', kwargs)
self.assertEqual(kwargs['expected_provided'], kwargs['provided'])
return args[1]
def ret_val_explicit(wrp, val, provided, expected_provided):
self.assertEqual(self.dwrap, wrp)
self.assertEqual(expected_provided, provided)
return val
self.assertRaises(ValueError, wtsk.add_functor_subtask, int,
provides='one')
wtsk.add_functor_subtask(
ret_val_kwargs, 5, provides='five',
expected_provided={'one': 1, 'two': 2, 'four': None})
wtsk.add_functor_subtask(
ret_val_kwargs, 6,
expected_provided={'one': 1, 'two': 2, 'four': None, 'five': 5})
wtsk.add_functor_subtask(
ret_val_explicit, 7, provides='seven',
expected_provided={'one': 1, 'two': 2, 'four': None, 'five': 5})
wtsk.add_functor_subtask(
ret_val_explicit, 8,
expected_provided={'one': 1, 'two': 2, 'four': None, 'five': 5,
'seven': 7})
# Execute the WrapperTask, verifying assertions in ChainSubtask[2] and
        # ret_val_{kwargs|explicit}
wrapper, subtask_rets = wtsk.execute()
self.assertEqual(self.dwrap, wrapper)
# Verify final form of subtask_rets returned from WrapperTask.execute()
self.assertEqual(
{'one': 1, 'two': 2, 'four': None, 'five': 5, 'seven': 7},
subtask_rets)
class TestFeedTask(twrap.TestWrapper):
file = 'lpar.txt'
wrapper_class_to_test = lpar.LPAR
def setUp(self):
super(TestFeedTask, self).setUp()
self.getter = lpar.LPAR.getter(self.adpt)
# Set this up for getter.get()
self.adpt.read.return_value = self.resp
self.feed_task = tx.FeedTask('name', lpar.LPAR.getter(self.adpt))
def test_invalid_feed_or_getter(self):
"""Various evil inputs to FeedTask.__init__'s feed_or_getter."""
self.assertRaises(ValueError, tx.FeedTask, 'name', 'something bogus')
# A "feed" of things that aren't EntryWrappers
self.assertRaises(ValueError, tx.FeedTask, 'name', [1, 2, 3])
# This one fails because .getter(..., uuid) produces EntryWrapperGetter
self.assertRaises(ValueError, tx.FeedTask, 'name',
lpar.LPAR.getter(self.adpt, 'a_uuid'))
# Init with explicit empty feed tested below in test_empty_feed
@mock.patch('pypowervm.wrappers.entry_wrapper.FeedGetter.get')
def test_empty_feed(self, mock_get):
mock_get.return_value = []
# We're allowed to initialize it with a FeedGetter
fm = tx.FeedTask('name', ewrap.FeedGetter('mock', ewrap.EntryWrapper))
# But as soon as we call a 'greedy' method, which does a .get, we raise
self.assertRaises(ex.FeedTaskEmptyFeed, fm.get_wrapper, 'uuid')
# Init with an explicit empty feed (list) raises right away
self.assertRaises(ex.FeedTaskEmptyFeed, tx.FeedTask, 'name', [])
def test_wrapper_task_adds_and_replication(self):
"""Deferred replication of individual WrapperTasks with adds.
Covers:
- wrapper_tasks
- get_wrapper
- add_subtask
- add_functor_subtask
"""
def wt_check(wt1, wt2, len1, len2=None, upto=None):
"""Assert that two WrapperTasks have the same Subtasks.
:param wt1, wt2: The WrapperTask instances to compare.
:param len1, len2: The expected lengths of the WrapperTask.subtasks
of wt1 and wt2, respectively. If len2 is None,
it is assumed to be the same as len1.
:param upto: (Optional, int) If specified, only the first 'upto'
Subtasks are compared. Otherwise, the subtask lists
are compared up to the lesser of len1 and len2.
"""
if len2 is None:
len2 = len1
self.assertEqual(len1, len(wt1.subtasks))
self.assertEqual(len2, len(wt2.subtasks))
if upto is None:
upto = min(len1, len2)
for i in range(upto):
self.assertIs(wt1.subtasks[i], wt2.subtasks[i])
# "Functors" for easy subtask creation. Named so we can identify them.
foo = lambda: None
bar = lambda: None
baz = lambda: None
xyz = lambda: None
abc = lambda: None
# setUp's initialization of feed_task creates empty dict and common_tx
self.assertEqual({}, self.feed_task._tx_by_uuid)
self.assertEqual(0, len(self.feed_task._common_tx.subtasks))
# Asking for the feed does *not* replicate the WrapperTasks
feed = self.feed_task.feed
self.assertEqual({}, self.feed_task._tx_by_uuid)
self.assertEqual(0, len(self.feed_task._common_tx.subtasks))
# Add to the FeedTask
self.feed_task.add_subtask(tx._FunctorSubtask(foo))
self.feed_task.add_functor_subtask(bar)
# Still does not replicate
self.assertEqual({}, self.feed_task._tx_by_uuid)
subtasks = self.feed_task._common_tx.subtasks
# Make sure the subtasks are legit and in order
self.assertEqual(2, len(subtasks))
self.assertIsInstance(subtasks[0], tx.Subtask)
self.assertIsInstance(subtasks[1], tx.Subtask)
# Yes, these are both _FunctorSubtasks, but the point is verifying that
# they are in the right order.
self.assertIs(foo, subtasks[0]._func)
self.assertIs(bar, subtasks[1]._func)
# Now call something that triggers replication
wrap10 = self.feed_task.get_wrapper(feed[10].uuid)
self.assertEqual(feed[10], wrap10)
self.assertNotEqual({}, self.feed_task._tx_by_uuid)
self.assertEqual({lwrap.uuid for lwrap in feed},
set(self.feed_task.wrapper_tasks.keys()))
# Pick a couple of wrapper tasks at random.
wt5, wt8 = (self.feed_task.wrapper_tasks[feed[i].uuid] for i in (5, 8))
# They should not be the same
self.assertNotEqual(wt5, wt8)
# Their subtasks should not refer to the same lists
self.assertIsNot(wt5.subtasks, wt8.subtasks)
# But they should have the same Subtasks (the same actual instances)
wt_check(wt5, wt8, 2)
# Adding more subtasks to the feed manager adds to all (and by the way,
# we don't have to refetch the WrapperTasks).
self.feed_task.add_functor_subtask(baz)
wt_check(wt5, wt8, 3)
self.assertIs(baz, wt5.subtasks[2]._func)
# Adding to an individual WrapperTask just adds to that one
wt5.add_functor_subtask(xyz)
wt_check(wt5, wt8, 4, 3)
self.assertIs(xyz, wt5.subtasks[3]._func)
# And we can still add another to both afterward
self.feed_task.add_functor_subtask(abc)
wt_check(wt5, wt8, 5, 4, upto=3)
# Check the last couple by hand
self.assertIs(xyz, wt5.subtasks[3]._func)
self.assertIs(wt5.subtasks[4], wt8.subtasks[3])
self.assertIs(abc, wt5.subtasks[4]._func)
def test_deferred_feed_get(self):
"""Test deferred and unique GET of the internal feed."""
# setUp inits self.feed_task with FeedGetter. This doesn't call read.
self.assertEqual(0, self.adpt.read.call_count)
lfeed = self.feed_task.feed
self.assertEqual(1, self.adpt.read.call_count)
self.adpt.read.assert_called_with(
'LogicalPartition', None, child_id=None, child_type=None, xag=None)
self.assertEqual(21, len(lfeed))
self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985', lfeed[0].uuid)
# Getting feed again doesn't invoke GET again.
lfeed = self.feed_task.feed
self.assertEqual(1, self.adpt.read.call_count)
self.assertEqual(21, len(lfeed))
self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985', lfeed[0].uuid)
# Init with a feed - read is never called
self.adpt.read.reset_mock()
ftsk = tx.FeedTask('name', lfeed)
self.assertEqual(0, self.adpt.read.call_count)
nfeed = ftsk.feed
self.assertEqual(0, self.adpt.read.call_count)
self.assertEqual(lfeed, nfeed)
def test_rebuild_feed(self):
"""Feed gets rebuilt when transactions exist and an etag mismatches."""
# Populate and retrieve the feed
lfeed = self.feed_task.feed
# Pick out a wrapper UUID to use, from somewhere in the middle
uuid = lfeed[13].uuid
# Populate etags
for i in range(len(lfeed)):
lfeed[i]._etag = i + 100
# This get_wrapper will replicate the UUID-to-WrapperTask dict.
# Create a *copy* of the wrapper so that changing it will simulate how
# a WrapperTask modifies its internal EntryWrapper on update() without
# that change being reflected back to the FeedTask's _feed. (Current
# mocks are just returning the same wrapper all the time.)
lpar13 = copy.deepcopy(self.feed_task.get_wrapper(uuid))
self.assertNotEqual({}, self.feed_task._tx_by_uuid)
# Set unique etag.
lpar13._etag = 42
# And stuff it back in the WrapperTask
self.feed_task.wrapper_tasks[uuid]._wrapper = lpar13
# Now we're set up. First prove that the feed (as previously grabbed)
# isn't already reflecting the new entry.
self.assertNotEqual(lpar13.etag, lfeed[13].etag)
        # Ask for the feed again; the rebuilt feed should now reflect the change.
# The feed may have been reshuffled, so we have to find our LPAR again.
lfind = None
for entry in self.feed_task.feed:
if entry.uuid == uuid:
lfind = entry
break
self.assertEqual(lpar13.etag, lfind.etag)
# And it is in fact the new one now in the feed.
self.assertEqual(42, lfind.etag)
def test_execute(self):
"""Execute a 'real' FeedTask."""
feed = self.feed_task.feed
# Initialize expected/actual flags dicts:
# {uuid: [ordered, list, of, flags]}
# The list of flags for a given UUID should be ordered the same as the
# subtasks, though they may get shotgunned to the dict via parallel
# execution of the WrapperTasks.
exp_flags = {ent.uuid: [] for ent in feed}
act_flags = {ent.uuid: [] for ent in feed}
# A function that we can run within a Subtask. No triggering update
# since we're just making sure the Subtasks run.
def func(wrapper, flag):
with lock.lock('act_flags'):
act_flags[wrapper.uuid].append(flag)
return False
# Start with a subtask common to all
self.feed_task.add_functor_subtask(func, 'common1')
for ent in feed:
exp_flags[ent.uuid].append('common1')
# Add individual separate subtasks to a few of the WrapperTasks
for i in range(5, 15):
self.feed_task.wrapper_tasks[
feed[i].uuid].add_functor_subtask(func, i)
exp_flags[feed[i].uuid].append(i)
# Add another common subtask
self.feed_task.add_functor_subtask(func, 'common2')
for ent in feed:
exp_flags[ent.uuid].append('common2')
# Run it!
self.feed_task.execute()
self.assertEqual(exp_flags, act_flags)
@mock.patch('taskflow.patterns.unordered_flow.Flow.__init__')
def test_no_subtasks(self, mock_flow):
"""Ensure that a FeedTask with no Subtasks is a no-op."""
# No REST mocks - any REST calls will blow up.
# Mocking Flow initializer to fail, ensuring it doesn't get called.
mock_flow.side_effect = self.fail
tx.FeedTask('feed_task', lpar.LPAR.getter(None)).execute()
def test_post_exec(self):
def log_func(msg):
def _log(*a, **k):
ftfx.log(msg)
return _log
def log_task(msg):
return tf_task.FunctorTask(log_func(msg), name='functor_%s' % msg)
# Limit the feed to two to keep the logging sane
ftfx = self.useFixture(fx.FeedTaskFx(self.entries[:2]))
# Make the logging predictable by limiting to one thread
ftsk = tx.FeedTask('post_exec', lpar.LPAR.getter(None), max_workers=1)
# First prove that a FeedTask with *only* post-execs can run.
ftsk.add_post_execute(log_task('post1'))
ftsk.add_post_execute(log_task('post2'))
ftsk.execute()
# Note that no GETs or locks happen
self.assertEqual(['post1', 'post2'], ftfx.get_log())
# Now add regular subtasks
ftfx.reset_log()
ftsk.add_functor_subtask(log_func('main1'))
ftsk.add_functor_subtask(log_func('main2'))
ftsk.execute()
# One GET, up front. Posts happen at the end.
self.assertEqual(['get',
'lock', 'main1', 'main2', 'unlock',
'lock', 'main1', 'main2', 'unlock',
'post1', 'post2'], ftfx.get_log())
def test_wrapper_task_rets(self):
# Limit the feed to two to keep the return size sane
ftfx = self.useFixture(fx.FeedTaskFx(self.entries[:2]))
ftsk = tx.FeedTask('subtask_rets', lpar.LPAR.getter(None),
update_timeout=123)
exp_wtr = {
wrp.uuid: {
'wrapper': wrp,
'the_id': wrp.id,
'the_name': wrp.name}
for wrp in ftsk.feed}
called = []
def return_wrapper_name(wrapper):
return wrapper.name
def return_wrapper_id(wrapper):
return wrapper.id
def verify_rets_implicit(wrapper_task_rets):
called.append('implicit')
self.assertEqual(exp_wtr, wrapper_task_rets)
return 'verify_rets_implicit_return'
def verify_rets_explicit(**kwargs):
called.append('explicit')
self.assertEqual(exp_wtr, kwargs['wrapper_task_rets'])
return 'verify_rets_explicit_return'
ftsk.add_functor_subtask(return_wrapper_name, provides='the_name')
ftsk.add_functor_subtask(return_wrapper_id, provides='the_id')
# Execute once here to make sure the return is in the right shape when
# there are no post-execs
self.assertEqual({
'wrapper_task_rets': {
self.entries[0].uuid: {'the_name': self.entries[0].name,
'the_id': self.entries[0].id,
'wrapper': self.entries[0]},
self.entries[1].uuid: {'the_name': self.entries[1].name,
'the_id': self.entries[1].id,
'wrapper': self.entries[1]}}},
ftsk.execute())
ftsk.add_post_execute(tf_task.FunctorTask(
verify_rets_implicit, provides='post_exec_implicit'))
ftsk.add_post_execute(tf_task.FunctorTask(
verify_rets_explicit, requires='wrapper_task_rets',
provides='post_exec_explicit'))
ret = ftsk.execute()
# Make sure the post-execs actually ran (to guarantee their internal
# assertions passed).
self.assertEqual(['implicit', 'explicit'], called)
ftfx.patchers['update'].mock.assert_called_with(mock.ANY, timeout=123)
# Verify that we got the returns from the subtasks AND the post-execs
self.assertEqual({
'wrapper_task_rets': {
self.entries[0].uuid: {'the_name': self.entries[0].name,
'the_id': self.entries[0].id,
'wrapper': self.entries[0]},
self.entries[1].uuid: {'the_name': self.entries[1].name,
'the_id': self.entries[1].id,
'wrapper': self.entries[1]}},
'post_exec_implicit': 'verify_rets_implicit_return',
'post_exec_explicit': 'verify_rets_explicit_return'}, ret)
def test_subtask_thread_local(self):
"""Security context and locks, if set, propagates to WrapperTasks."""
def verify_no_ctx(wrapper):
self.assertIsNone(ctx.get_current())
tx.FeedTask('test_no_context', lpar.LPAR.getter(
self.adpt)).add_functor_subtask(verify_no_ctx).execute()
def verify_ctx(wrapper):
_context = ctx.get_current()
self.assertIsNotNone(_context)
self.assertEqual('123', _context.request_id)
# Copy the base set of locks to expect
our_locks = list(locks)
# Add our wrappers uuid since that will be set also.
our_locks.append(wrapper.uuid)
self.assertEqual(set(our_locks), set(tx._get_locks()))
ctx.RequestContext(request_id='123')
locks = ['L123', 'L456', 'L789']
tx._set_locks(locks)
tx.FeedTask('test_set_context', lpar.LPAR.getter(
self.adpt)).add_functor_subtask(verify_ctx).execute()
# Context propagates even if FeedTask is executed in a subthread, as
# long as our executor is used.
# Make two to ensure they're run in separate threads
ft1 = tx.FeedTask('subthread1', lpar.LPAR.getter(
self.adpt)).add_functor_subtask(verify_ctx)
ft2 = tx.FeedTask('subthread2', lpar.LPAR.getter(
self.adpt)).add_functor_subtask(verify_ctx)
self.assertRaises(tf_ex.WrappedFailure, tf_eng.run,
tf_uf.Flow('subthread_flow').add(ft1, ft2),
engine='parallel')
tf_eng.run(
tf_uf.Flow('subthread_flow').add(ft1, ft2), engine='parallel',
executor=tx.ContextThreadPoolExecutor(2))
class TestExceptions(unittest.TestCase):
def test_exceptions(self):
def bad1(wrapper, s):
bad2(wrapper)
def bad2(wrapper):
bad3(wrapper.field)
def bad3(tag):
raise IOError("this is an exception on %s!" % tag)
# With one entry in the feed, one exception should be raised, and it
# should bubble up as normal.
feed = [mock.Mock(spec=lpar.LPAR, field='lpar1')]
ft = tx.FeedTask('ft', feed).add_functor_subtask(bad1, 'this is bad')
flow = tf_uf.Flow('the flow')
flow.add(ft)
self.assertRaises(IOError, tf_eng.run, flow)
# With multiple entries in the feed, TaskFlow will wrap the exceptions
# in a WrappedFailure. We should repackage it, and the message in the
# resulting MultipleExceptionsInFeedTask should contain all the
# exception messages.
feed.append(mock.Mock(spec=lpar.LPAR, field='lpar2'))
ft = tx.FeedTask('ft', feed).add_functor_subtask(bad1, 'this is bad')
flow = tf_uf.Flow('the flow')
flow.add(ft)
with self.assertRaises(ex.MultipleExceptionsInFeedTask) as mult_ex:
tf_eng.run(flow)
# Make sure the wrapped exception messages show up in the exception.
self.assertIn('exception on lpar1!', mult_ex.exception.args[0])
self.assertIn('exception on lpar2!', mult_ex.exception.args[0])
|
|
# coding: utf-8
# In[1]:
import numpy as np
import random
from ipythonblocks import BlockGrid as BG
from IPython.html import widgets
# In[2]:
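# Builds a random height x width grid of ints (0 = empty, 1 and 2 = the two agent
# types); Red_perc and Blue_perc give the percentage of 1-cells and 2-cells.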
def make_num_grid(width,height,Red_perc,Blue_perc):
grid = np.random.choice((0,1,2),size = (height,width), p = [1.00-((Red_perc+Blue_perc)/100),Red_perc/100,Blue_perc/100])
return grid
# In[3]:
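# For each cell, counts how many of its 3/5/8 neighbours hold the opposite value;
# a 1-cell or 2-cell whose fraction of opposite neighbours is at least
# percent/100 is marked as wanting to move. Returns the grid plus coordinate
# lists of the empty cells and of the 1-cells and 2-cells that want to move.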
def near_me_num(grid,percent):
i = 0
j = 0
moving_array = []
moving_ZERO_array = []
moving_ONE_array = []
moving_TWO_array = []
height = grid.shape[0]
width = grid.shape[1]
for row in range(0,height):
for col in range(0,width):
if grid[row,col] == 1:
#Every row but first and last
if (row != 0 and row != height-1) and (col != 0 and col != width-1) and (height !=2 and width != 2):
if grid[row-1,col] == 2:
i+=1
if grid[row+1,col] == 2:
i+=1
if grid[row,col+1] == 2:
i+=1
if grid[row,col-1] == 2:
i+=1
if grid[row+1,col+1] == 2:
i+=1
if grid[row+1,col-1] == 2:
i+=1
if grid[row-1,col+1] == 2:
i+=1
                    if grid[row-1,col-1] == 2:
                        i+=1
if i/8 >= percent/100:
moving_ONE_array.append((row,col))
i = 0
#Upper Left Corner
if row == 0 and col == 0:
if grid[row+1,col] == 2:
i+=1
if grid[row+1,col+1] == 2:
i+=1
if grid[row,col+1] == 2:
i+=1
if i/3 >= percent/100:
moving_ONE_array.append((row,col))
i = 0
#Lower Left Corner
if row == height-1 and col == 0:
if grid[row-1,col] == 2:
i+=1
if grid[row-1,col+1] == 2:
i+=1
if grid[row,col+1] == 2:
i+=1
if i/3 >= percent/100:
moving_ONE_array.append((row,col))
i = 0
#Upper Right Corner
if col == width-1 and row == 0:
if grid[row+1,col] == 2:
i+=1
if grid[row+1,col-1] == 2:
i+=1
if grid[row,col-1] == 2:
i+=1
if i/3 >= percent/100:
moving_ONE_array.append((row,col))
i = 0
#Lower Right Corner
if col == width-1 and row == height-1:
if grid[row-1,col] == 2:
i+=1
if grid[row-1,col-1] == 2:
i+=1
if grid[row,col-1] == 2:
i+=1
if i/3 >= percent/100:
moving_ONE_array.append((row,col))
i = 0
#First col, no corners
if col == 0 and (row!= 0 and row!= height-1):
if grid[row+1,col] == 2:
i+=1
if grid[row+1,col+1] == 2:
i+=1
if grid[row,col+1] == 2:
i+=1
if grid[row-1,col+1] == 2:
i+=1
if grid[row-1,col] == 2:
i+=1
if i/5 >= percent/100:
moving_ONE_array.append((row,col))
i = 0
#Bottom row, no corners
if row == height-1 and (col != 0 and col != width-1):
if grid[row,col-1] == 2:
i+=1
if grid[row-1,col-1] == 2:
i+=1
if grid[row-1,col] == 2:
i+=1
if grid[row-1,col+1] == 2:
i+=1
if grid[row,col+1] == 2:
i+=1
if i/5 >= percent/100:
moving_ONE_array.append((row,col))
i = 0
#Last col, no corners
if col == width-1 and (row != 0 and row != height-1):
if grid[row+1,col] == 2:
i+=1
if grid[row+1,col-1] == 2:
i+=1
if grid[row,col-1] == 2:
i+=1
if grid[row-1,col-1] == 2:
i+=1
if grid[row-1,col] == 2:
i+=1
if i/5 >= percent/100:
moving_ONE_array.append((row,col))
i = 0
#First row, no corners
if row == 0 and (col != 0 and col != width-1):
if grid[row,col-1] == 2:
i+=1
if grid[row+1,col-1] == 2:
i+=1
if grid[row+1,col] == 2:
i+=1
if grid[row+1,col+1] == 2:
i+=1
if grid[row,col+1] == 2:
i+=1
if i/5 >= percent/100:
moving_ONE_array.append((row,col))
i = 0
if grid[row,col] == 2:
#Every row but first and last
if (row != 0 and row != height-1) and (col != 0 and col != width-1) and (height !=2 and width != 2):
if grid[row-1,col] == 1:
j+=1
if grid[row+1,col] == 1:
j+=1
if grid[row,col+1] == 1:
j+=1
if grid[row,col-1] == 1:
j+=1
if grid[row+1,col+1] == 1:
j+=1
if grid[row+1,col-1] == 1:
j+=1
if grid[row-1,col+1] == 1:
j+=1
                    if grid[row-1,col-1] == 1:
                        j+=1
if j/8 >= percent/100:
moving_TWO_array.append((row,col))
j = 0
#Upper Left Corner
if row == 0 and col == 0:
                    if grid[row+1,col] == 1:
                        j+=1
                    if grid[row+1,col+1] == 1:
                        j+=1
if grid[row,col+1] == 1:
j+=1
if j/3 >= percent/100:
moving_TWO_array.append((row,col))
j = 0
#Lower Left Corner
if row == height-1 and col == 0:
if grid[row-1,col] ==1:
j+=1
if grid[row-1,col+1] == 1:
j+=1
if grid[row,col+1] == 1:
j+=1
if j/3 >= percent/100:
moving_TWO_array.append((row,col))
j = 0
#Upper Right Corner
if col == width-1 and row == 0:
                    if grid[row+1,col] == 1:
                        j+=1
                    if grid[row+1,col-1] == 1:
                        j+=1
if grid[row,col-1] == 1:
j+=1
if j/3 >= percent/100:
moving_TWO_array.append((row,col))
j = 0
#Lower Right Corner
if col == width-1 and row == height-1:
if grid[row-1,col] == 1:
j+=1
if grid[row-1,col-1] == 1:
j+=1
if grid[row,col-1] == 1:
j+=1
if j/3 >= percent/100:
moving_TWO_array.append((row,col))
j = 0
#First col, no corners
if col == 0 and (row!= 0 and row!= height-1):
if grid[row+1,col] == 1:
j+=1
if grid[row+1,col+1] ==1:
j+=1
if grid[row,col+1] == 1:
j+=1
if grid[row-1,col+1] == 1:
j+=1
if grid[row-1,col] == 1:
j+=1
if j/5 >= percent/100:
moving_TWO_array.append((row,col))
j = 0
#Bottom row, no corners
if row == height-1 and (col != 0 and col != width-1):
if grid[row,col-1] == 1:
j+=1
if grid[row-1,col-1] == 1:
j+=1
if grid[row-1,col] == 1:
j+=1
if grid[row-1,col+1] == 1:
j+=1
if grid[row,col+1] == 1:
j+=1
if j/5 >= percent/100:
moving_TWO_array.append((row,col))
j = 0
#Last col, no corners
if col == width-1 and (row != 0 and row != height-1):
if grid[row+1,col] == 1:
j+=1
if grid[row+1,col-1] == 1:
j+=1
if grid[row,col-1] == 1:
j+=1
if grid[row-1,col-1] == 1:
j+=1
if grid[row-1,col] == 1:
j+=1
if j/5 >= percent/100:
moving_TWO_array.append((row,col))
j = 0
#First row, no corners
if row == 0 and (col != 0 and col != width-1):
if grid[row,col-1] == 1:
j+=1
if grid[row+1,col-1] == 1:
j+=1
if grid[row+1,col] == 1:
j+=1
if grid[row+1,col+1] == 1:
j+=1
if grid[row,col+1] == 1:
j+=1
if j/5 >= percent/100:
moving_TWO_array.append((row,col))
j = 0
if grid[row,col] == 0:
moving_ZERO_array.append((row,col))
return grid, moving_ZERO_array, moving_ONE_array, moving_TWO_array
# In[4]:
test_grid = np.array([[1,1,1],[2,2,2],[1,1,1]])
print (test_grid)
near_test = near_me_num(test_grid,50)
assert near_test[3][0] == (1,0)
assert near_test[3][1] == (1,1)
assert near_test[2][0] == (0,0)
assert near_test[2][1] == (0,1)
# In[5]:
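# Shuffles the three coordinate lists, then swaps the unhappy 1-cells and 2-cells
# pairwise; any surplus movers on either side are placed into empty cells
# (array0) when some are available. Returns the updated grid.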
def Meathead_Movers(newgrid, array0, array1, array2):
i = 0
j = 0
k = 0
np.random.shuffle(array0)
np.random.shuffle(array1)
np.random.shuffle(array2)
LEN_A0 = len(array0)
LEN_A1 = len(array1)
LEN__A1 = len(array1)-1
LEN_A2 = len(array2)
if LEN_A1 < LEN_A2 and LEN_A0 != 0:
while i <= LEN_A1-1:
newgrid[array1[i]] = 2
newgrid[array2[k]] = 1
#print ("MOVED",array1[i],1,array2[k],2)
i+=1
k+=1
while i >= LEN_A1 and i < LEN_A2:
newgrid[array0[j]] = 2
newgrid[array2[i]] = 0
#print ("MOVED",array2[i],0)
i+=1
j+=1
if LEN_A1 > LEN_A2 and LEN_A0 != 0:
while i <= LEN_A2-1:
newgrid[array1[i]] = 2
newgrid[array2[k]] = 1
#print ("MOVED",array1[i],1,array2[i],2)
i+=1
k+=1
        while LEN__A1 > LEN_A2:
if LEN_A0 == 0:
break
newgrid[array0[j]] = 1
newgrid[array1[LEN__A1-1]] = 0
#print ("MOVED",array1[LEN__A1-1],1,array0[j],0)
            LEN__A1 -=1
j+=1
if LEN_A2 == LEN_A1:
np.random.shuffle(array1)
while i < LEN_A2:
newgrid[array1[i]] = 2
newgrid[array2[i]] = 1
#print ("MOVED",array1[i],1,array2[i],2)
i+=1
if LEN_A0 == 0:
if LEN_A1 < LEN_A2:
while i <= LEN_A1-1:
newgrid[array1[i]] = 2
newgrid[array2[i]] = 1
#print ("MOVED",array1[i],1,array2[k],2)
i+=1
k+=1
if LEN_A1>=LEN_A2:
while i <= LEN_A2-1:
newgrid[array1[i]] = 2
newgrid[array2[i]] = 1
#print ("MOVED",array1[i],1,array2[i],2)
i+=1
k+=1
return newgrid
# In[6]:
meat_time = Meathead_Movers(*near_test)
print (meat_time)
assert meat_time[1,0] != 2
assert meat_time[1,1] != 2
assert meat_time[1,2] != 2
# In[7]:
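# Renders the grid as an ipythonblocks BlockGrid: 1-cells blue, 2-cells red,
# empty cells black.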
def colorful(grid):
f = BG(grid.shape[1],grid.shape[0],fill=(0, 0, 0),block_size=4)
for row in range(f.height):
for col in range(f.width):
sq = f[row,col]
if grid[row,col] == 1:
sq.blue = 900000000
if grid[row,col] == 2:
sq.red = 900000000
return f
# In[ ]:
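# A usage sketch (not part of the original notebook): one plausible way to chain
# the helpers above into a short simulation loop. The grid size, percentages,
# 50% threshold and step count are illustrative assumptions only, so the code is
# left commented out.
#sim = make_num_grid(20, 20, 40, 40)
#for step in range(10):
#    sim, empties, movers1, movers2 = near_me_num(sim, 50)
#    sim = Meathead_Movers(sim, empties, movers1, movers2)
#colorful(sim)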
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# We need to keep the same argument names for mocked calls (to accept kwargs),
# and thus can't use a '_' prefix to silence the warning.
# pylint: disable=unused-argument
import datetime
import hashlib
import StringIO
from testing_utils import testing
from components import auth
from components import auth_testing
from components import utils
import cloudstorage
import config
from cas import impl
from . import common
class CASServiceImplTest(testing.AppengineTestCase):
def setUp(self):
super(CASServiceImplTest, self).setUp()
self.mock_cloudstorage_stat([])
self.mock_cloudstorage_delete()
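  # Mocks cloudstorage.stat so that only paths in |existing_files| appear to exist.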
def mock_cloudstorage_stat(self, existing_files):
existing_files = set(existing_files)
def stat_mock(filename, retry_params):
if filename in existing_files:
return cloudstorage.GCSFileStat(filename, 0, 'etag', 0)
raise cloudstorage.NotFoundError()
self.mock(impl.cloudstorage, 'stat', stat_mock)
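  # Mocks cloudstorage.delete; returns the set that collects deleted paths.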
def mock_cloudstorage_delete(self):
deleted_set = set()
def delete_mock(filename, retry_params):
deleted_set.add(filename)
self.mock(impl.cloudstorage, 'delete', delete_mock)
return deleted_set
def mock_now(self, now):
super(CASServiceImplTest, self).mock_now(now)
self.mock(utils, 'utcnow', lambda: now)
def test_get_cas_service_ok(self):
conf = config.GlobalConfig(
cas_gs_path='/cas_gs_path/abc/',
cas_gs_temp='/cas_gs_temp/def/')
self.mock(config, 'cached', lambda: conf)
self.assertIsNotNone(impl.get_cas_service())
def test_get_cas_service_no_config(self):
conf = config.GlobalConfig()
self.mock(config, 'cached', lambda: conf)
self.assertIsNone(impl.get_cas_service())
def test_get_cas_service_bad_config(self):
conf = config.GlobalConfig(
cas_gs_path='blah',
cas_gs_temp='/cas_gs_temp/def')
self.mock(config, 'cached', lambda: conf)
self.assertIsNone(impl.get_cas_service())
def test_fetch(self):
service = impl.CASService(
'/bucket/real', '/bucket/temp',
auth.ServiceAccountKey('account@email.com', 'PEM private key', 'id'))
    # The actual _rsa_sign implementation depends on PyCrypto, which for some
    # reason is not importable in unit tests. _rsa_sign is small enough to be
    # "tested" manually on the dev server.
calls = []
def fake_sign(pkey, data):
calls.append((pkey, data))
return '+signature+'
self.mock(service, '_rsa_sign', fake_sign)
self.mock_now(utils.timestamp_to_datetime(1416444987 * 1000000.))
# Signature and email should be urlencoded.
url = service.generate_fetch_url('SHA1', 'a' * 40)
self.assertEqual(
'https://storage.googleapis.com/bucket/real/SHA1/'
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?'
'GoogleAccessId=account%40email.com&'
'Expires=1416448587&'
'Signature=%2Bsignature%2B', url)
# Since _rsa_sign is mocked out, at least verify it is called as expected.
self.assertEqual([(
'PEM private key',
'GET\n\n\n1416448587\n/bucket/real/SHA1/' + 'a'*40
)], calls)
def test_is_object_present(self):
service = impl.CASService('/bucket/real', '/bucket/temp')
self.mock_cloudstorage_stat(['/bucket/real/SHA1/' + 'a' * 40])
self.assertTrue(service.is_object_present('SHA1', 'a' * 40))
self.assertFalse(service.is_object_present('SHA1', 'b' * 40))
with self.assertRaises(AssertionError):
service.is_object_present('SHA1', 'wrong')
def test_create_upload_session_and_fetch_upload_session(self):
service = impl.CASService('/bucket/real', '/bucket/temp')
mocked_time = utils.timestamp_to_datetime(1416444987 * 1000000.)
self.mock_now(mocked_time)
def mocked_open(filename, mode, retry_params):
self.assertEqual(filename, '/bucket/temp/1416444987_1')
self.assertEqual(mode, 'w')
self.assertEqual(retry_params, service._retry_params)
# Mock guts of ReadingBuffer :(
return common.Mock(
_path_with_token='/bucket/temp/1416444987_1?upload_id=abc',
_api=common.Mock(api_url='https://fake.com'))
self.mock(impl.cloudstorage, 'open', mocked_open)
obj, signed_id = service.create_upload_session(
'SHA1', 'a' * 40, auth_testing.DEFAULT_MOCKED_IDENTITY)
self.assertEqual(obj.key.id(), 1)
self.assertEqual(obj.to_dict(), {
'created_by': auth_testing.DEFAULT_MOCKED_IDENTITY,
'created_ts': mocked_time,
'error_message': None,
'final_gs_location': '/bucket/real/SHA1/' + 'a' * 40,
'hash_algo': 'SHA1',
'hash_digest': 'a' * 40,
'status': impl.UploadSession.STATUS_UPLOADING,
'temp_gs_location': '/bucket/temp/1416444987_1',
'upload_url': 'https://fake.com/bucket/temp/1416444987_1?upload_id=abc',
})
# Token should be readable.
embedded = impl.UploadIdSignature.validate(
signed_id, [auth_testing.DEFAULT_MOCKED_IDENTITY.to_bytes()])
self.assertEqual(embedded, {'id': '1'})
# Verify fetch_upload_session can use it too.
fetched = service.fetch_upload_session(
signed_id, auth_testing.DEFAULT_MOCKED_IDENTITY)
self.assertIsNotNone(fetched)
self.assertEqual(fetched.to_dict(), obj.to_dict())
def test_fetch_upload_session_bad_token(self):
service = impl.CASService('/bucket/real', '/bucket/temp')
obj = service.fetch_upload_session(
'blah', auth_testing.DEFAULT_MOCKED_IDENTITY)
self.assertIsNone(obj)
def test_maybe_finish_upload_non_uploading(self):
service = impl.CASService('/bucket/real', '/bucket/temp')
def die(**_kwargs): # pragma: no cover
self.fail('Should not be called')
self.mock(impl.utils, 'enqueue_task', die)
obj1 = common.make_fake_session(status=impl.UploadSession.STATUS_ERROR)
obj1.put()
# Left in the same state.
obj2 = service.maybe_finish_upload(obj1)
self.assertEqual(obj2.status, impl.UploadSession.STATUS_ERROR)
def test_maybe_finish_upload(self):
service = impl.CASService('/bucket/real', '/bucket/temp')
calls = []
def mocked_enqueue_task(**kwargs):
calls.append(kwargs)
return True
self.mock(impl.utils, 'enqueue_task', mocked_enqueue_task)
obj1 = common.make_fake_session(status=impl.UploadSession.STATUS_UPLOADING)
obj1.put()
# Changed state.
obj2 = service.maybe_finish_upload(obj1)
self.assertEqual(obj2.status, impl.UploadSession.STATUS_VERIFYING)
# Task enqueued.
self.assertEqual(calls, [{
'queue_name': 'cas-verify',
'transactional': True,
'url': '/internal/taskqueue/cas-verify/666',
}])
def test_verify_pending_upload_bad_session(self):
service = impl.CASService('/bucket/real', '/bucket/temp')
self.assertTrue(service.verify_pending_upload(1234))
def test_verify_pending_upload_bad_state(self):
obj = common.make_fake_session(status=impl.UploadSession.STATUS_ERROR)
obj.put()
service = impl.CASService('/bucket/real', '/bucket/temp')
self.assertTrue(service.verify_pending_upload(obj.key.id()))
def test_verify_pending_upload_when_file_exists(self):
obj = common.make_fake_session(
status=impl.UploadSession.STATUS_VERIFYING,
final_gs_location='/bucket/real/SHA1/' + 'a' * 40,
temp_gs_location='/bucket/temp/temp_crap')
obj.put()
self.mock_cloudstorage_stat([obj.final_gs_location])
deleted_files = self.mock_cloudstorage_delete()
service = impl.CASService('/bucket/real', '/bucket/temp')
self.assertTrue(service.verify_pending_upload(obj.key.id()))
# Moved to PUBLISHED.
obj = obj.key.get()
self.assertEqual(obj.status, impl.UploadSession.STATUS_PUBLISHED)
# Temp clean up called.
self.assertEqual(deleted_files, set(['/bucket/temp/temp_crap']))
def test_verify_pending_upload_unfinalized(self):
obj = common.make_fake_session(
status=impl.UploadSession.STATUS_VERIFYING,
final_gs_location='/bucket/real/SHA1/' + 'a' * 40,
temp_gs_location='/bucket/temp/temp_crap')
obj.put()
def mocked_open(filename, mode, read_buffer_size, retry_params):
self.assertEqual(filename, '/bucket/temp/temp_crap')
self.assertEqual(mode, 'r')
self.assertEqual(read_buffer_size, impl.READ_BUFFER_SIZE)
raise cloudstorage.NotFoundError()
self.mock(impl.cloudstorage, 'open', mocked_open)
service = impl.CASService('/bucket/real', '/bucket/temp')
self.assertTrue(service.verify_pending_upload(obj.key.id()))
# Moved to ERROR.
obj = obj.key.get()
self.assertEqual(obj.status, impl.UploadSession.STATUS_ERROR)
self.assertEqual(
obj.error_message, 'Google Storage upload wasn\'t finalized.')
def test_verify_pending_upload_bad_hash(self):
fake_file = StringIO.StringIO('test buffer')
fake_file._etag = 'fake_etag'
obj = common.make_fake_session(
status=impl.UploadSession.STATUS_VERIFYING,
hash_algo='SHA1',
hash_digest='a' * 40,
final_gs_location='/bucket/real/SHA1/' + 'a' * 40,
temp_gs_location='/bucket/temp/temp_crap')
obj.put()
def mocked_open(filename, mode, read_buffer_size, retry_params):
self.assertEqual(filename, '/bucket/temp/temp_crap')
self.assertEqual(mode, 'r')
self.assertEqual(read_buffer_size, impl.READ_BUFFER_SIZE)
return fake_file
self.mock(impl.cloudstorage, 'open', mocked_open)
service = impl.CASService('/bucket/real', '/bucket/temp')
self.assertTrue(service.verify_pending_upload(obj.key.id()))
# Moved to ERROR.
obj = obj.key.get()
self.assertEqual(obj.status, impl.UploadSession.STATUS_ERROR)
self.assertEqual(
obj.error_message,
'Invalid SHA1 hash: expected aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, '
'got 9682248358c830bcb5f8cb867186022acfe6eeb3.')
def test_verify_pending_upload_good_hash(self):
fake_file = StringIO.StringIO('test buffer')
fake_file._etag = 'fake_etag'
obj = common.make_fake_session(
status=impl.UploadSession.STATUS_VERIFYING,
hash_algo='SHA1',
hash_digest='9682248358c830bcb5f8cb867186022acfe6eeb3',
final_gs_location=(
'/bucket/real/SHA1/9682248358c830bcb5f8cb867186022acfe6eeb3'),
temp_gs_location='/bucket/temp/temp_crap')
obj.put()
def mocked_open(filename, mode, read_buffer_size, retry_params):
self.assertEqual(filename, '/bucket/temp/temp_crap')
self.assertEqual(mode, 'r')
self.assertEqual(read_buffer_size, impl.READ_BUFFER_SIZE)
return fake_file
self.mock(impl.cloudstorage, 'open', mocked_open)
service = impl.CASService('/bucket/real', '/bucket/temp')
def mocked_copy(src, dst, src_etag):
self.assertEqual(src, '/bucket/temp/temp_crap')
self.assertEqual(
dst, '/bucket/real/SHA1/9682248358c830bcb5f8cb867186022acfe6eeb3')
self.assertEqual(src_etag, 'fake_etag')
self.mock(service, '_gs_copy', mocked_copy)
self.assertTrue(service.verify_pending_upload(obj.key.id()))
# Moved to PUBLISHED.
obj = obj.key.get()
self.assertEqual(obj.status, impl.UploadSession.STATUS_PUBLISHED)
def test_open_ok(self):
service = impl.CASService('/bucket/real', '/bucket/temp')
calls = []
def mocked_cloudstorage_open(**kwargs):
calls.append(kwargs)
return object()
self.mock(impl.cloudstorage, 'open', mocked_cloudstorage_open)
service.open('SHA1', 'a'*40, 1234)
self.assertEqual(calls, [{
'filename': '/bucket/real/SHA1/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'mode': 'r',
'read_buffer_size': 1234,
'retry_params': service._retry_params,
}])
def test_open_not_found(self):
service = impl.CASService('/bucket/real', '/bucket/temp')
def mocked_cloudstorage_open(**kwargs):
raise impl.cloudstorage.NotFoundError()
self.mock(impl.cloudstorage, 'open', mocked_cloudstorage_open)
with self.assertRaises(impl.NotFoundError):
service.open('SHA1', 'a'*40, 1234)
def test_direct_upload(self):
service = impl.CASService('/bucket/real', '/bucket/temp')
calls = []
def mocked_cloudstorage_open(filename, **_kwargs):
calls.append(('open', filename))
return StringIO.StringIO()
self.mock(impl.cloudstorage, 'open', mocked_cloudstorage_open)
self.mock(service, '_gs_copy', lambda *a: calls.append(('copy',) + a))
self.mock(service, '_gs_delete', lambda *a: calls.append(('delete',) + a))
self.mock_now(datetime.datetime(2014, 1, 1))
self.mock(impl.random, 'choice', lambda x: x[0])
with service.start_direct_upload('SHA1') as f:
f.write('abc')
f.write('def')
self.assertEqual(f.hash_digest, '1f8ac10f23c5b5bc1167bda84b833e5c057a77d2')
self.assertEqual(f.length, 6)
self.assertEqual([
(
'open',
'/bucket/temp/1388534400_direct_aaaaaaaaaaaaaaaaaaaa',
),
(
'copy',
'/bucket/temp/1388534400_direct_aaaaaaaaaaaaaaaaaaaa',
'/bucket/real/SHA1/1f8ac10f23c5b5bc1167bda84b833e5c057a77d2',
),
(
'delete',
'/bucket/temp/1388534400_direct_aaaaaaaaaaaaaaaaaaaa'
),
], calls)
# Code coverage for second noop close.
f.close()
# Code coverage for commit=False code path.
del calls[:]
with self.assertRaises(ValueError):
with service.start_direct_upload('SHA1') as f:
f.write('abc')
raise ValueError()
self.assertEqual([
(
'open',
'/bucket/temp/1388534400_direct_aaaaaaaaaaaaaaaaaaaa',
),
(
'delete',
'/bucket/temp/1388534400_direct_aaaaaaaaaaaaaaaaaaaa'
),
], calls)
|
|
#!/usr/bin/env python
"""
=============================================
dMRI: Connectivity - MRtrix, CMTK, FreeSurfer
=============================================
Introduction
============
This script, connectivity_tutorial_advanced.py, demonstrates the ability to perform connectivity mapping
using Nipype for pipelining, Freesurfer for Reconstruction / Segmentation, MRtrix for spherical deconvolution
and tractography, and the Connectome Mapping Toolkit (CMTK) for further parcellation and connectivity analysis::
python connectivity_tutorial_advanced.py
We perform this analysis using the FSL course data, which can be acquired from here:
* http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
This pipeline also requires the Freesurfer directory for 'subj1' from the FSL course data.
To save time, this data can be downloaded from here:
* http://dl.dropbox.com/u/315714/subj1.zip?dl=1
The result of this processing will be the connectome for subj1 as a Connectome File Format (CFF) File, using
the Lausanne2008 parcellation scheme. A data package containing the outputs of this pipeline can be obtained
from here:
* http://db.tt/909Q3AC1
.. seealso::
connectivity_tutorial.py
Original tutorial using Camino and the NativeFreesurfer Parcellation Scheme
www.cmtk.org
For more info about the parcellation scheme
.. warning::
The ConnectomeMapper (https://github.com/LTS5/cmp or www.cmtk.org) must be installed for this tutorial to function!
Packages and Data Setup
=======================
Import necessary modules from nipype.
"""
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs # freesurfer
import nipype.interfaces.mrtrix as mrtrix
import nipype.algorithms.misc as misc
import nipype.interfaces.cmtk as cmtk
import nipype.interfaces.dipy as dipy
import inspect
import os, os.path as op # system functions
from nipype.workflows.dmri.fsl.dti import create_eddy_correct_pipeline
from nipype.workflows.dmri.camino.connectivity_mapping import select_aparc_annot
from nipype.utils.misc import package_check
import warnings
from nipype.workflows.dmri.connectivity.nx import create_networkx_pipeline, create_cmats_to_csv_pipeline
from nipype.workflows.smri.freesurfer import create_tessellation_flow
try:
package_check('cmp')
except Exception as e:
warnings.warn('cmp not installed')
else:
import cmp
"""
This needs to point to the freesurfer subjects directory (Recon-all must have been run on subj1 from the FSL course data)
Alternatively, the reconstructed subject data can be downloaded from:
* http://dl.dropbox.com/u/315714/subj1.zip
"""
subjects_dir = op.abspath(op.join(op.curdir,'./subjects'))
fs.FSCommand.set_default_subjects_dir(subjects_dir)
fsl.FSLCommand.set_default_output_type('NIFTI')
fs_dir = os.environ['FREESURFER_HOME']
lookup_file = op.join(fs_dir,'FreeSurferColorLUT.txt')
"""
This needs to point to the fdt folder you can find after extracting
* http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
"""
data_dir = op.abspath(op.join(op.curdir,'exdata/'))
subject_list = ['subj1']
"""
Use the infosource node to loop through the subject list and define the input files.
For our purposes, these are the diffusion-weighted MR image, b vectors, and b values.
"""
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = ('subject_id', subject_list)
info = dict(dwi=[['subject_id', 'data']],
bvecs=[['subject_id','bvecs']],
bvals=[['subject_id','bvals']])
"""
Use the datasource node to perform the actual data grabbing.
Templates for the associated images are used to obtain the correct images.
"""
datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
outfields=info.keys()),
name = 'datasource')
datasource.inputs.template = "%s/%s"
datasource.inputs.base_directory = data_dir
datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
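# For illustration only: with subject_id 'subj1' the templates above should resolve
# (relative to data_dir) to roughly:
#   dwi   -> subj1/data.nii.gz   (via field_template)
#   bvecs -> subj1/bvecs         (via the default "%s/%s" template)
#   bvals -> subj1/bvals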
"""
The input node and Freesurfer sources declared here will be the main
conduits for the raw data to the rest of the processing pipeline.
"""
inputnode = pe.Node(interface=util.IdentityInterface(fields=["subject_id","dwi", "bvecs", "bvals", "subjects_dir"]), name="inputnode")
inputnode.inputs.subjects_dir = subjects_dir
FreeSurferSource = pe.Node(interface=nio.FreeSurferSource(), name='fssource')
FreeSurferSourceLH = FreeSurferSource.clone('fssourceLH')
FreeSurferSourceLH.inputs.hemi = 'lh'
FreeSurferSourceRH = FreeSurferSource.clone('fssourceRH')
FreeSurferSourceRH.inputs.hemi = 'rh'
"""
Creating the workflow's nodes
=============================
Conversion nodes
----------------
A number of conversion operations are required to obtain NIFTI files from the FreesurferSource for each subject.
Nodes are used to convert the following:
* Original structural image to NIFTI
* Pial, white, inflated, and spherical surfaces for both the left and right hemispheres are converted to GIFTI for visualization in ConnectomeViewer
* Parcellated annotation files for the left and right hemispheres are also converted to GIFTI
"""
mri_convert_Brain = pe.Node(interface=fs.MRIConvert(), name='mri_convert_Brain')
mri_convert_Brain.inputs.out_type = 'nii'
mri_convert_ROI_scale500 = mri_convert_Brain.clone('mri_convert_ROI_scale500')
mris_convertLH = pe.Node(interface=fs.MRIsConvert(), name='mris_convertLH')
mris_convertLH.inputs.out_datatype = 'gii'
mris_convertRH = mris_convertLH.clone('mris_convertRH')
mris_convertRHwhite = mris_convertLH.clone('mris_convertRHwhite')
mris_convertLHwhite = mris_convertLH.clone('mris_convertLHwhite')
mris_convertRHinflated = mris_convertLH.clone('mris_convertRHinflated')
mris_convertLHinflated = mris_convertLH.clone('mris_convertLHinflated')
mris_convertRHsphere = mris_convertLH.clone('mris_convertRHsphere')
mris_convertLHsphere = mris_convertLH.clone('mris_convertLHsphere')
mris_convertLHlabels = mris_convertLH.clone('mris_convertLHlabels')
mris_convertRHlabels = mris_convertLH.clone('mris_convertRHlabels')
"""
Diffusion processing nodes
--------------------------
.. seealso::
dmri_mrtrix_dti.py
Tutorial that focuses solely on the MRtrix diffusion processing
http://www.brain.org.au/software/mrtrix/index.html
MRtrix's online documentation
b-values and b-vectors stored in FSL's format are converted into a single encoding file for MRTrix.
"""
fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(),name='fsl2mrtrix')
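# The resulting encoding file is a plain-text gradient table (one row per volume,
# typically four columns: x, y, z and b-value) that the downstream MRtrix nodes
# consume through their 'encoding_file' inputs.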
"""
Distortions induced by eddy currents are corrected prior to fitting the tensors.
The first image is used as a reference for which to warp the others.
"""
eddycorrect = create_eddy_correct_pipeline(name='eddycorrect')
eddycorrect.inputs.inputnode.ref_num = 1
"""
Tensors are fitted to each voxel in the diffusion-weighted image and from these three maps are created:
* Major eigenvector in each voxel
* Apparent diffusion coefficient
* Fractional anisotropy
"""
dwi2tensor = pe.Node(interface=mrtrix.DWI2Tensor(),name='dwi2tensor')
tensor2vector = pe.Node(interface=mrtrix.Tensor2Vector(),name='tensor2vector')
tensor2adc = pe.Node(interface=mrtrix.Tensor2ApparentDiffusion(),name='tensor2adc')
tensor2fa = pe.Node(interface=mrtrix.Tensor2FractionalAnisotropy(),name='tensor2fa')
MRconvert_fa = pe.Node(interface=mrtrix.MRConvert(),name='MRconvert_fa')
MRconvert_fa.inputs.extension = 'nii'
"""
These nodes are used to create a rough brain mask from the b0 image.
The b0 image is extracted from the original diffusion-weighted image,
put through a simple thresholding routine, and smoothed using a 3x3 median filter.
"""
MRconvert = pe.Node(interface=mrtrix.MRConvert(),name='MRconvert')
MRconvert.inputs.extract_at_axis = 3
MRconvert.inputs.extract_at_coordinate = [0]
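# extract_at_axis=3 with coordinate [0] pulls the first volume along the fourth
# (volume) axis, i.e. the b0 image, assuming the b0 is stored first in the series.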
threshold_b0 = pe.Node(interface=mrtrix.Threshold(),name='threshold_b0')
median3d = pe.Node(interface=mrtrix.MedianFilter3D(),name='median3d')
"""
The brain mask is also used to help identify single-fiber voxels.
This is done by passing the brain mask through two erosion steps,
multiplying the remaining mask with the fractional anisotropy map, and
thresholding the result to obtain some highly anisotropic within-brain voxels.
"""
erode_mask_firstpass = pe.Node(interface=mrtrix.Erode(),name='erode_mask_firstpass')
erode_mask_secondpass = pe.Node(interface=mrtrix.Erode(),name='erode_mask_secondpass')
MRmultiply = pe.Node(interface=mrtrix.MRMultiply(),name='MRmultiply')
MRmult_merge = pe.Node(interface=util.Merge(2), name='MRmultiply_merge')
threshold_FA = pe.Node(interface=mrtrix.Threshold(),name='threshold_FA')
threshold_FA.inputs.absolute_threshold_value = 0.7
"""
For whole-brain tracking we also require a broad white-matter seed mask.
This is created by generating a white matter mask, given a brainmask, and
thresholding it at a reasonably high level.
"""
bet = pe.Node(interface=fsl.BET(mask = True), name = 'bet_b0')
gen_WM_mask = pe.Node(interface=mrtrix.GenerateWhiteMatterMask(),name='gen_WM_mask')
threshold_wmmask = pe.Node(interface=mrtrix.Threshold(),name='threshold_wmmask')
threshold_wmmask.inputs.absolute_threshold_value = 0.4
"""
The spherical deconvolution step depends on the estimate of the response function
in the highly anisotropic voxels we obtained above.
.. warning::
For damaged or pathological brains one should take care to lower the maximum harmonic order of these steps.
"""
estimateresponse = pe.Node(interface=mrtrix.EstimateResponseForSH(),name='estimateresponse')
estimateresponse.inputs.maximum_harmonic_order = 6
csdeconv = pe.Node(interface=mrtrix.ConstrainedSphericalDeconvolution(),name='csdeconv')
csdeconv.inputs.maximum_harmonic_order = 6
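# Note: a maximum harmonic order of 6 corresponds to (6 + 1) * (6 + 2) / 2 = 28
# spherical-harmonic coefficients, so the acquisition should provide at least that
# many gradient directions for the fit to be well determined.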
"""
Finally, we track probabilistically using the orientation distribution functions obtained earlier.
The tracts are then used to generate a tract-density image, and they are also converted to TrackVis format.
"""
probCSDstreamtrack = pe.Node(interface=mrtrix.ProbabilisticSphericallyDeconvolutedStreamlineTrack(),name='probCSDstreamtrack')
probCSDstreamtrack.inputs.inputmodel = 'SD_PROB'
probCSDstreamtrack.inputs.desired_number_of_tracks = 150000
tracks2prob = pe.Node(interface=mrtrix.Tracks2Prob(),name='tracks2prob')
tracks2prob.inputs.colour = True
MRconvert_tracks2prob = MRconvert_fa.clone(name='MRconvert_tracks2prob')
tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(),name='tck2trk')
trk2tdi = pe.Node(interface=dipy.TrackDensityMap(),name='trk2tdi')
"""
Structural segmentation nodes
-----------------------------
The following node identifies the transformation between the diffusion-weighted
image and the structural image. This transformation is then applied to the tracts
so that they are in the same space as the regions of interest.
"""
coregister = pe.Node(interface=fsl.FLIRT(dof=6), name = 'coregister')
coregister.inputs.cost = ('normmi')
"""
Parcellation is performed given the aparc+aseg image from Freesurfer.
The CMTK Parcellation step subdivides these regions to return a higher-resolution parcellation scheme.
The parcellation used here is entitled "scale500" and returns 1015 regions.
"""
parcellation_name = 'scale500'
parcellate = pe.Node(interface=cmtk.Parcellate(), name="Parcellate")
parcellate.inputs.parcellation_name = parcellation_name
"""
The CreateMatrix interface takes in the remapped aparc+aseg image as well as the label dictionary and fiber tracts
and outputs a number of different files. The most important of which is the connectivity network itself, which is stored
as a 'gpickle' and can be loaded using Python's NetworkX package (see CreateMatrix docstring). Also outputted are various
NumPy arrays containing detailed tract information, such as the start and endpoint regions, and statistics on the mean and
standard deviation for the fiber length of each connection. These matrices can be used in the ConnectomeViewer to plot the
specific tracts that connect between user-selected regions.
Here we choose the Lausanne2008 parcellation scheme, since we are incorporating the CMTK parcellation step.
"""
parcellation_name = 'scale500'
cmp_config = cmp.configuration.PipelineConfiguration()
cmp_config.parcellation_scheme = "Lausanne2008"
createnodes = pe.Node(interface=cmtk.CreateNodes(), name="CreateNodes")
createnodes.inputs.resolution_network_file = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]['node_information_graphml']
creatematrix = pe.Node(interface=cmtk.CreateMatrix(), name="CreateMatrix")
creatematrix.inputs.count_region_intersections = True
"""
Next we define the endpoint of this tutorial, which is the CFFConverter node, as well as a few nodes which use
the Nipype Merge utility. These are useful for passing lists of the files we want packaged in our CFF file.
The inspect.getfile command is used to package this script into the resulting CFF file, so that it is easy to
look back at the processing parameters that were used.
"""
CFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="CFFConverter")
CFFConverter.inputs.script_files = op.abspath(inspect.getfile(inspect.currentframe()))
giftiSurfaces = pe.Node(interface=util.Merge(9), name="GiftiSurfaces")
giftiLabels = pe.Node(interface=util.Merge(2), name="GiftiLabels")
niftiVolumes = pe.Node(interface=util.Merge(3), name="NiftiVolumes")
fiberDataArrays = pe.Node(interface=util.Merge(4), name="FiberDataArrays")
gpickledNetworks = pe.Node(interface=util.Merge(2), name="NetworkFiles")
"""
We also create a workflow to calculate several network metrics on our resulting file, and another CFF converter
which will be used to package these networks into a single file.
"""
networkx = create_networkx_pipeline(name='networkx')
cmats_to_csv = create_cmats_to_csv_pipeline(name='cmats_to_csv')
NxStatsCFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="NxStatsCFFConverter")
NxStatsCFFConverter.inputs.script_files = op.abspath(inspect.getfile(inspect.currentframe()))
tessflow = create_tessellation_flow(name='tessflow', out_format='gii')
tessflow.inputs.inputspec.lookup_file = lookup_file
"""
Connecting the workflow
=======================
Here we connect our processing pipeline.
Connecting the inputs, FreeSurfer nodes, and conversions
--------------------------------------------------------
"""
mapping = pe.Workflow(name='mapping')
"""
First, we connect the input node to the FreeSurfer input nodes.
"""
mapping.connect([(inputnode, FreeSurferSource,[("subjects_dir","subjects_dir")])])
mapping.connect([(inputnode, FreeSurferSource,[("subject_id","subject_id")])])
mapping.connect([(inputnode, FreeSurferSourceLH,[("subjects_dir","subjects_dir")])])
mapping.connect([(inputnode, FreeSurferSourceLH,[("subject_id","subject_id")])])
mapping.connect([(inputnode, FreeSurferSourceRH,[("subjects_dir","subjects_dir")])])
mapping.connect([(inputnode, FreeSurferSourceRH,[("subject_id","subject_id")])])
mapping.connect([(inputnode, tessflow,[("subjects_dir","inputspec.subjects_dir")])])
mapping.connect([(inputnode, tessflow,[("subject_id","inputspec.subject_id")])])
mapping.connect([(inputnode, parcellate,[("subjects_dir","subjects_dir")])])
mapping.connect([(inputnode, parcellate,[("subject_id","subject_id")])])
mapping.connect([(parcellate, mri_convert_ROI_scale500,[('roi_file','in_file')])])
"""
Nifti conversion for subject's stripped brain image from Freesurfer:
"""
mapping.connect([(FreeSurferSource, mri_convert_Brain,[('brain','in_file')])])
"""
Surface conversions to GIFTI (pial, white, inflated, and sphere for both hemispheres)
"""
mapping.connect([(FreeSurferSourceLH, mris_convertLH,[('pial','in_file')])])
mapping.connect([(FreeSurferSourceRH, mris_convertRH,[('pial','in_file')])])
mapping.connect([(FreeSurferSourceLH, mris_convertLHwhite,[('white','in_file')])])
mapping.connect([(FreeSurferSourceRH, mris_convertRHwhite,[('white','in_file')])])
mapping.connect([(FreeSurferSourceLH, mris_convertLHinflated,[('inflated','in_file')])])
mapping.connect([(FreeSurferSourceRH, mris_convertRHinflated,[('inflated','in_file')])])
mapping.connect([(FreeSurferSourceLH, mris_convertLHsphere,[('sphere','in_file')])])
mapping.connect([(FreeSurferSourceRH, mris_convertRHsphere,[('sphere','in_file')])])
"""
The annotation files are converted using the pial surface as a map via the MRIsConvert interface.
The select_aparc_annot function imported above is used to select the lh.aparc.annot and rh.aparc.annot files
specifically (rather than e.g. rh.aparc.a2009s.annot) from the output list given by the FreeSurferSource.
"""
mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels,[('pial','in_file')])])
mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels,[('pial','in_file')])])
mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, [(('annot', select_aparc_annot), 'annot_file')])])
mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, [(('annot', select_aparc_annot), 'annot_file')])])
"""
Diffusion Processing
--------------------
Now we connect the tensor computations:
"""
mapping.connect([(inputnode, fsl2mrtrix, [("bvecs", "bvec_file"),
("bvals", "bval_file")])])
mapping.connect([(inputnode, eddycorrect,[("dwi","inputnode.in_file")])])
mapping.connect([(eddycorrect, dwi2tensor,[("outputnode.eddy_corrected","in_file")])])
mapping.connect([(fsl2mrtrix, dwi2tensor,[("encoding_file","encoding_file")])])
mapping.connect([(dwi2tensor, tensor2vector,[['tensor','in_file']]),
(dwi2tensor, tensor2adc,[['tensor','in_file']]),
(dwi2tensor, tensor2fa,[['tensor','in_file']]),
])
mapping.connect([(tensor2fa, MRmult_merge,[("FA","in1")])])
mapping.connect([(tensor2fa, MRconvert_fa,[("FA","in_file")])])
"""
This block creates the rough brain mask, multiplies it with the
fractional anisotropy image, and thresholds it to get the single-fiber voxels.
"""
mapping.connect([(eddycorrect, MRconvert,[("outputnode.eddy_corrected","in_file")])])
mapping.connect([(MRconvert, threshold_b0,[("converted","in_file")])])
mapping.connect([(threshold_b0, median3d,[("out_file","in_file")])])
mapping.connect([(median3d, erode_mask_firstpass,[("out_file","in_file")])])
mapping.connect([(erode_mask_firstpass, erode_mask_secondpass,[("out_file","in_file")])])
mapping.connect([(erode_mask_secondpass, MRmult_merge,[("out_file","in2")])])
mapping.connect([(MRmult_merge, MRmultiply,[("out","in_files")])])
mapping.connect([(MRmultiply, threshold_FA,[("out_file","in_file")])])
"""
Here the thresholded white matter mask is created for seeding the tractography.
"""
mapping.connect([(eddycorrect, bet,[("outputnode.eddy_corrected","in_file")])])
mapping.connect([(eddycorrect, gen_WM_mask,[("outputnode.eddy_corrected","in_file")])])
mapping.connect([(bet, gen_WM_mask,[("mask_file","binary_mask")])])
mapping.connect([(fsl2mrtrix, gen_WM_mask,[("encoding_file","encoding_file")])])
mapping.connect([(gen_WM_mask, threshold_wmmask,[("WMprobabilitymap","in_file")])])
"""
Next we estimate the fiber response function.
"""
mapping.connect([(eddycorrect, estimateresponse,[("outputnode.eddy_corrected","in_file")])])
mapping.connect([(fsl2mrtrix, estimateresponse,[("encoding_file","encoding_file")])])
mapping.connect([(threshold_FA, estimateresponse,[("out_file","mask_image")])])
"""
Run constrained spherical deconvolution.
"""
mapping.connect([(eddycorrect, csdeconv,[("outputnode.eddy_corrected","in_file")])])
mapping.connect([(gen_WM_mask, csdeconv,[("WMprobabilitymap","mask_image")])])
mapping.connect([(estimateresponse, csdeconv,[("response","response_file")])])
mapping.connect([(fsl2mrtrix, csdeconv,[("encoding_file","encoding_file")])])
"""
Connect the tractography and compute the tract density image.
"""
mapping.connect([(threshold_wmmask, probCSDstreamtrack,[("out_file","seed_file")])])
mapping.connect([(csdeconv, probCSDstreamtrack,[("spherical_harmonics_image","in_file")])])
mapping.connect([(probCSDstreamtrack, tracks2prob,[("tracked","in_file")])])
mapping.connect([(eddycorrect, tracks2prob,[("outputnode.eddy_corrected","template_file")])])
mapping.connect([(tracks2prob, MRconvert_tracks2prob,[("tract_image","in_file")])])
"""
Structural Processing
---------------------
First, we coregister the diffusion image to the structural image
"""
mapping.connect([(eddycorrect, coregister,[("outputnode.eddy_corrected","in_file")])])
mapping.connect([(mri_convert_Brain, coregister,[('out_file','reference')])])
"""
The MRtrix-tracked fibers are converted to TrackVis format (with voxel and data dimensions grabbed from the DWI).
The connectivity matrix is created with the transformed .trk fibers and the parcellation file.
"""
mapping.connect([(eddycorrect, tck2trk,[("outputnode.eddy_corrected","image_file")])])
mapping.connect([(mri_convert_Brain, tck2trk,[("out_file","registration_image_file")])])
mapping.connect([(coregister, tck2trk,[("out_matrix_file","matrix_file")])])
mapping.connect([(probCSDstreamtrack, tck2trk,[("tracked","in_file")])])
mapping.connect([(tck2trk, creatematrix,[("out_file","tract_file")])])
mapping.connect([(tck2trk, trk2tdi,[("out_file","in_file")])])
mapping.connect([(inputnode, creatematrix,[("subject_id","out_matrix_file")])])
mapping.connect([(inputnode, creatematrix,[("subject_id","out_matrix_mat_file")])])
mapping.connect([(parcellate, creatematrix,[("roi_file","roi_file")])])
mapping.connect([(parcellate, createnodes,[("roi_file","roi_file")])])
mapping.connect([(createnodes, creatematrix,[("node_network","resolution_network_file")])])
"""
The merge nodes defined earlier are used here to create lists of the files which are
destined for the CFFConverter.
"""
mapping.connect([(mris_convertLH, giftiSurfaces,[("converted","in1")])])
mapping.connect([(mris_convertRH, giftiSurfaces,[("converted","in2")])])
mapping.connect([(mris_convertLHwhite, giftiSurfaces,[("converted","in3")])])
mapping.connect([(mris_convertRHwhite, giftiSurfaces,[("converted","in4")])])
mapping.connect([(mris_convertLHinflated, giftiSurfaces,[("converted","in5")])])
mapping.connect([(mris_convertRHinflated, giftiSurfaces,[("converted","in6")])])
mapping.connect([(mris_convertLHsphere, giftiSurfaces,[("converted","in7")])])
mapping.connect([(mris_convertRHsphere, giftiSurfaces,[("converted","in8")])])
mapping.connect([(tessflow, giftiSurfaces,[("outputspec.meshes","in9")])])
mapping.connect([(mris_convertLHlabels, giftiLabels,[("converted","in1")])])
mapping.connect([(mris_convertRHlabels, giftiLabels,[("converted","in2")])])
mapping.connect([(parcellate, niftiVolumes,[("roi_file","in1")])])
mapping.connect([(eddycorrect, niftiVolumes,[("outputnode.eddy_corrected","in2")])])
mapping.connect([(mri_convert_Brain, niftiVolumes,[("out_file","in3")])])
mapping.connect([(creatematrix, fiberDataArrays,[("endpoint_file","in1")])])
mapping.connect([(creatematrix, fiberDataArrays,[("endpoint_file_mm","in2")])])
mapping.connect([(creatematrix, fiberDataArrays,[("fiber_length_file","in3")])])
mapping.connect([(creatematrix, fiberDataArrays,[("fiber_label_file","in4")])])
"""
This block actually connects the merged lists to the CFF converter. We pass the surfaces
and volumes that are to be included, as well as the tracts and the network itself. The currently
running pipeline (connectivity_tutorial_advanced.py) is also scraped and included in the CFF file. This
makes it easy for the user to examine the entire processing pathway used to generate the end
product.
"""
mapping.connect([(giftiSurfaces, CFFConverter,[("out","gifti_surfaces")])])
mapping.connect([(giftiLabels, CFFConverter,[("out","gifti_labels")])])
mapping.connect([(creatematrix, CFFConverter,[("matrix_files","gpickled_networks")])])
mapping.connect([(niftiVolumes, CFFConverter,[("out","nifti_volumes")])])
mapping.connect([(fiberDataArrays, CFFConverter,[("out","data_files")])])
mapping.connect([(creatematrix, CFFConverter,[("filtered_tractographies","tract_files")])])
mapping.connect([(inputnode, CFFConverter,[("subject_id","title")])])
"""
The graph theoretical metrics are computed using the networkx workflow and placed in another CFF file
"""
mapping.connect([(inputnode, networkx,[("subject_id","inputnode.extra_field")])])
mapping.connect([(creatematrix, networkx,[("intersection_matrix_file","inputnode.network_file")])])
mapping.connect([(networkx, NxStatsCFFConverter,[("outputnode.network_files","gpickled_networks")])])
mapping.connect([(giftiSurfaces, NxStatsCFFConverter,[("out","gifti_surfaces")])])
mapping.connect([(giftiLabels, NxStatsCFFConverter,[("out","gifti_labels")])])
mapping.connect([(niftiVolumes, NxStatsCFFConverter,[("out","nifti_volumes")])])
mapping.connect([(fiberDataArrays, NxStatsCFFConverter,[("out","data_files")])])
mapping.connect([(inputnode, NxStatsCFFConverter,[("subject_id","title")])])
mapping.connect([(inputnode, cmats_to_csv,[("subject_id","inputnode.extra_field")])])
mapping.connect([(creatematrix, cmats_to_csv,[("matlab_matrix_files","inputnode.matlab_matrix_files")])])
"""
Create a higher-level workflow
------------------------------
Finally, we create another higher-level workflow to connect our mapping workflow with the info and datagrabbing nodes
declared at the beginning. Our tutorial is now extensible to any arbitrary number of subjects by simply adding
their names to the subject list and their data to the proper folders.
"""
connectivity = pe.Workflow(name="connectivity")
connectivity.base_dir = op.abspath('dmri_connectivity_advanced')
connectivity.connect([
(infosource,datasource,[('subject_id', 'subject_id')]),
(datasource,mapping,[('dwi','inputnode.dwi'),
('bvals','inputnode.bvals'),
('bvecs','inputnode.bvecs')
]),
(infosource,mapping,[('subject_id','inputnode.subject_id')])
])
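"""
As a purely illustrative note, extending the analysis to further subjects only requires growing
the subject list and placing their data under matching folders inside the data directory::

    subject_list = ['subj1', 'subj2', 'subj3']
"""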
"""
The following lines run the whole workflow and produce a .dot and .png graph of the processing pipeline.
"""
if __name__ == '__main__':
connectivity.run()
connectivity.write_graph()
|
|
from __future__ import unicode_literals
import base64
import calendar
import datetime
import re
import sys
from binascii import Error as BinasciiError
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six
from django.utils.six.moves.urllib.parse import (
quote, quote_plus, unquote, unquote_plus, urlparse,
urlencode as original_urlencode)
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_text(quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_text(quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_text(unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_text(unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return original_urlencode(
[(force_str(k),
[force_str(i) for i in v] if isinstance(v, (list,tuple)) else force_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns an integer expressed in seconds since the epoch, in UTC.
"""
    # email.utils.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
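# Illustrative examples (the canonical date from RFC 2616); all three formats parse
# to the same epoch value:
#   parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT') == 784111777
#   parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT') == 784111777
#   parse_http_date('Sun Nov  6 08:49:37 1994') == 784111777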
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int to avoid
# returning a long (#15067). The long type was removed in Python 3.
if six.PY2 and value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
if i < 0:
raise ValueError("Negative base36 conversion input.")
if six.PY2:
if not isinstance(i, six.integer_types):
raise TypeError("Non-integer base36 conversion input.")
if i > sys.maxint:
raise ValueError("Base36 conversion input too large.")
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i // j])
i = i % j
factor -= 1
return ''.join(base36)
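# Illustrative round trips:
#   int_to_base36(0) == '0'
#   int_to_base36(35) == 'z'
#   int_to_base36(125) == '3h' and base36_to_int('3h') == 125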
def urlsafe_base64_encode(s):
"""
Encodes a bytestring in base64 for use in URLs, stripping any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decodes a base64 encoded string, adding back any trailing equal signs that
might have been stripped.
"""
s = s.encode('utf-8') # base64encode should only return ASCII.
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
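# Illustrative round trips (both functions deal in bytes in this version):
#   urlsafe_base64_encode(b'a') == b'YQ' and urlsafe_base64_decode('YQ') == b'a'
#   urlsafe_base64_encode(b'abc') == b'YWJj' (three bytes need no padding to strip)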
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
return etags
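# Example: parse_etags('W/"xyzzy", "r2d2"') == ['xyzzy', 'r2d2'], while a header that
# does not match the expected format, e.g. parse_etags('no-quotes'), is returned
# as-is: ['no-quotes'].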
def quote_etag(etag):
"""
Wraps a string in double quotes escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse(url1), urlparse(url2)
try:
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
except ValueError:
return False
def is_safe_url(url, host=None):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always returns ``False`` on an empty url.
"""
if not url:
return False
url_info = urlparse(url)
return (not url_info.netloc or url_info.netloc == host) and \
(not url_info.scheme or url_info.scheme in ['http', 'https'])
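# Illustrative behaviour:
#   is_safe_url('/accounts/profile/', host='example.com') -> True  (relative URL)
#   is_safe_url('https://example.com/next', host='example.com') -> True
#   is_safe_url('http://evil.example/next', host='example.com') -> False (other host)
#   is_safe_url('ftp://example.com/file', host='example.com') -> False (unsafe scheme)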
|
|
# Copyright 2009-2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import os.path
from requestbuilder import Arg, MutuallyExclusiveArgList
from requestbuilder.exceptions import ArgumentError
from euca2ools.commands.argtypes import (ec2_block_device_mapping,
vpc_interface)
from euca2ools.commands.ec2 import EC2Request
class RunInstances(EC2Request):
DESCRIPTION = 'Launch instances of a machine image'
ARGS = [Arg('ImageId', metavar='IMAGE',
help='ID of the image to instantiate (required)'),
Arg('-n', '--instance-count', dest='count', metavar='MIN[-MAX]',
default='1', route_to=None,
help='''number of instances to launch. If this number of
instances cannot be launched, no instances will launch.
If specified as a range (min-max), the server will
attempt to launch the maximum number, but no fewer
than the minimum number.'''),
Arg('-g', '--group', action='append', default=[], route_to=None,
help='security group(s) in which to launch the instances'),
Arg('-k', '--key', dest='KeyName', metavar='KEYPAIR',
help='name of the key pair to use'),
MutuallyExclusiveArgList(
Arg('-d', '--user-data', metavar='DATA', route_to=None,
help='''user data to make available to instances in this
reservation'''),
Arg('--user-data-force', metavar='DATA', route_to=None,
help='''same as -d/--user-data, but without checking if a
file by that name exists first'''),
Arg('-f', '--user-data-file', metavar='FILE', route_to=None,
help='''file containing user data to make available to the
instances in this reservation''')),
Arg('--addressing', dest='AddressingType',
choices=('public', 'private'), help='''[Eucalyptus only]
addressing scheme to launch the instance with. Use "private"
to run an instance with no public address.'''),
Arg('-t', '--instance-type', dest='InstanceType',
help='type of instance to launch'),
Arg('-z', '--availability-zone', metavar='ZONE',
dest='Placement.AvailabilityZone'),
Arg('--kernel', dest='KernelId', metavar='KERNEL',
help='ID of the kernel to launch the instance(s) with'),
Arg('--ramdisk', dest='RamdiskId', metavar='RAMDISK',
help='ID of the ramdisk to launch the instance(s) with'),
Arg('-b', '--block-device-mapping', metavar='DEVICE=MAPPED',
dest='BlockDeviceMapping', action='append',
type=ec2_block_device_mapping, default=[],
help='''define a block device mapping for the instances, in the
form DEVICE=MAPPED, where "MAPPED" is "none", "ephemeral(0-3)",
or
"[SNAP-ID]:[GiB]:[true|false]:[standard|VOLTYPE[:IOPS]]"'''),
Arg('-m', '--monitor', dest='Monitoring.Enabled',
action='store_const', const='true',
help='enable detailed monitoring for the instance(s)'),
Arg('--disable-api-termination', dest='DisableApiTermination',
action='store_const', const='true',
help='prevent API users from terminating the instance(s)'),
Arg('--instance-initiated-shutdown-behavior',
dest='InstanceInitiatedShutdownBehavior',
choices=('stop', 'terminate'),
help=('whether to "stop" (default) or terminate EBS instances '
'when they shut down')),
Arg('--placement-group', dest='Placement.GroupName',
metavar='PLGROUP', help='''name of a placement group to launch
into'''),
Arg('--tenancy', dest='Placement.Tenancy',
choices=('default', 'dedicated'), help='''[VPC only]
"dedicated" to run on single-tenant hardware'''),
Arg('--client-token', dest='ClientToken', metavar='TOKEN',
help='unique identifier to ensure request idempotency'),
Arg('-s', '--subnet', metavar='SUBNET', route_to=None,
help='''[VPC only] subnet to create the instance's network
interface in'''),
Arg('--private-ip-address', metavar='ADDRESS', route_to=None,
help='''[VPC only] assign a specific primary private IP address
to an instance's interface'''),
MutuallyExclusiveArgList(
Arg('--secondary-private-ip-address', metavar='ADDRESS',
action='append', route_to=None, help='''[VPC only]
assign a specific secondary private IP address to an
instance's network interface. Use this option multiple
times to add additional addresses.'''),
Arg('--secondary-private-ip-address-count', metavar='COUNT',
type=int, route_to=None, help='''[VPC only]
automatically assign a specific number of secondary private
IP addresses to an instance's network interface''')),
Arg('-a', '--network-interface', dest='NetworkInterface',
metavar='INTERFACE', action='append', type=vpc_interface,
help=('[VPC only] add a network interface to the new '
'instance. If the interface already exists, supply its '
'ID and a numeric index for it, separated by ":", in '
'the form "eni-NNNNNNNN:INDEX". To create a new '
'interface, supply a numeric index and subnet ID for '
'it, along with (in order) an optional description, a '
'primary private IP address, a list of security group '
'IDs to associate with the interface, whether to delete '
'the interface upon instance termination ("true" or '
'"false"), a number of secondary private IP addresses '
'to create automatically, and a list of secondary '
'private IP addresses to assign to the interface, '
'separated by ":", in the form ":INDEX:SUBNET:'
'[DESCRIPTION]:[PRIV_IP]:[GROUP1,GROUP2,...]:[true|'
'false]:[SEC_IP_COUNT|:SEC_IP1,SEC_IP2,...]". You '
'cannot specify both of the latter two. This option '
'may be used multiple times. Each adds another network '
'interface.')),
Arg('-p', '--iam-profile', metavar='IPROFILE', route_to=None,
help='''name or ARN of the IAM instance profile to associate
with the new instance(s)'''),
Arg('--ebs-optimized', dest='EbsOptimized', action='store_const',
const='true', help='optimize the new instance(s) for EBS I/O')]
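    # Illustrative command lines (all IDs are hypothetical) for the mapping-style
    # options defined above:
    #   euca-run-instances emi-12345678 -t m1.small -b /dev/sdb=ephemeral0
    #   euca-run-instances emi-12345678 -b /dev/sdc=snap-12345678:100:true:standard
    #   euca-run-instances emi-12345678 -a eni-a1b2c3d4:1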
LIST_TAGS = ['reservationSet', 'instancesSet', 'groupSet', 'tagSet',
'blockDeviceMapping', 'productCodes', 'networkInterfaceSet',
'privateIpAddressesSet']
# noinspection PyExceptionInherit
def configure(self):
EC2Request.configure(self)
if self.args.get('user_data'):
if os.path.isfile(self.args['user_data']):
raise ArgumentError(
'argument -d/--user-data: to pass the contents of a file '
'as user data, use -f/--user-data-file. To pass the '
"literal value '{0}' as user data even though it matches "
                    'the name of a file, use --user-data-force.'.format(
                        self.args['user_data']))
else:
self.params['UserData'] = base64.b64encode(
self.args['user_data'])
elif self.args.get('user_data_force'):
self.params['UserData'] = base64.b64encode(
self.args['user_data_force'])
elif self.args.get('user_data_file'):
with open(self.args['user_data_file']) as user_data_file:
self.params['UserData'] = base64.b64encode(
user_data_file.read())
if self.args.get('KeyName') is None:
default_key_name = self.config.get_region_option(
'ec2-default-keypair')
if default_key_name:
self.log.info("using default key pair '%s'", default_key_name)
self.params['KeyName'] = default_key_name
# noinspection PyExceptionInherit
def preprocess(self):
counts = self.args['count'].split('-')
if len(counts) == 1:
try:
self.params['MinCount'] = int(counts[0])
self.params['MaxCount'] = int(counts[0])
except ValueError:
raise ArgumentError('argument -n/--instance-count: instance '
'count must be an integer')
elif len(counts) == 2:
try:
self.params['MinCount'] = int(counts[0])
self.params['MaxCount'] = int(counts[1])
except ValueError:
raise ArgumentError('argument -n/--instance-count: instance '
                                    'count range must be comprised of '
'integers')
else:
raise ArgumentError('argument -n/--instance-count: value must '
'have format "1" or "1-2"')
if self.params['MinCount'] < 1 or self.params['MaxCount'] < 1:
raise ArgumentError('argument -n/--instance-count: instance count '
'must be positive')
if self.params['MinCount'] > self.params['MaxCount']:
self.log.debug('MinCount > MaxCount; swapping')
self.params.update({'MinCount': self.params['MaxCount'],
'MaxCount': self.params['MinCount']})
for group in self.args['group']:
if group.startswith('sg-'):
self.params.setdefault('SecurityGroupId', [])
self.params['SecurityGroupId'].append(group)
else:
self.params.setdefault('SecurityGroup', [])
self.params['SecurityGroup'].append(group)
iprofile = self.args.get('iam_profile')
if iprofile:
if iprofile.startswith('arn:'):
self.params['IamInstanceProfile.Arn'] = iprofile
else:
self.params['IamInstanceProfile.Name'] = iprofile
# Assemble an interface out of the "friendly" split interface options
cli_iface = {}
if self.args.get('private_ip_address'):
cli_iface['PrivateIpAddresses'] = [
{'PrivateIpAddress': self.args['private_ip_address'],
'Primary': 'true'}]
if self.args.get('secondary_private_ip_address'):
sec_ips = [{'PrivateIpAddress': addr} for addr in
self.args['secondary_private_ip_address']]
cli_iface.setdefault('PrivateIpAddresses', [])
cli_iface['PrivateIpAddresses'].extend(sec_ips)
if self.args.get('secondary_private_ip_address_count'):
sec_ip_count = self.args['secondary_private_ip_address_count']
cli_iface['SecondaryPrivateIpAddressCount'] = sec_ip_count
if self.args.get('subnet'):
cli_iface['SubnetId'] = self.args['subnet']
if cli_iface:
cli_iface['DeviceIndex'] = 0
self.params.setdefault('NetworkInterface', [])
self.params['NetworkInterface'].append(cli_iface)
def print_result(self, result):
self.print_reservation(result)
|
|
"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
import warnings
from scipy.spatial import distance
from scipy import sparse
import pytest
from sklearn.utils._testing import assert_array_equal
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import DBSCAN
from sklearn.cluster import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert n_clusters_1 == n_clusters
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert n_clusters_2 == n_clusters
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert n_clusters_1 == n_clusters
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert n_clusters_2 == n_clusters
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
@pytest.mark.parametrize('include_self', [False, True])
def test_dbscan_sparse_precomputed(include_self):
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
X_ = X if include_self else None
D_sparse = nn.radius_neighbors_graph(X=X_, mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed_different_eps():
# test that precomputed neighbors graph is filtered if computed with
# a radius larger than DBSCAN's eps.
lower_eps = 0.2
nn = NearestNeighbors(radius=lower_eps).fit(X)
D_sparse = nn.radius_neighbors_graph(X, mode='distance')
dbscan_lower = dbscan(D_sparse, eps=lower_eps, metric='precomputed')
higher_eps = lower_eps + 0.7
nn = NearestNeighbors(radius=higher_eps).fit(X)
D_sparse = nn.radius_neighbors_graph(X, mode='distance')
dbscan_higher = dbscan(D_sparse, eps=lower_eps, metric='precomputed')
assert_array_equal(dbscan_lower[0], dbscan_higher[0])
assert_array_equal(dbscan_lower[1], dbscan_higher[1])
@pytest.mark.parametrize('use_sparse', [True, False])
@pytest.mark.parametrize('metric', ['precomputed', 'minkowski'])
def test_dbscan_input_not_modified(use_sparse, metric):
# test that the input is not modified by dbscan
X = np.random.RandomState(0).rand(10, 10)
X = sparse.csr_matrix(X) if use_sparse else X
X_copy = X.copy()
dbscan(X, metric=metric)
if use_sparse:
assert_array_equal(X.toarray(), X_copy.toarray())
else:
assert_array_equal(X, X_copy)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert db.core_sample_indices_.shape == (0,)
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert n_clusters_1 == n_clusters
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert n_clusters_2 == n_clusters
def test_dbscan_metric_params():
    # Tests that DBSCAN works with the metric_params argument.
eps = 0.8
min_samples = 10
p = 1
# Compute DBSCAN with metric_params arg
with warnings.catch_warnings(record=True) as warns:
db = DBSCAN(
metric='minkowski', metric_params={'p': p}, eps=eps,
p=None, min_samples=min_samples, algorithm='ball_tree'
).fit(X)
assert not warns
core_sample_1, labels_1 = db.core_sample_indices_, db.labels_
# Test that sample labels are the same as passing Minkowski 'p' directly
db = DBSCAN(metric='minkowski', eps=eps, min_samples=min_samples,
algorithm='ball_tree', p=p).fit(X)
core_sample_2, labels_2 = db.core_sample_indices_, db.labels_
assert_array_equal(core_sample_1, core_sample_2)
assert_array_equal(labels_1, labels_2)
# Minkowski with p=1 should be equivalent to Manhattan distance
db = DBSCAN(metric='manhattan', eps=eps, min_samples=min_samples,
algorithm='ball_tree').fit(X)
core_sample_3, labels_3 = db.core_sample_indices_, db.labels_
assert_array_equal(core_sample_1, core_sample_3)
assert_array_equal(labels_1, labels_3)
with pytest.warns(
SyntaxWarning,
match="Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored."):
# Test that checks p is ignored in favor of metric_params={'p': <val>}
db = DBSCAN(metric='minkowski', metric_params={'p': p}, eps=eps, p=p+1,
min_samples=min_samples, algorithm='ball_tree').fit(X)
core_sample_4, labels_4 = db.core_sample_indices_, db.labels_
assert_array_equal(core_sample_1, core_sample_4)
assert_array_equal(labels_1, labels_4)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert n_clusters_1 == n_clusters
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert n_clusters_2 == n_clusters
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert n_clusters_3 == n_clusters
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert n_clusters_4 == n_clusters
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert n_clusters_5 == n_clusters
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
@pytest.mark.parametrize(
"args",
[{'eps': -1.0}, {'algorithm': 'blah'}, {'metric': 'blah'},
{'leaf_size': -1}, {'p': -1}]
)
def test_dbscan_badargs(args):
# Test bad argument values: these should all raise ValueErrors
with pytest.raises(ValueError):
dbscan(X, **args)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert type(pickle.loads(s)) == obj.__class__
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert 0 in core
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert 0 in core
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert 0 not in core
def test_weighted_dbscan():
# ensure sample_weight is validated
with pytest.raises(ValueError):
dbscan([[0], [1]], sample_weight=[2])
with pytest.raises(ValueError):
dbscan([[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert len(label1) == len(X)
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
@pytest.mark.parametrize('algorithm', ['brute', 'kd_tree', 'ball_tree'])
def test_dbscan_core_samples_toy(algorithm):
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, np.full(n_samples, -1.))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert len(set(labels)) == 1
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert len(set(labels)) == 1
def test_dbscan_precomputed_metric_with_initial_rows_zero():
    # sample matrix with the first two rows all zero
ar = np.array([
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
[0.0, 0.0, 0.1, 0.1, 0.0, 0.0, 0.3],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1],
[0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.0]
])
matrix = sparse.csr_matrix(ar)
labels = DBSCAN(eps=0.2, metric='precomputed',
min_samples=2).fit(matrix).labels_
assert_array_equal(labels, [-1, -1, 0, 0, 0, 1, 1])
import json
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.core.files.uploadedfile import SimpleUploadedFile
from django.template.defaultfilters import filesizeformat
from django.test import TestCase, override_settings
from django.urls import reverse
from django.utils.http import RFC3986_SUBDELIMS, urlquote
from wagtail.core.models import Collection, GroupCollectionPermission
from wagtail.images.views.serve import generate_signature
from wagtail.tests.testapp.models import CustomImage
from wagtail.tests.utils import WagtailTestUtils
from .utils import Image, get_test_image_file
# Get the chars that Django considers safe to leave unescaped in a URL
urlquote_safechars = RFC3986_SUBDELIMS + str('/~:@')
class TestImageIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/index.html')
self.assertContains(response, "Add an image")
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
def test_pagination_preserves_other_params(self):
root_collection = Collection.get_first_root_node()
evil_plans_collection = root_collection.add_child(name="Evil plans")
for i in range(1, 50):
self.image = Image.objects.create(
title="Test image %i" % i,
file=get_test_image_file(size=(1, 1)),
collection=evil_plans_collection
)
response = self.get({'collection_id': evil_plans_collection.id, 'p': 2})
self.assertEqual(response.status_code, 200)
response_body = response.content.decode('utf8')
# prev link should exist and include collection_id
self.assertTrue(
("?p=1&collection_id=%i" % evil_plans_collection.id) in response_body
or ("?collection_id=%i&p=1" % evil_plans_collection.id) in response_body
)
# next link should exist and include collection_id
self.assertTrue(
("?p=3&collection_id=%i" % evil_plans_collection.id) in response_body
or ("?collection_id=%i&p=3" % evil_plans_collection.id) in response_body
)
def test_ordering(self):
orderings = ['title', '-created_at']
for ordering in orderings:
response = self.get({'ordering': ordering})
self.assertEqual(response.status_code, 200)
def test_collection_order(self):
root_collection = Collection.get_first_root_node()
root_collection.add_child(name="Evil plans")
root_collection.add_child(name="Good plans")
response = self.get()
self.assertEqual(
[collection.name for collection in response.context['collections']],
['Root', 'Evil plans', 'Good plans'])
def test_tags(self):
image_two_tags = Image.objects.create(
title="Test image with two tags",
file=get_test_image_file(),
)
image_two_tags.tags.add("one", "two")
response = self.get()
self.assertEqual(response.status_code, 200)
current_tag = response.context['current_tag']
self.assertIsNone(current_tag)
tags = response.context['popular_tags']
self.assertTrue(
[tag.name for tag in tags] == ["one", "two"]
or [tag.name for tag in tags] == ["two", "one"]
)
def test_tag_filtering(self):
Image.objects.create(
title="Test image with no tags",
file=get_test_image_file(),
)
image_one_tag = Image.objects.create(
title="Test image with one tag",
file=get_test_image_file(),
)
image_one_tag.tags.add("one")
image_two_tags = Image.objects.create(
title="Test image with two tags",
file=get_test_image_file(),
)
image_two_tags.tags.add("one", "two")
# no filtering
response = self.get()
self.assertEqual(response.context['images'].paginator.count, 3)
# filter all images with tag 'one'
response = self.get({'tag': 'one'})
self.assertEqual(response.context['images'].paginator.count, 2)
# filter all images with tag 'two'
response = self.get({'tag': 'two'})
self.assertEqual(response.context['images'].paginator.count, 1)
def test_tag_filtering_preserves_other_params(self):
for i in range(1, 100):
image = Image.objects.create(
title="Test image %i" % i,
file=get_test_image_file(size=(1, 1)),
)
if (i % 2 != 0):
image.tags.add('even')
image.save()
response = self.get({'tag': 'even', 'p': 2})
self.assertEqual(response.status_code, 200)
response_body = response.content.decode('utf8')
# prev link should exist and include tag
self.assertTrue(
"?p=1&tag=even" in response_body
or "?tag=even&p=1" in response_body
)
# next link should exist and include tag
self.assertTrue(
"?p=3&tag=even" in response_body
or "?tag=even&p=3" in response_body
)
class TestImageAddView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailimages:add'), post_data)
def test_get(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
# as standard, only the root collection exists and so no 'Collection' option
# is displayed on the form
self.assertNotContains(response, '<label for="id_collection">')
# Ensure the form supports file uploads
self.assertContains(response, 'enctype="multipart/form-data"')
# draftail should NOT be a standard JS include on this page
self.assertNotContains(response, 'wagtailadmin/js/draftail.js')
def test_get_with_collections(self):
root_collection = Collection.get_first_root_node()
root_collection.add_child(name="Evil plans")
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
self.assertContains(response, '<label for="id_collection">')
self.assertContains(response, "Evil plans")
@override_settings(WAGTAILIMAGES_IMAGE_MODEL='tests.CustomImage')
def test_get_with_custom_image_model(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
# Ensure the form supports file uploads
self.assertContains(response, 'enctype="multipart/form-data"')
# custom fields should be included
self.assertContains(response, 'name="fancy_caption"')
# form media should be imported
self.assertContains(response, 'wagtailadmin/js/draftail.js')
def test_add(self):
response = self.post({
'title': "Test image",
'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
# Check that the image was created
images = Image.objects.filter(title="Test image")
self.assertEqual(images.count(), 1)
# Test that size was populated correctly
image = images.first()
self.assertEqual(image.width, 640)
self.assertEqual(image.height, 480)
# Test that the file_size/hash fields were set
self.assertTrue(image.file_size)
self.assertTrue(image.file_hash)
# Test that it was placed in the root collection
root_collection = Collection.get_first_root_node()
self.assertEqual(image.collection, root_collection)
@override_settings(DEFAULT_FILE_STORAGE='wagtail.tests.dummy_external_storage.DummyExternalStorage')
def test_add_with_external_file_storage(self):
response = self.post({
'title': "Test image",
'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
# Check that the image was created
self.assertTrue(Image.objects.filter(title="Test image").exists())
def test_add_no_file_selected(self):
response = self.post({
'title': "Test image",
})
# Shouldn't redirect anywhere
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
# The form should have an error
self.assertFormError(response, 'form', 'file', "This field is required.")
@override_settings(WAGTAILIMAGES_MAX_UPLOAD_SIZE=1)
def test_add_too_large_file(self):
file_content = get_test_image_file().file.getvalue()
response = self.post({
'title': "Test image",
'file': SimpleUploadedFile('test.png', file_content),
})
# Shouldn't redirect anywhere
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
# The form should have an error
self.assertFormError(
response, 'form', 'file',
"This file is too big ({file_size}). Maximum filesize {max_file_size}.".format(
file_size=filesizeformat(len(file_content)),
max_file_size=filesizeformat(1),
)
)
@override_settings(WAGTAILIMAGES_MAX_IMAGE_PIXELS=1)
def test_add_too_many_pixels(self):
file_content = get_test_image_file().file.getvalue()
response = self.post({
'title': "Test image",
'file': SimpleUploadedFile('test.png', file_content),
})
# Shouldn't redirect anywhere
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
# The form should have an error
self.assertFormError(
response, 'form', 'file',
'This file has too many pixels (307200). Maximum pixels 1.'
)
def test_add_with_collections(self):
root_collection = Collection.get_first_root_node()
evil_plans_collection = root_collection.add_child(name="Evil plans")
response = self.post({
'title': "Test image",
'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
'collection': evil_plans_collection.id,
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
# Check that the image was created
images = Image.objects.filter(title="Test image")
self.assertEqual(images.count(), 1)
# Test that it was placed in the Evil Plans collection
image = images.first()
self.assertEqual(image.collection, evil_plans_collection)
class TestImageAddViewWithLimitedCollectionPermissions(TestCase, WagtailTestUtils):
def setUp(self):
add_image_permission = Permission.objects.get(
content_type__app_label='wagtailimages', codename='add_image'
)
admin_permission = Permission.objects.get(
content_type__app_label='wagtailadmin', codename='access_admin'
)
root_collection = Collection.get_first_root_node()
self.evil_plans_collection = root_collection.add_child(name="Evil plans")
conspirators_group = Group.objects.create(name="Evil conspirators")
conspirators_group.permissions.add(admin_permission)
GroupCollectionPermission.objects.create(
group=conspirators_group,
collection=self.evil_plans_collection,
permission=add_image_permission
)
user = get_user_model().objects.create_user(
username='moriarty',
email='moriarty@example.com',
password='password'
)
user.groups.add(conspirators_group)
self.client.login(username='moriarty', password='password')
def get(self, params={}):
return self.client.get(reverse('wagtailimages:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailimages:add'), post_data)
def test_get(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
# user only has access to one collection, so no 'Collection' option
# is displayed on the form
self.assertNotContains(response, '<label for="id_collection">')
def test_add(self):
response = self.post({
'title': "Test image",
'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
})
# User should be redirected back to the index
self.assertRedirects(response, reverse('wagtailimages:index'))
# Image should be created in the 'evil plans' collection,
# despite there being no collection field in the form, because that's the
# only one the user has access to
self.assertTrue(Image.objects.filter(title="Test image").exists())
self.assertEqual(
Image.objects.get(title="Test image").collection,
self.evil_plans_collection
)
class TestImageEditView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create an image to edit
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
self.storage = self.image.file.storage
def update_from_db(self):
self.image = Image.objects.get(pk=self.image.pk)
def get(self, params={}):
return self.client.get(reverse('wagtailimages:edit', args=(self.image.id,)), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailimages:edit', args=(self.image.id,)), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
# Ensure the form supports file uploads
self.assertContains(response, 'enctype="multipart/form-data"')
# draftail should NOT be a standard JS include on this page
# (see TestImageEditViewWithCustomImageModel - this confirms that form media
# definitions are being respected)
self.assertNotContains(response, 'wagtailadmin/js/draftail.js')
@override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
def test_with_usage_count(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
self.assertContains(response, "Used 0 times")
expected_url = '/admin/images/usage/%d/' % self.image.id
self.assertContains(response, expected_url)
@override_settings(DEFAULT_FILE_STORAGE='wagtail.tests.dummy_external_storage.DummyExternalStorage')
def test_simple_with_external_storage(self):
# The view calls get_file_size on the image, which closes the file if
# file_size wasn't previously populated.
# The view then attempts to reopen the file when rendering the template,
# which caused crashes when certain storage backends were in use.
# See #1397
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
def test_edit(self):
response = self.post({
'title': "Edited",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
self.update_from_db()
self.assertEqual(self.image.title, "Edited")
def test_edit_with_new_image_file(self):
file_content = get_test_image_file().file.getvalue()
# Change the file size/hash of the image
self.image.file_size = 100000
self.image.file_hash = 'abcedf'
self.image.save()
response = self.post({
'title': "Edited",
'file': SimpleUploadedFile('new.png', file_content),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
self.update_from_db()
self.assertNotEqual(self.image.file_size, 100000)
self.assertNotEqual(self.image.file_hash, 'abcedf')
@override_settings(DEFAULT_FILE_STORAGE='wagtail.tests.dummy_external_storage.DummyExternalStorage')
def test_edit_with_new_image_file_and_external_storage(self):
file_content = get_test_image_file().file.getvalue()
# Change the file size/hash of the image
self.image.file_size = 100000
self.image.file_hash = 'abcedf'
self.image.save()
response = self.post({
'title': "Edited",
'file': SimpleUploadedFile('new.png', file_content),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
self.update_from_db()
self.assertNotEqual(self.image.file_size, 100000)
self.assertNotEqual(self.image.file_hash, 'abcedf')
def test_with_missing_image_file(self):
self.image.file.delete(False)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
def check_get_missing_file_displays_warning(self):
# Need to recreate image to use a custom storage per test.
image = Image.objects.create(title="Test image", file=get_test_image_file())
image.file.storage.delete(image.file.name)
response = self.client.get(reverse('wagtailimages:edit', args=(image.pk,)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
self.assertContains(response, "File not found")
def test_get_missing_file_displays_warning_with_default_storage(self):
self.check_get_missing_file_displays_warning()
@override_settings(DEFAULT_FILE_STORAGE='wagtail.tests.dummy_external_storage.DummyExternalStorage')
def test_get_missing_file_displays_warning_with_custom_storage(self):
self.check_get_missing_file_displays_warning()
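# Helper used by the reupload tests below: read and return the raw bytes of
# ``f`` (defaulting to the image's current file), reopening the file first if
# a storage backend has already closed it.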
def get_content(self, f=None):
if f is None:
f = self.image.file
try:
if f.closed:
f.open('rb')
return f.read()
finally:
f.close()
def test_reupload_same_name(self):
"""
Checks that reuploading the image file with the same file name
changes the file name, to avoid browser cache issues (see #3817).
"""
old_file = self.image.file
old_size = self.image.file_size
old_data = self.get_content()
old_rendition = self.image.get_rendition('fill-5x5')
old_rendition_data = self.get_content(old_rendition.file)
new_name = self.image.filename
new_file = SimpleUploadedFile(
new_name, get_test_image_file(colour='red').file.getvalue())
new_size = new_file.size
response = self.post({
'title': self.image.title, 'file': new_file,
})
self.assertRedirects(response, reverse('wagtailimages:index'))
self.update_from_db()
self.assertFalse(self.storage.exists(old_file.name))
self.assertTrue(self.storage.exists(self.image.file.name))
self.assertNotEqual(self.image.file.name,
'original_images/' + new_name)
self.assertNotEqual(self.image.file_size, old_size)
self.assertEqual(self.image.file_size, new_size)
self.assertNotEqual(self.get_content(), old_data)
new_rendition = self.image.get_rendition('fill-5x5')
self.assertNotEqual(old_rendition.file.name, new_rendition.file.name)
self.assertNotEqual(self.get_content(new_rendition.file),
old_rendition_data)
def test_reupload_different_name(self):
"""
Checks that reuploading the image file with a different file name
correctly uses the new file name.
"""
old_file = self.image.file
old_size = self.image.file_size
old_data = self.get_content()
old_rendition = self.image.get_rendition('fill-5x5')
old_rendition_data = self.get_content(old_rendition.file)
new_name = 'test_reupload_different_name.png'
new_file = SimpleUploadedFile(
new_name, get_test_image_file(colour='red').file.getvalue())
new_size = new_file.size
response = self.post({
'title': self.image.title, 'file': new_file,
})
self.assertRedirects(response, reverse('wagtailimages:index'))
self.update_from_db()
self.assertFalse(self.storage.exists(old_file.name))
self.assertTrue(self.storage.exists(self.image.file.name))
self.assertEqual(self.image.file.name,
'original_images/' + new_name)
self.assertNotEqual(self.image.file_size, old_size)
self.assertEqual(self.image.file_size, new_size)
self.assertNotEqual(self.get_content(), old_data)
new_rendition = self.image.get_rendition('fill-5x5')
self.assertNotEqual(old_rendition.file.name, new_rendition.file.name)
self.assertNotEqual(self.get_content(new_rendition.file),
old_rendition_data)
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_no_thousand_separators_in_focal_point_editor(self):
large_image = Image.objects.create(
title="Test image",
file=get_test_image_file(size=(1024, 768)),
)
response = self.client.get(reverse('wagtailimages:edit', args=(large_image.id,)))
self.assertContains(response, 'data-original-width="1024"')
@override_settings(WAGTAILIMAGES_IMAGE_MODEL='tests.CustomImage')
class TestImageEditViewWithCustomImageModel(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create an image to edit
self.image = CustomImage.objects.create(
title="Test image",
file=get_test_image_file(),
)
self.storage = self.image.file.storage
def get(self, params={}):
return self.client.get(reverse('wagtailimages:edit', args=(self.image.id,)), params)
def test_get_with_custom_image_model(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
# Ensure the form supports file uploads
self.assertContains(response, 'enctype="multipart/form-data"')
# form media should be imported
self.assertContains(response, 'wagtailadmin/js/draftail.js')
class TestImageDeleteView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create an image to edit
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages:delete', args=(self.image.id,)), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailimages:delete', args=(self.image.id,)), post_data)
@override_settings(WAGTAIL_USAGE_COUNT_ENABLED=False)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/confirm_delete.html')
self.assertNotIn('Used ', str(response.content))
@override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
def test_usage_link(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/confirm_delete.html')
self.assertContains(response, 'Used 0 times')
expected_url = '/admin/images/usage/%d/' % self.image.id
self.assertContains(response, expected_url)
def test_delete(self):
response = self.post()
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
# Check that the image was deleted
images = Image.objects.filter(title="Test image")
self.assertEqual(images.count(), 0)
class TestImageChooserView(TestCase, WagtailTestUtils):
def setUp(self):
self.user = self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages:chooser'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content.decode())
self.assertEqual(response_json['step'], 'chooser')
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
# draftail should NOT be a standard JS include on this page
self.assertNotIn('wagtailadmin/js/draftail.js', response_json['html'])
@override_settings(WAGTAILIMAGES_IMAGE_MODEL='tests.CustomImage')
def test_with_custom_image_model(self):
response = self.get()
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content.decode())
self.assertEqual(response_json['step'], 'chooser')
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
# custom form fields should be present
self.assertIn('name="image-chooser-upload-fancy_caption"', response_json['html'])
# form media imports should appear on the page
self.assertIn('wagtailadmin/js/draftail.js', response_json['html'])
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
def test_filter_by_tag(self):
for i in range(0, 10):
image = Image.objects.create(
title="Test image %d is even better than the last one" % i,
file=get_test_image_file(),
)
if i % 2 == 0:
image.tags.add('even')
response = self.get({'tag': "even"})
self.assertEqual(response.status_code, 200)
# Results should include images tagged 'even'
self.assertContains(response, "Test image 2 is even better")
# Results should not include images that just have 'even' in the title
self.assertNotContains(response, "Test image 3 is even better")
def test_construct_queryset_hook_browse(self):
image = Image.objects.create(
title="Test image shown",
file=get_test_image_file(),
uploaded_by_user=self.user,
)
Image.objects.create(
title="Test image not shown",
file=get_test_image_file(),
)
def filter_images(images, request):
# Filter on `uploaded_by_user` because it is
# the only default FilterField in search_fields
return images.filter(uploaded_by_user=self.user)
with self.register_hook('construct_image_chooser_queryset', filter_images):
response = self.get()
self.assertEqual(len(response.context['images']), 1)
self.assertEqual(response.context['images'][0], image)
def test_construct_queryset_hook_search(self):
image = Image.objects.create(
title="Test image shown",
file=get_test_image_file(),
uploaded_by_user=self.user,
)
Image.objects.create(
title="Test image not shown",
file=get_test_image_file(),
)
def filter_images(images, request):
# Filter on `uploaded_by_user` because it is
# the only default FilterField in search_fields
return images.filter(uploaded_by_user=self.user)
with self.register_hook('construct_image_chooser_queryset', filter_images):
response = self.get({'q': 'Test'})
self.assertEqual(len(response.context['images']), 1)
self.assertEqual(response.context['images'][0], image)
class TestImageChooserChosenView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create an image to edit
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages:image_chosen', args=(self.image.id,)), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content.decode())
self.assertEqual(response_json['step'], 'image_chosen')
class TestImageChooserSelectFormatView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create an image to edit
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages:chooser_select_format', args=(self.image.id,)), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailimages:chooser_select_format', args=(self.image.id,)), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content.decode())
self.assertEqual(response_json['step'], 'select_format')
self.assertTemplateUsed(response, 'wagtailimages/chooser/select_format.html')
def test_with_edit_params(self):
response = self.get(params={'alt_text': "some previous alt text"})
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'value=\\"some previous alt text\\"')
def test_post_response(self):
response = self.post({'image-chooser-insertion-format': 'left', 'image-chooser-insertion-alt_text': 'Arthur "two sheds" Jackson'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
response_json = json.loads(response.content.decode())
self.assertEqual(response_json['step'], 'image_chosen')
result = response_json['result']
self.assertEqual(result['id'], self.image.id)
self.assertEqual(result['title'], "Test image")
self.assertEqual(result['format'], 'left')
self.assertEqual(result['alt'], 'Arthur "two sheds" Jackson')
self.assertIn('alt="Arthur "two sheds" Jackson"', result['html'])
class TestImageChooserUploadView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages:chooser_upload'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
response_json = json.loads(response.content.decode())
self.assertEqual(response_json['step'], 'chooser')
def test_upload(self):
response = self.client.post(reverse('wagtailimages:chooser_upload'), {
'image-chooser-upload-title': "Test image",
'image-chooser-upload-file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
})
# Check response
self.assertEqual(response.status_code, 200)
# Check that the image was created
images = Image.objects.filter(title="Test image")
self.assertEqual(images.count(), 1)
# Test that size was populated correctly
image = images.first()
self.assertEqual(image.width, 640)
self.assertEqual(image.height, 480)
# Test that the file_size/hash fields were set
self.assertTrue(image.file_size)
self.assertTrue(image.file_hash)
def test_upload_no_file_selected(self):
response = self.client.post(reverse('wagtailimages:chooser_upload'), {
'image-chooser-upload-title': "Test image",
})
# Shouldn't redirect anywhere
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
# The form should have an error
self.assertFormError(response, 'uploadform', 'file', "This field is required.")
def test_pagination_after_upload_form_error(self):
for i in range(0, 20):
Image.objects.create(
title="Test image %d" % i,
file=get_test_image_file(),
)
response = self.client.post(reverse('wagtailimages:chooser_upload'), {
'image-chooser-upload-title': "Test image",
})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
# The re-rendered image chooser listing should be paginated
self.assertContains(response, "Page 1 of ")
self.assertEqual(12, len(response.context['images']))
def test_select_format_flag_after_upload_form_error(self):
submit_url = reverse('wagtailimages:chooser_upload') + '?select_format=true'
response = self.client.post(submit_url, {
'image-chooser-upload-title': "Test image",
'image-chooser-upload-file': SimpleUploadedFile('not_an_image.txt', b'this is not an image'),
})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
self.assertFormError(response, 'uploadform', 'file', "Not a supported image format. Supported formats: GIF, JPEG, PNG, WEBP.")
# the action URL of the re-rendered form should include the select_format=true parameter
# (NB the HTML in the response is embedded in a JS string, so need to escape accordingly)
expected_action_attr = 'action=\\"%s\\"' % submit_url
self.assertContains(response, expected_action_attr)
@override_settings(DEFAULT_FILE_STORAGE='wagtail.tests.dummy_external_storage.DummyExternalStorage')
def test_upload_with_external_storage(self):
response = self.client.post(reverse('wagtailimages:chooser_upload'), {
'image-chooser-upload-title': "Test image",
'image-chooser-upload-file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
})
# Check response
self.assertEqual(response.status_code, 200)
# Check that the image was created
self.assertTrue(Image.objects.filter(title="Test image").exists())
class TestImageChooserUploadViewWithLimitedPermissions(TestCase, WagtailTestUtils):
def setUp(self):
add_image_permission = Permission.objects.get(
content_type__app_label='wagtailimages', codename='add_image'
)
admin_permission = Permission.objects.get(
content_type__app_label='wagtailadmin', codename='access_admin'
)
root_collection = Collection.get_first_root_node()
self.evil_plans_collection = root_collection.add_child(name="Evil plans")
conspirators_group = Group.objects.create(name="Evil conspirators")
conspirators_group.permissions.add(admin_permission)
GroupCollectionPermission.objects.create(
group=conspirators_group,
collection=self.evil_plans_collection,
permission=add_image_permission
)
user = get_user_model().objects.create_user(
username='moriarty',
email='moriarty@example.com',
password='password'
)
user.groups.add(conspirators_group)
self.client.login(username='moriarty', password='password')
def test_get(self):
response = self.client.get(reverse('wagtailimages:chooser_upload'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
# user only has access to one collection, so no 'Collection' option
# is displayed on the form
self.assertNotContains(response, '<label for="id_collection">')
def test_get_chooser(self):
response = self.client.get(reverse('wagtailimages:chooser'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
# user only has access to one collection, so no 'Collection' option
# is displayed on the form
self.assertNotContains(response, '<label for="id_collection">')
def test_add(self):
response = self.client.post(reverse('wagtailimages:chooser_upload'), {
'image-chooser-upload-title': "Test image",
'image-chooser-upload-file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
})
self.assertEqual(response.status_code, 200)
# Check that the image was created
images = Image.objects.filter(title="Test image")
self.assertEqual(images.count(), 1)
# Image should be created in the 'evil plans' collection,
# despite there being no collection field in the form, because that's the
# only one the user has access to
self.assertTrue(Image.objects.filter(title="Test image").exists())
self.assertEqual(
Image.objects.get(title="Test image").collection,
self.evil_plans_collection
)
class TestMultipleImageUploader(TestCase, WagtailTestUtils):
"""
This tests the multiple image upload views located in wagtailimages/views/multiple.py
"""
def setUp(self):
self.login()
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def test_add(self):
"""
This tests that the add view responds correctly on a GET request
"""
# Send request
response = self.client.get(reverse('wagtailimages:add_multiple'))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/multiple/add.html')
# draftail should NOT be a standard JS include on this page
# (see TestMultipleImageUploaderWithCustomImageModel - this confirms that form media
# definitions are being respected)
self.assertNotContains(response, 'wagtailadmin/js/draftail.js')
@override_settings(WAGTAILIMAGES_MAX_UPLOAD_SIZE=1000)
def test_add_max_file_size_context_variables(self):
response = self.client.get(reverse('wagtailimages:add_multiple'))
self.assertEqual(response.context['max_filesize'], 1000)
self.assertEqual(
response.context['error_max_file_size'], "This file is too big. Maximum filesize 1000\xa0bytes."
)
def test_add_post(self):
"""
This tests that a POST request to the add view saves the image and returns an edit form
"""
response = self.client.post(reverse('wagtailimages:add_multiple'), {
'files[]': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertTemplateUsed(response, 'wagtailimages/multiple/edit_form.html')
# Check image
self.assertIn('image', response.context)
self.assertEqual(response.context['image'].title, 'test.png')
self.assertTrue(response.context['image'].file_size)
self.assertTrue(response.context['image'].file_hash)
# Check form
self.assertIn('form', response.context)
self.assertEqual(response.context['form'].initial['title'], 'test.png')
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertIn('form', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], response.context['image'].id)
self.assertTrue(response_json['success'])
def test_add_post_noajax(self):
"""
This tests that only AJAX requests are allowed to POST to the add view
"""
response = self.client.post(reverse('wagtailimages:add_multiple'), {})
# Check response
self.assertEqual(response.status_code, 400)
def test_add_post_nofile(self):
"""
This tests that the add view checks for a file when a user POSTs to it
"""
response = self.client.post(reverse('wagtailimages:add_multiple'), {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 400)
def test_add_post_badfile(self):
"""
This tests that the add view rejects a file that is not a valid image
"""
response = self.client.post(reverse('wagtailimages:add_multiple'), {
'files[]': SimpleUploadedFile('test.png', b"This is not an image!"),
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
response_json = json.loads(response.content.decode())
self.assertNotIn('image_id', response_json)
self.assertNotIn('form', response_json)
self.assertIn('success', response_json)
self.assertIn('error_message', response_json)
self.assertFalse(response_json['success'])
self.assertEqual(
response_json['error_message'], "Not a supported image format. Supported formats: GIF, JPEG, PNG, WEBP."
)
def test_edit_get(self):
"""
This tests that a GET request to the edit view returns a 405 "METHOD NOT ALLOWED" response
"""
# Send request
response = self.client.get(reverse('wagtailimages:edit_multiple', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 405)
def test_edit_post(self):
"""
This tests that a POST request to the edit view edits the image
"""
# Send request
response = self.client.post(reverse('wagtailimages:edit_multiple', args=(self.image.id, )), {
('image-%d-title' % self.image.id): "New title!",
('image-%d-tags' % self.image.id): "",
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertNotIn('form', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], self.image.id)
self.assertTrue(response_json['success'])
def test_edit_post_noajax(self):
"""
This tests that a POST request to the edit view without AJAX returns a 400 response
"""
# Send request
response = self.client.post(reverse('wagtailimages:edit_multiple', args=(self.image.id, )), {
('image-%d-title' % self.image.id): "New title!",
('image-%d-tags' % self.image.id): "",
})
# Check response
self.assertEqual(response.status_code, 400)
def test_edit_post_validation_error(self):
"""
This tests that a POST request to the edit page returns a json document with "success=False"
and a form with the validation error indicated
"""
# Send request
response = self.client.post(reverse('wagtailimages:edit_multiple', args=(self.image.id, )), {
('image-%d-title' % self.image.id): "", # Required
('image-%d-tags' % self.image.id): "",
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertTemplateUsed(response, 'wagtailimages/multiple/edit_form.html')
# Check that a form error was raised
self.assertFormError(response, 'form', 'title', "This field is required.")
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertIn('form', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], self.image.id)
self.assertFalse(response_json['success'])
def test_delete_get(self):
"""
This tests that a GET request to the delete view returns a 405 "METHOD NOT ALLOWED" response
"""
# Send request
response = self.client.get(reverse('wagtailimages:delete_multiple', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 405)
def test_delete_post(self):
"""
This tests that a POST request to the delete view deletes the image
"""
# Send request
response = self.client.post(reverse(
'wagtailimages:delete_multiple', args=(self.image.id, )
), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Make sure the image is deleted
self.assertFalse(Image.objects.filter(id=self.image.id).exists())
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], self.image.id)
self.assertTrue(response_json['success'])
def test_delete_post_noajax(self):
"""
This tests that a POST request to the delete view without AJAX returns a 400 response
"""
# Send request
response = self.client.post(reverse('wagtailimages:delete_multiple', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 400)
@override_settings(WAGTAILIMAGES_IMAGE_MODEL='tests.CustomImage')
class TestMultipleImageUploaderWithCustomImageModel(TestCase, WagtailTestUtils):
"""
This tests the multiple image upload views located in wagtailimages/views/multiple.py
with a custom image model
"""
def setUp(self):
self.login()
# Create an image for running tests on
self.image = CustomImage.objects.create(
title="Test image",
file=get_test_image_file(),
)
def test_add(self):
"""
This tests that the add view responds correctly on a GET request
"""
# Send request
response = self.client.get(reverse('wagtailimages:add_multiple'))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/multiple/add.html')
# response should include form media for the image edit form
self.assertContains(response, 'wagtailadmin/js/draftail.js')
def test_add_post(self):
"""
This tests that a POST request to the add view saves the image and returns an edit form
"""
response = self.client.post(reverse('wagtailimages:add_multiple'), {
'files[]': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertTemplateUsed(response, 'wagtailimages/multiple/edit_form.html')
# Check image
self.assertIn('image', response.context)
self.assertEqual(response.context['image'].title, 'test.png')
self.assertTrue(response.context['image'].file_size)
self.assertTrue(response.context['image'].file_hash)
# Check form
self.assertIn('form', response.context)
self.assertEqual(response.context['form'].initial['title'], 'test.png')
self.assertIn('caption', response.context['form'].fields)
self.assertNotIn('not_editable_field', response.context['form'].fields)
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertIn('form', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], response.context['image'].id)
self.assertTrue(response_json['success'])
def test_edit_post(self):
"""
This tests that a POST request to the edit view edits the image
"""
# Send request
response = self.client.post(reverse('wagtailimages:edit_multiple', args=(self.image.id, )), {
('image-%d-title' % self.image.id): "New title!",
('image-%d-tags' % self.image.id): "",
('image-%d-caption' % self.image.id): "a boot stamping on a human face, forever",
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertNotIn('form', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], self.image.id)
self.assertTrue(response_json['success'])
# check that image has been updated
new_image = CustomImage.objects.get(id=self.image.id)
self.assertEqual(new_image.title, "New title!")
self.assertEqual(new_image.caption, "a boot stamping on a human face, forever")
def test_delete_post(self):
"""
This tests that a POST request to the delete view deletes the image
"""
# Send request
response = self.client.post(reverse(
'wagtailimages:delete_multiple', args=(self.image.id, )
), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Make sure the image is deleted
self.assertFalse(Image.objects.filter(id=self.image.id).exists())
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], self.image.id)
self.assertTrue(response_json['success'])
# check that image has been deleted
self.assertEqual(CustomImage.objects.filter(id=self.image.id).count(), 0)
class TestURLGeneratorView(TestCase, WagtailTestUtils):
def setUp(self):
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
# Login
self.user = self.login()
def test_get(self):
"""
This tests that the view responds correctly for a user with edit permissions on this image
"""
# Get
response = self.client.get(reverse('wagtailimages:url_generator', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/url_generator.html')
def test_get_bad_permissions(self):
"""
This tests that the view returns a "permission denied" redirect if a user without correct
permissions attempts to access it
"""
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get
response = self.client.get(reverse('wagtailimages:url_generator', args=(self.image.id, )))
# Check response
self.assertRedirects(response, reverse('wagtailadmin_home'))
class TestGenerateURLView(TestCase, WagtailTestUtils):
def setUp(self):
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
# Login
self.user = self.login()
def test_get(self):
"""
This tests that the view responds correctly for a user with edit permissions on this image
"""
# Get
response = self.client.get(reverse('wagtailimages:generate_url', args=(self.image.id, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
content_json = json.loads(response.content.decode())
self.assertEqual(set(content_json.keys()), set(['url', 'preview_url']))
expected_url = 'http://localhost/images/%(signature)s/%(image_id)d/fill-800x600/' % {
'signature': urlquote(generate_signature(self.image.id, 'fill-800x600'), safe=urlquote_safechars),
'image_id': self.image.id,
}
self.assertEqual(content_json['url'], expected_url)
expected_preview_url = reverse('wagtailimages:preview', args=(self.image.id, 'fill-800x600'))
self.assertEqual(content_json['preview_url'], expected_preview_url)
def test_get_bad_permissions(self):
"""
This tests that the view gives a 403 if a user without correct permissions attempts to access it
"""
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get
response = self.client.get(reverse('wagtailimages:generate_url', args=(self.image.id, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 403)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
self.assertJSONEqual(response.content.decode(), json.dumps({
'error': 'You do not have permission to generate a URL for this image.',
}))
def test_get_bad_image(self):
"""
This tests that the view gives a 404 response if a user attempts to use it with an image which doesn't exist
"""
# Get
response = self.client.get(reverse('wagtailimages:generate_url', args=(self.image.id + 1, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 404)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
self.assertJSONEqual(response.content.decode(), json.dumps({
'error': 'Cannot find image.',
}))
def test_get_bad_filter_spec(self):
"""
This tests that the view gives a 400 response if the user attempts to use it with an invalid filter spec
"""
# Get
response = self.client.get(reverse('wagtailimages:generate_url', args=(self.image.id, 'bad-filter-spec')))
# Check response
self.assertEqual(response.status_code, 400)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
self.assertJSONEqual(response.content.decode(), json.dumps({
'error': 'Invalid filter spec.',
}))
class TestPreviewView(TestCase, WagtailTestUtils):
def setUp(self):
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
# Login
self.user = self.login()
def test_get(self):
"""
Test a valid GET request to the view
"""
# Get the image
response = self.client.get(reverse('wagtailimages:preview', args=(self.image.id, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'image/png')
def test_get_invalid_filter_spec(self):
"""
Test that an invalid filter spec returns a 400 response.
This is very unlikely to happen in reality. A user would have
to create a signature for the invalid filter spec, which can't be
done with Wagtail's built-in URL generator. We should test it
anyway though.
"""
# Get the image
response = self.client.get(reverse('wagtailimages:preview', args=(self.image.id, 'bad-filter-spec')))
# Check response
self.assertEqual(response.status_code, 400)
class TestEditOnlyPermissions(TestCase, WagtailTestUtils):
def setUp(self):
# Create an image to edit
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
# Create a user with change_image permission but not add_image
user = get_user_model().objects.create_user(
username='changeonly', email='changeonly@example.com', password='password'
)
change_permission = Permission.objects.get(content_type__app_label='wagtailimages', codename='change_image')
admin_permission = Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
image_changers_group = Group.objects.create(name='Image changers')
image_changers_group.permissions.add(admin_permission)
GroupCollectionPermission.objects.create(
group=image_changers_group,
collection=Collection.get_first_root_node(),
permission=change_permission
)
user.groups.add(image_changers_group)
self.assertTrue(self.client.login(username='changeonly', password='password'))
def test_get_index(self):
response = self.client.get(reverse('wagtailimages:index'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/index.html')
# user should not get an "Add an image" button
self.assertNotContains(response, "Add an image")
# user should be able to see images not owned by them
self.assertContains(response, "Test image")
def test_search(self):
response = self.client.get(reverse('wagtailimages:index'), {'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_get_add(self):
response = self.client.get(reverse('wagtailimages:add'))
# permission should be denied
self.assertRedirects(response, reverse('wagtailadmin_home'))
def test_get_edit(self):
response = self.client.get(reverse('wagtailimages:edit', args=(self.image.id,)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
def test_get_delete(self):
response = self.client.get(reverse('wagtailimages:delete', args=(self.image.id,)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/confirm_delete.html')
def test_get_add_multiple(self):
response = self.client.get(reverse('wagtailimages:add_multiple'))
# permission should be denied
self.assertRedirects(response, reverse('wagtailadmin_home'))
class TestImageAddMultipleView(TestCase, WagtailTestUtils):
def test_as_superuser(self):
self.login()
response = self.client.get(reverse('wagtailimages:add_multiple'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/multiple/add.html')
def test_as_ordinary_editor(self):
user = get_user_model().objects.create_user(username='editor', email='editor@email.com', password='password')
add_permission = Permission.objects.get(content_type__app_label='wagtailimages', codename='add_image')
admin_permission = Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
image_adders_group = Group.objects.create(name='Image adders')
image_adders_group.permissions.add(admin_permission)
GroupCollectionPermission.objects.create(group=image_adders_group, collection=Collection.get_first_root_node(), permission=add_permission)
user.groups.add(image_adders_group)
self.client.login(username='editor', password='password')
response = self.client.get(reverse('wagtailimages:add_multiple'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/multiple/add.html')
#!/usr/bin/env python3
"""Check the MANIFEST.in file in a Python source package for completeness.
This script works by building a source distribution archive (by running
setup.py sdist), then checking the file list in the archive against the
file list in version control (Subversion, Git, Mercurial, Bazaar are
supported).
Since the first check can fail to catch missing MANIFEST.in entries when
you've got the right setuptools version control system support plugins
installed, the script copies all the versioned files into a temporary
directory and builds the source distribution again. This also avoids issues
with stale egg-info/SOURCES.txt files that may cause files not mentioned in
MANIFEST.in to be included nevertheless.
"""
import argparse
import codecs
import configparser
import fnmatch
import locale
import os
import posixpath
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
import unicodedata
import zipfile
from contextlib import contextmanager
from typing import List, Optional, Union
from xml.etree import ElementTree as ET
import toml
from setuptools.command.egg_info import translate_pattern
# import distutils after setuptools to avoid a warning
from distutils.text_file import TextFile # isort:skip
__version__ = '0.48.dev0'
__author__ = 'Marius Gedminas <marius@gedmin.as>'
__licence__ = 'MIT'
__url__ = 'https://github.com/mgedmin/check-manifest'
class Failure(Exception):
"""An expected failure (as opposed to a bug in this script)."""
#
# User interface
#
class UI:
def __init__(self, verbosity=1):
self.verbosity = verbosity
self._to_be_continued = False
self.stdout = sys.stdout
self.stderr = sys.stderr
@property
def quiet(self):
return self.verbosity < 1
@property
def verbose(self):
return self.verbosity >= 2
def _check_tbc(self):
if self._to_be_continued:
print(file=self.stdout)
self._to_be_continued = False
def info(self, message):
if self.quiet:
return
self._check_tbc()
print(message, file=self.stdout)
def info_begin(self, message):
if not self.verbose:
return
self._check_tbc()
print(message, end="", file=self.stdout)
self._to_be_continued = True
def info_continue(self, message):
if not self.verbose:
return
print(message, end="", file=self.stdout)
self._to_be_continued = True
def info_end(self, message):
if not self.verbose:
return
print(message, file=self.stdout)
self._to_be_continued = False
def error(self, message):
self._check_tbc()
print(message, file=self.stderr)
def warning(self, message):
self._check_tbc()
print(message, file=self.stderr)
def format_list(list_of_strings):
return "\n".join(" " + s for s in list_of_strings)
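# For example, format_list(["setup.py", "README.rst"]) returns the two names,
# each on its own line and indented by two spaces, ready to be embedded in an
# error report.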
def format_missing(missing_from_a, missing_from_b, name_a, name_b):
res = []
if missing_from_a:
res.append("missing from %s:\n%s"
% (name_a, format_list(sorted(missing_from_a))))
if missing_from_b:
res.append("missing from %s:\n%s"
% (name_b, format_list(sorted(missing_from_b))))
return '\n'.join(res)
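# For example, format_missing({"a.txt"}, set(), "sdist", "VCS") produces
# "missing from sdist:" followed by the indented file name; an empty set on
# either side simply contributes nothing to the report.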
#
# Filesystem/OS utilities
#
class CommandFailed(Failure):
def __init__(self, command: List[str], status: int, output: str) -> None:
super().__init__("%s failed (status %s):\n%s" % (
command, status, output))
def run(
command: List[str],
*,
encoding: Optional[str] = None,
decode: bool = True,
cwd: Optional[str] = None # Python 3.5 forbids trailing comma here!
) -> Union[str, bytes]:
"""Run a command [cmd, arg1, arg2, ...].
Returns the output (stdout only).
Raises CommandFailed in cases of error.
"""
if not encoding:
encoding = locale.getpreferredencoding()
try:
pipe = subprocess.Popen(command, stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=cwd)
except OSError as e:
raise Failure(f"could not run {command}: {e}")
output, stderr = pipe.communicate()
status = pipe.wait()
if status != 0:
raise CommandFailed(command, status,
(output + stderr).decode(encoding, 'replace'))
if decode:
return output.decode(encoding)
return output
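# Typical use (hypothetical example; any [cmd, arg1, ...] list works and the
# caller receives the command's stdout):
#   files = run(['git', 'ls-files'])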
@contextmanager
def cd(directory):
"""Change the current working directory, temporarily.
Use as a context manager: with cd(d): ...
"""
old_dir = os.getcwd()
try:
os.chdir(directory)
yield
finally:
os.chdir(old_dir)
@contextmanager
def mkdtemp(hint=''):
"""Create a temporary directory, then clean it up.
Use as a context manager: with mkdtemp('-purpose'): ...
"""
dirname = tempfile.mkdtemp(prefix='check-manifest-', suffix=hint)
try:
yield dirname
finally:
rmtree(dirname)
def chmod_plus(path, add_bits):
"""Change a file's mode by adding a few bits.
Like chmod +<bits> <path> in a Unix shell.
"""
try:
os.chmod(path, stat.S_IMODE(os.stat(path).st_mode) | add_bits)
except OSError: # pragma: nocover
pass # well, we tried
def rmtree(path):
"""A version of rmtree that can deal with read-only files and directories.
Needed because the stock shutil.rmtree() fails with an access error
when there are read-only files in the directory on Windows, or when the
directory itself is read-only on Unix.
"""
def onerror(func, path, exc_info):
        # Did you know that on Python 3.3 on Windows os.remove() and
# os.unlink() are distinct functions?
if func is os.remove or func is os.unlink or func is os.rmdir:
if sys.platform != 'win32':
chmod_plus(os.path.dirname(path), stat.S_IWUSR | stat.S_IXUSR)
chmod_plus(path, stat.S_IWUSR)
func(path)
else:
raise
shutil.rmtree(path, onerror=onerror)
def copy_files(filelist, destdir):
"""Copy a list of files to destdir, preserving directory structure.
File names should be relative to the current working directory.
"""
for filename in filelist:
destfile = os.path.join(destdir, filename)
# filename should not be absolute, but let's double-check
assert destfile.startswith(destdir + os.path.sep)
destfiledir = os.path.dirname(destfile)
if not os.path.isdir(destfiledir):
os.makedirs(destfiledir)
if os.path.isdir(filename):
os.mkdir(destfile)
else:
shutil.copy2(filename, destfile)
def get_one_file_in(dirname):
"""Return the pathname of the one file in a directory.
Raises if the directory has no files or more than one file.
"""
files = os.listdir(dirname)
if len(files) > 1:
raise Failure('More than one file exists in %s:\n%s' %
(dirname, '\n'.join(sorted(files))))
elif not files:
raise Failure('No files found in %s' % dirname)
return os.path.join(dirname, files[0])
#
# File lists are a fundamental data structure here. We want them to have
# the following properties:
#
# - contain Unicode filenames (normalized to NFC on OS X)
# - be sorted
# - use / as the directory separator
# - list only files, but not directories
#
# We get these file lists from various sources (zip files, tar files, version
# control systems) and we have to normalize them into our common format before
# comparing.
#
def canonical_file_list(filelist):
"""Return the file list convered to a canonical form.
This means:
- converted to Unicode normal form C, when running on Mac OS X
- sorted alphabetically
- use / as the directory separator
- list files but not directories
Caveat: since it works on file lists taken from archives and such, it
doesn't know whether a particular filename refers to a file or a directory,
    unless it finds another filename that is inside the first one. In other
words, canonical_file_list() will not remove the names of empty directories
if those appear in the initial file list.
"""
names = set(normalize_names(filelist))
for name in list(names):
while name:
name = posixpath.dirname(name)
names.discard(name)
return sorted(names)
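# A hedged example of the canonicalization above (names that also appear as
# directory prefixes of other entries are dropped, and the result is sorted):
#
#     canonical_file_list(['a/', 'a/b', 'c'])
#     # -> ['a/b', 'c']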
def get_sdist_file_list(sdist_filename, ignore):
"""Return the list of interesting files in a source distribution.
Removes extra generated files like PKG-INFO and *.egg-info that are usually
present only in the sdist, but not in the VCS.
Supports .tar.gz and .zip sdists.
"""
return strip_sdist_extras(
ignore,
strip_toplevel_name(get_archive_file_list(sdist_filename)))
def get_archive_file_list(archive_filename):
"""Return the list of files in an archive.
Supports .tar.gz and .zip.
"""
if archive_filename.endswith('.zip'):
with zipfile.ZipFile(archive_filename) as zf:
filelist = zf.namelist()
elif archive_filename.endswith(('.tar.gz', '.tar.bz2', '.tar')):
with tarfile.open(archive_filename) as tf:
# XXX: is unicodify() necessary now that Py2 is no longer supported?
filelist = map(unicodify, tf.getnames())
else:
raise Failure('Unrecognized archive type: %s'
% os.path.basename(archive_filename))
return canonical_file_list(filelist)
def unicodify(filename):
"""Make sure filename is Unicode.
Because the tarfile module on Python 2 doesn't return Unicode.
"""
if isinstance(filename, bytes):
# XXX: Ah, but is it right to use the locale encoding here, or should I
# use sys.getfilesystemencoding()? A good question!
return filename.decode(locale.getpreferredencoding())
else:
return filename
def strip_toplevel_name(filelist):
"""Strip toplevel name from a file list.
>>> strip_toplevel_name(['a', 'a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d']
>>> strip_toplevel_name(['a', 'a/', 'a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d']
>>> strip_toplevel_name(['a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d']
"""
if not filelist:
return filelist
prefix = filelist[0]
    # in this function we assume / is the directory separator
if '/' in prefix:
prefix = prefix.partition('/')[0] + '/'
names = filelist
else:
prefix = prefix + '/'
names = filelist[1:]
for name in names:
if not name.startswith(prefix):
raise Failure("File doesn't have the common prefix (%s): %s"
% (name, prefix))
return [name[len(prefix):] for name in names if name != prefix]
class VCS:
def __init__(self, ui):
self.ui = ui
@classmethod
def detect(cls, location):
return os.path.isdir(os.path.join(location, cls.metadata_name))
def get_versioned_files(self):
raise NotImplementedError('this is an abstract method')
class Git(VCS):
metadata_name = '.git'
# Git for Windows uses UTF-8 instead of the locale encoding.
# Git on POSIX systems uses the locale encoding.
_encoding = 'UTF-8' if sys.platform == 'win32' else None
@classmethod
def detect(cls, location):
# .git can be a file for submodules
return os.path.exists(os.path.join(location, cls.metadata_name))
    @classmethod
    def _has_submodules(cls):
return os.path.exists(".gitmodules")
def get_versioned_files(self):
"""List all files versioned by git in the current directory."""
extra_args = ["--recurse-submodules"] if self._has_submodules() else []
output = run(
["git", "ls-files", "-z"] + extra_args,
encoding=self._encoding,
)
# -z tells git to use \0 as a line terminator; split() treats it as a
# line separator, so we always get one empty line at the end, which we
# drop with the [:-1] slice
return output.split("\0")[:-1]
class Mercurial(VCS):
metadata_name = '.hg'
def get_versioned_files(self):
"""List all files under Mercurial control in the current directory."""
output = run(['hg', 'status', '-ncamd', '.'])
return output.splitlines()
class Bazaar(VCS):
metadata_name = '.bzr'
@classmethod
    def _get_terminal_encoding(cls):
# Python 3.6 lets us name the OEM codepage directly, which is lucky
# because it also breaks our old method of OEM codepage detection
# (PEP-528 changed sys.stdout.encoding to UTF-8).
try:
codecs.lookup('oem')
except LookupError:
pass
else: # pragma: nocover
return 'oem'
# Based on bzrlib.osutils.get_terminal_encoding()
encoding = getattr(sys.stdout, 'encoding', None)
if not encoding:
encoding = getattr(sys.stdin, 'encoding', None)
if encoding == 'cp0': # "no codepage"
encoding = None
# NB: bzrlib falls back on bzrlib.osutils.get_user_encoding(),
# which is like locale.getpreferredencoding() on steroids, and
# also includes a fallback from 'ascii' to 'utf-8' when
# sys.platform is 'darwin'. This is probably something we might
# want to do in run(), but I'll wait for somebody to complain
# first, since I don't have a Mac OS X machine and cannot test.
return encoding
def get_versioned_files(self):
"""List all files versioned in Bazaar in the current directory."""
encoding = self._get_terminal_encoding()
output = run(['bzr', 'ls', '-VR'], encoding=encoding)
return output.splitlines()
class Subversion(VCS):
metadata_name = '.svn'
def get_versioned_files(self):
"""List all files under SVN control in the current directory."""
output = run(['svn', 'st', '-vq', '--xml'], decode=False)
tree = ET.XML(output)
return sorted(entry.get('path') for entry in tree.findall('.//entry')
if self.is_interesting(entry))
def is_interesting(self, entry):
"""Is this entry interesting?
``entry`` is an XML node representing one entry of the svn status
XML output. It looks like this::
<entry path="unchanged.txt">
<wc-status item="normal" revision="1" props="none">
<commit revision="1">
<author>mg</author>
<date>2015-02-06T07:52:38.163516Z</date>
</commit>
</wc-status>
</entry>
<entry path="added-but-not-committed.txt">
<wc-status item="added" revision="-1" props="none"></wc-status>
</entry>
<entry path="ext">
<wc-status item="external" props="none"></wc-status>
</entry>
<entry path="unknown.txt">
<wc-status props="none" item="unversioned"></wc-status>
</entry>
"""
if entry.get('path') == '.':
return False
status = entry.find('wc-status')
if status is None:
self.ui.warning(
'svn status --xml parse error: <entry path="%s"> without'
' <wc-status>' % entry.get('path')
)
return False
# For SVN externals we get two entries: one mentioning the
# existence of the external, and one about the status of the external.
if status.get('item') in ('unversioned', 'external'):
return False
return True
def detect_vcs(ui):
"""Detect the version control system used for the current directory."""
location = os.path.abspath('.')
while True:
for vcs in Git, Mercurial, Bazaar, Subversion:
if vcs.detect(location):
return vcs(ui)
parent = os.path.dirname(location)
if parent == location:
raise Failure("Couldn't find version control data"
" (git/hg/bzr/svn supported)")
location = parent
def get_vcs_files(ui):
"""List all files under version control in the current directory."""
vcs = detect_vcs(ui)
return canonical_file_list(vcs.get_versioned_files())
def normalize_names(names):
"""Normalize file names."""
return [normalize_name(name) for name in names]
def normalize_name(name):
"""Some VCS print directory names with trailing slashes. Strip them.
Easiest is to normalize the path.
And encodings may trip us up too, especially when comparing lists
of files. Plus maybe lowercase versus uppercase.
"""
name = os.path.normpath(name).replace(os.path.sep, '/')
name = unicodify(name) # XXX is this necessary?
if sys.platform == 'darwin':
# Mac OS X may have problems comparing non-ASCII filenames, so
# we convert them.
name = unicodedata.normalize('NFC', name)
return name
#
# Packaging logic
#
class IgnoreList:
def __init__(self):
self._regexps = []
@classmethod
def default(cls):
return (
cls()
# these are always generated
.global_exclude('PKG-INFO')
.global_exclude('*.egg-info/*')
# setup.cfg is always generated, but sometimes also kept in source control
.global_exclude('setup.cfg')
# it's not a problem if the sdist is lacking these files:
.global_exclude(
'.hgtags', '.hgsigs', '.hgignore', '.gitignore', '.bzrignore',
'.gitattributes',
)
# GitHub template files
.prune('.github')
# we can do without these in sdists
.global_exclude('.travis.yml')
.global_exclude('Jenkinsfile')
# It's convenient to ship compiled .mo files in sdists, but they
# shouldn't be checked in, so don't complain that they're missing
# from VCS
.global_exclude('*.mo')
)
def clear(self):
self._regexps = []
def __repr__(self):
return 'IgnoreList(%r)' % (self._regexps)
def __eq__(self, other):
return isinstance(other, IgnoreList) and self._regexps == other._regexps
def __iadd__(self, other):
assert isinstance(other, IgnoreList)
self._regexps += other._regexps
return self
def _path(self, path):
return path.replace('/', os.path.sep)
def exclude(self, *patterns):
for pat in patterns:
pat = self._path(pat)
self._regexps.append(translate_pattern(pat))
return self
def global_exclude(self, *patterns):
for pat in patterns:
pat = os.path.join('**', self._path(pat))
self._regexps.append(translate_pattern(pat))
return self
def recursive_exclude(self, dirname, *patterns):
dirname = self._path(dirname)
for pat in patterns:
pat = os.path.join(dirname, '**', self._path(pat))
self._regexps.append(translate_pattern(pat))
return self
def prune(self, subdir):
pat = os.path.join(self._path(subdir), '**')
self._regexps.append(translate_pattern(pat))
return self
def filter(self, filelist):
return [name for name in filelist
if not any(rx.match(self._path(name)) for rx in self._regexps)]
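# A minimal usage sketch (hypothetical file names, assuming the setuptools
# translate_pattern semantics used above): the builder methods mirror the
# corresponding MANIFEST.in commands, and filter() drops matching entries.
#
#     ignore = IgnoreList().global_exclude('*.pyc').prune('docs/_build')
#     ignore.filter(['pkg/a.py', 'pkg/b.pyc', 'docs/_build/html/index.html'])
#     # -> ['pkg/a.py']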
WARN_ABOUT_FILES_IN_VCS = [
# generated files should not be committed into the VCS
'PKG-INFO',
'*.egg-info',
'*.mo',
'*.py[co]',
'*.so',
'*.pyd',
'*~',
'.*.sw[po]',
'.#*',
]
SUGGESTIONS = [(re.compile(pattern), suggestion) for pattern, suggestion in [
# regexp -> suggestion
('^([^/]+[.](cfg|ini))$', r'include \1'),
('^([.]travis[.]yml)$', r'include \1'),
('^([.]coveragerc)$', r'include \1'),
('^([A-Z]+)$', r'include \1'),
('^(Makefile)$', r'include \1'),
('^[^/]+[.](txt|rst|py)$', r'include *.\1'),
('^([a-zA-Z_][a-zA-Z_0-9]*)/'
'.*[.](py|zcml|pt|mako|xml|html|txt|rst|css|png|jpg|dot|po|pot|mo|ui|desktop|bat)$',
r'recursive-include \1 *.\2'),
('^([a-zA-Z_][a-zA-Z_0-9]*)(?:/.*)?/(Makefile)$',
r'recursive-include \1 \2'),
# catch-all rules that actually cover some of the above; somewhat
# experimental: I fear false positives
('^([a-zA-Z_0-9]+)$', r'include \1'),
('^[^/]+[.]([a-zA-Z_0-9]+)$', r'include *.\1'),
('^([a-zA-Z_][a-zA-Z_0-9]*)/.*[.]([a-zA-Z_0-9]+)$',
r'recursive-include \1 *.\2'),
]]
CFG_SECTION_CHECK_MANIFEST = 'check-manifest'
CFG_IGNORE_DEFAULT_RULES = (CFG_SECTION_CHECK_MANIFEST, 'ignore-default-rules')
CFG_IGNORE = (CFG_SECTION_CHECK_MANIFEST, 'ignore')
CFG_IGNORE_BAD_IDEAS = (CFG_SECTION_CHECK_MANIFEST, 'ignore-bad-ideas')
def read_config():
"""Read configuration from file if possible."""
ignore = IgnoreList.default()
ignore_bad_ideas = IgnoreList()
config = _load_config()
if config.get(CFG_IGNORE_DEFAULT_RULES[1], False):
ignore.clear()
if CFG_IGNORE[1] in config:
for p in config[CFG_IGNORE[1]]:
if p:
ignore.global_exclude(p)
if CFG_IGNORE_BAD_IDEAS[1] in config:
for p in config[CFG_IGNORE_BAD_IDEAS[1]]:
if p:
ignore_bad_ideas.global_exclude(p)
return ignore, ignore_bad_ideas
def _load_config():
"""Searches for config files, reads them and returns a dictionary
Looks for a ``check-manifest`` section in ``pyproject.toml``,
``setup.cfg``, and ``tox.ini``, in that order. The first file
that exists and has that section will be loaded and returned as a
dictionary.
"""
if os.path.exists("pyproject.toml"):
config = toml.load("pyproject.toml")
if CFG_SECTION_CHECK_MANIFEST in config.get("tool", {}):
return config["tool"][CFG_SECTION_CHECK_MANIFEST]
search_files = ['setup.cfg', 'tox.ini']
config_parser = configparser.ConfigParser()
for filename in search_files:
if (config_parser.read([filename])
and config_parser.has_section(CFG_SECTION_CHECK_MANIFEST)):
config = {}
if config_parser.has_option(*CFG_IGNORE_DEFAULT_RULES):
ignore_defaults = config_parser.getboolean(*CFG_IGNORE_DEFAULT_RULES)
config[CFG_IGNORE_DEFAULT_RULES[1]] = ignore_defaults
if config_parser.has_option(*CFG_IGNORE):
patterns = [
p.strip()
for p in config_parser.get(*CFG_IGNORE).splitlines()
]
config[CFG_IGNORE[1]] = patterns
if config_parser.has_option(*CFG_IGNORE_BAD_IDEAS):
patterns = [
p.strip()
for p in config_parser.get(*CFG_IGNORE_BAD_IDEAS).splitlines()
]
config[CFG_IGNORE_BAD_IDEAS[1]] = patterns
return config
return {}
def read_manifest(ui):
"""Read existing configuration from MANIFEST.in.
We use that to ignore anything the MANIFEST.in ignores.
"""
if not os.path.isfile('MANIFEST.in'):
return IgnoreList()
return _get_ignore_from_manifest('MANIFEST.in', ui)
def _get_ignore_from_manifest(filename, ui):
"""Gather the various ignore patterns from a MANIFEST.in.
Returns an IgnoreList instance.
"""
class MyTextFile(TextFile):
def error(self, msg, line=None): # pragma: nocover
# (this is never called by TextFile in current versions of CPython)
raise Failure(self.gen_error(msg, line))
def warn(self, msg, line=None):
ui.warning(self.gen_error(msg, line))
template = MyTextFile(filename,
strip_comments=True,
skip_blanks=True,
join_lines=True,
lstrip_ws=True,
rstrip_ws=True,
collapse_join=True)
try:
lines = template.readlines()
finally:
template.close()
return _get_ignore_from_manifest_lines(lines, ui)
def _get_ignore_from_manifest_lines(lines, ui):
"""Gather the various ignore patterns from a MANIFEST.in.
'lines' should be a list of strings with comments removed
and continuation lines joined.
Returns an IgnoreList instance.
"""
ignore = IgnoreList()
for line in lines:
try:
cmd, rest = line.split(None, 1)
except ValueError:
# no whitespace, so not interesting
continue
for part in rest.split():
# distutils enforces these warnings on Windows only
if part.startswith('/'):
ui.warning("ERROR: Leading slashes are not allowed in MANIFEST.in on Windows: %s" % part)
if part.endswith('/'):
ui.warning("ERROR: Trailing slashes are not allowed in MANIFEST.in on Windows: %s" % part)
if cmd == 'exclude':
ignore.exclude(*rest.split())
elif cmd == 'global-exclude':
ignore.global_exclude(*rest.split())
elif cmd == 'recursive-exclude':
try:
dirname, patterns = rest.split(None, 1)
except ValueError:
# Wrong MANIFEST.in line.
ui.warning(
"You have a wrong line in MANIFEST.in: %r\n"
"'recursive-exclude' expects <dir> <pattern1> <pattern2>..."
% line
)
continue
ignore.recursive_exclude(dirname, *patterns.split())
elif cmd == 'prune':
ignore.prune(rest)
    # XXX: This ignores all 'include'/'global-include'/'recursive-include'/'graft' commands,
# which is wrong! Quoting the documentation:
#
# The order of commands in the manifest template matters: initially,
# we have the list of default files as described above, and each
# command in the template adds to or removes from that list of
# files.
# -- https://docs.python.org/3.8/distutils/sourcedist.html#specifying-the-files-to-distribute
return ignore
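# For example (a rough sketch, not exhaustive), these MANIFEST.in lines:
#
#     exclude Makefile
#     global-exclude *.pyc
#     recursive-exclude tests *.log
#     prune docs/_build
#
# would populate the IgnoreList roughly like
#
#     IgnoreList().exclude('Makefile').global_exclude('*.pyc') \
#                 .recursive_exclude('tests', '*.log').prune('docs/_build')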
def file_matches(filename, patterns):
"""Does this filename match any of the patterns?"""
return any(fnmatch.fnmatch(filename, pat)
or fnmatch.fnmatch(os.path.basename(filename), pat)
for pat in patterns)
def strip_sdist_extras(ignore, filelist):
"""Strip generated files that are only present in source distributions.
We also strip files that are ignored for other reasons, like
command line arguments, setup.cfg rules or MANIFEST.in rules.
"""
return ignore.filter(filelist)
def find_bad_ideas(filelist):
"""Find files matching WARN_ABOUT_FILES_IN_VCS patterns."""
return [name for name in filelist
if file_matches(name, WARN_ABOUT_FILES_IN_VCS)]
def find_suggestions(filelist):
"""Suggest MANIFEST.in patterns for missing files.
    Returns two lists: one with suggested MANIFEST.in commands, and one with
files for which no suggestions were offered.
"""
suggestions = set()
unknowns = []
for filename in filelist:
for pattern, suggestion in SUGGESTIONS:
m = pattern.match(filename)
if m is not None:
suggestions.add(pattern.sub(suggestion, filename))
break
else:
unknowns.append(filename)
return sorted(suggestions), unknowns
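# Hedged example of the suggestion machinery (file names are hypothetical):
#
#     find_suggestions(['CHANGES.rst', 'docs/index.rst', '.hidden-config'])
#     # -> (['include *.rst', 'recursive-include docs *.rst'], ['.hidden-config'])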
def is_package(source_tree='.'):
"""Is the directory the root of a Python package?
Note: the term "package" here refers to a collection of files
with a setup.py/pyproject.toml, not to a directory with an __init__.py.
"""
return (
os.path.exists(os.path.join(source_tree, 'setup.py'))
or os.path.exists(os.path.join(source_tree, 'pyproject.toml'))
)
def extract_version_from_filename(filename):
"""Extract version number from sdist filename."""
filename = os.path.splitext(os.path.basename(filename))[0]
if filename.endswith('.tar'):
filename = os.path.splitext(filename)[0]
return filename.split('-')[-1]
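# For instance (hypothetical sdist names), 'dist/check_manifest-0.48.dev0.tar.gz'
# and 'dist/check_manifest-0.48.dev0.zip' both yield '0.48.dev0'.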
def should_use_pep_517():
"""Check if the project uses PEP-517 builds."""
# https://www.python.org/dev/peps/pep-0517/#build-system-table says
# "If the pyproject.toml file is absent, or the build-backend key is
# missing, the source tree is not using this specification, and tools
# should revert to the legacy behaviour of running setup.py".
if not os.path.exists('pyproject.toml'):
return False
config = toml.load("pyproject.toml")
if "build-system" not in config:
return False
if "build-backend" not in config["build-system"]:
return False
return True
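# A pyproject.toml that makes this return True looks roughly like the
# following (the backend shown is only an example):
#
#     [build-system]
#     requires = ["setuptools>=42", "wheel"]
#     build-backend = "setuptools.build_meta"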
def build_sdist(tempdir, python=sys.executable, build_isolation=True):
"""Build a source distribution in a temporary directory.
Should be run with the current working directory inside the Python package
you want to build.
"""
if should_use_pep_517():
# I could do this in-process with
# import build.__main__
# build.__main__.build('.', tempdir)
# but then it would print a bunch of things to stdout and I'd have to
# worry about exceptions
cmd = [python, '-m', 'build', '--sdist', '.', '--outdir', tempdir]
if not build_isolation:
cmd.append('--no-isolation')
run(cmd)
else:
run([python, 'setup.py', 'sdist', '-d', tempdir])
def check_manifest(source_tree='.', create=False, update=False,
python=sys.executable, ui=None, extra_ignore=None,
extra_ignore_bad_ideas=None,
build_isolation=True):
"""Compare a generated source distribution with list of files in a VCS.
Returns True if the manifest is fine.
"""
if ui is None:
ui = UI()
all_ok = True
if os.path.sep in python:
python = os.path.abspath(python)
with cd(source_tree):
if not is_package():
raise Failure(
'This is not a Python project (no setup.py/pyproject.toml).')
ignore, ignore_bad_ideas = read_config()
ignore += read_manifest(ui)
if extra_ignore:
ignore += extra_ignore
if extra_ignore_bad_ideas:
ignore_bad_ideas += extra_ignore_bad_ideas
ui.info_begin("listing source files under version control")
all_source_files = get_vcs_files(ui)
source_files = strip_sdist_extras(ignore, all_source_files)
ui.info_continue(": %d files and directories" % len(source_files))
if not all_source_files:
raise Failure('There are no files added to version control!')
ui.info_begin("building an sdist")
with mkdtemp('-sdist') as tempdir:
build_sdist(tempdir, python=python, build_isolation=build_isolation)
sdist_filename = get_one_file_in(tempdir)
ui.info_continue(": %s" % os.path.basename(sdist_filename))
sdist_files = get_sdist_file_list(sdist_filename, ignore)
ui.info_continue(": %d files and directories" % len(sdist_files))
version = extract_version_from_filename(sdist_filename)
existing_source_files = list(filter(os.path.exists, all_source_files))
missing_source_files = sorted(set(all_source_files) - set(existing_source_files))
if missing_source_files:
ui.warning("some files listed as being under source control are missing:\n%s"
% format_list(missing_source_files))
ui.info_begin("copying source files to a temporary directory")
with mkdtemp('-sources') as tempsourcedir:
copy_files(existing_source_files, tempsourcedir)
for filename in 'MANIFEST.in', 'setup.py', 'pyproject.toml':
if filename not in source_files and os.path.exists(filename):
# See https://github.com/mgedmin/check-manifest/issues/7
# and https://github.com/mgedmin/check-manifest/issues/46:
# if we do this, the user gets a warning about files
# missing from source control; if we don't do this,
# things get very confusing for the user!
copy_files([filename], tempsourcedir)
ui.info_begin("building a clean sdist")
with cd(tempsourcedir):
with mkdtemp('-sdist') as tempdir:
os.environ['SETUPTOOLS_SCM_PRETEND_VERSION'] = version
build_sdist(tempdir, python=python, build_isolation=build_isolation)
sdist_filename = get_one_file_in(tempdir)
ui.info_continue(": %s" % os.path.basename(sdist_filename))
clean_sdist_files = get_sdist_file_list(sdist_filename, ignore)
ui.info_continue(": %d files and directories" % len(clean_sdist_files))
missing_from_manifest = set(source_files) - set(clean_sdist_files)
missing_from_VCS = set(sdist_files + clean_sdist_files) - set(source_files)
if not missing_from_manifest and not missing_from_VCS:
ui.info("lists of files in version control and sdist match")
else:
ui.error(
"lists of files in version control and sdist do not match!\n%s"
% format_missing(missing_from_VCS, missing_from_manifest, "VCS", "sdist"))
suggestions, unknowns = find_suggestions(missing_from_manifest)
user_asked_for_help = update or (create and not
os.path.exists('MANIFEST.in'))
if 'MANIFEST.in' not in existing_source_files:
if suggestions and not user_asked_for_help:
ui.info("no MANIFEST.in found; you can run 'check-manifest -c' to create one")
else:
ui.info("no MANIFEST.in found")
if suggestions:
ui.info("suggested MANIFEST.in rules:\n%s" % format_list(suggestions))
if user_asked_for_help:
existed = os.path.exists('MANIFEST.in')
with open('MANIFEST.in', 'a') as f:
if not existed:
ui.info("creating MANIFEST.in")
else:
ui.info("updating MANIFEST.in")
f.write('\n# added by check-manifest\n')
f.write('\n'.join(suggestions) + '\n')
if unknowns:
ui.info("don't know how to come up with rules matching\n%s"
% format_list(unknowns))
elif user_asked_for_help:
ui.info("don't know how to come up with rules matching any of the files, sorry!")
all_ok = False
bad_ideas = find_bad_ideas(all_source_files)
filtered_bad_ideas = ignore_bad_ideas.filter(bad_ideas)
if filtered_bad_ideas:
ui.warning(
"you have %s in source control!\n"
"that's a bad idea: auto-generated files should not be versioned"
% filtered_bad_ideas[0])
if len(filtered_bad_ideas) > 1:
ui.warning("this also applies to the following:\n%s"
% format_list(filtered_bad_ideas[1:]))
all_ok = False
return all_ok
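# Minimal programmatic usage sketch (mirrors what main() below does):
#
#     if not check_manifest('.', ui=UI(verbosity=2)):
#         sys.exit(1)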
#
# Main script
#
def main():
parser = argparse.ArgumentParser(
description="Check a Python MANIFEST.in file for completeness",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('source_tree', default='.', nargs='?',
help='location for the source tree')
parser.add_argument('--version', action='version',
version='%(prog)s version ' + __version__)
parser.add_argument('-q', '--quiet', action='store_const', dest='quiet',
const=0, default=1, help='reduced output verbosity')
parser.add_argument('-v', '--verbose', action='store_const', dest='verbose',
const=1, default=0, help='more verbose output')
parser.add_argument('-c', '--create', action='store_true',
help='create a MANIFEST.in if missing')
parser.add_argument('-u', '--update', action='store_true',
help='append suggestions to MANIFEST.in (implies --create)')
parser.add_argument('-p', '--python', default=sys.executable,
help='use this Python interpreter for running setup.py sdist')
parser.add_argument('--ignore', metavar='patterns', default=None,
help='ignore files/directories matching these'
' comma-separated patterns')
parser.add_argument('--ignore-bad-ideas', metavar='patterns',
default=[], help='ignore bad idea files/directories '
'matching these comma-separated patterns')
parser.add_argument(
'--no-build-isolation', dest='build_isolation', action='store_false',
help='Disable isolation when building a modern source distribution. '
'Build dependencies specified by PEP 518 must be already installed if '
'this option is used.',
)
args = parser.parse_args()
ignore = IgnoreList()
if args.ignore:
ignore.global_exclude(*args.ignore.split(','))
ignore_bad_ideas = IgnoreList()
if args.ignore_bad_ideas:
ignore_bad_ideas.global_exclude(*args.ignore_bad_ideas.split(','))
ui = UI(verbosity=args.quiet + args.verbose)
try:
if not check_manifest(args.source_tree, create=args.create,
update=args.update, python=args.python,
ui=ui, extra_ignore=ignore,
extra_ignore_bad_ideas=ignore_bad_ideas,
build_isolation=args.build_isolation):
sys.exit(1)
except Failure as e:
ui.error(str(e))
sys.exit(2)
#
# zest.releaser integration
#
def zest_releaser_check(data):
"""Check the completeness of MANIFEST.in before the release.
This is an entry point for zest.releaser. See the documentation at
https://zestreleaser.readthedocs.io/en/latest/entrypoints.html
"""
from zest.releaser.utils import ask
source_tree = data['workingdir']
if not is_package(source_tree):
# You can use zest.releaser on things that are not Python packages.
# It's pointless to run check-manifest in those circumstances.
# See https://github.com/mgedmin/check-manifest/issues/9 for details.
return
if not ask("Do you want to run check-manifest?"):
return
ui = UI()
try:
if not check_manifest(source_tree, ui=ui):
if not ask("MANIFEST.in has problems."
" Do you want to continue despite that?", default=False):
sys.exit(1)
except Failure as e:
ui.error(str(e))
if not ask("Something bad happened."
" Do you want to continue despite that?", default=False):
sys.exit(2)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with threads and ``Futures``.
``Futures`` are a pattern for concurrent programming introduced in
Python 3.2 in the `concurrent.futures` package. This package defines
a mostly-compatible `Future` class designed for use from coroutines,
as well as some utility functions for interacting with the
`concurrent.futures` package.
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import platform
import textwrap
import traceback
import sys
from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext, wrap
from tornado.util import raise_exc_info, ArgReplacer, is_finalizing
try:
from concurrent import futures
except ImportError:
futures = None
try:
import typing
except ImportError:
typing = None
# Can the garbage collector handle cycles that include __del__ methods?
# This is true in cpython beginning with version 3.4 (PEP 442).
_GC_CYCLE_FINALIZERS = (platform.python_implementation() == 'CPython' and
sys.version_info >= (3, 4))
class ReturnValueIgnoredError(Exception):
pass
# This class and associated code in the future object is derived
# from the Trollius project, a backport of asyncio to Python 2.x - 3.x
class _TracebackLogger(object):
"""Helper to log a traceback upon destruction if not cleared.
This solves a nasty problem with Futures and Tasks that have an
exception set: if nobody asks for the exception, the exception is
never logged. This violates the Zen of Python: 'Errors should
never pass silently. Unless explicitly silenced.'
However, we don't want to log the exception as soon as
set_exception() is called: if the calling code is written
properly, it will get the exception and handle it properly. But
we *do* want to log it if result() or exception() was never called
-- otherwise developers waste a lot of time wondering why their
buggy code fails silently.
An earlier attempt added a __del__() method to the Future class
itself, but this backfired because the presence of __del__()
prevents garbage collection from breaking cycles. A way out of
this catch-22 is to avoid having a __del__() method on the Future
class itself, but instead to have a reference to a helper object
with a __del__() method that logs the traceback, where we ensure
that the helper object doesn't participate in cycles, and only the
Future has a reference to it.
The helper object is added when set_exception() is called. When
the Future is collected, and the helper is present, the helper
object is also collected, and its __del__() method will log the
traceback. When the Future's result() or exception() method is
    called (and a helper object is present), it removes the helper
object, after calling its clear() method to prevent it from
logging.
One downside is that we do a fair amount of work to extract the
traceback from the exception, even when it is never logged. It
would seem cheaper to just store the exception object, but that
references the traceback, which references stack frames, which may
reference the Future, which references the _TracebackLogger, and
then the _TracebackLogger would be included in a cycle, which is
what we're trying to avoid! As an optimization, we don't
immediately format the exception; we only do the work when
activate() is called, which call is delayed until after all the
    Future's callbacks have run.  Usually a Future has at least one
    callback (typically set by 'yield From'), and usually that callback
    extracts the exception, so the traceback rarely needs to be
    formatted at all.
PS. I don't claim credit for this solution. I first heard of it
in a discussion about closing files when they are collected.
"""
__slots__ = ('exc_info', 'formatted_tb')
def __init__(self, exc_info):
self.exc_info = exc_info
self.formatted_tb = None
def activate(self):
exc_info = self.exc_info
if exc_info is not None:
self.exc_info = None
self.formatted_tb = traceback.format_exception(*exc_info)
def clear(self):
self.exc_info = None
self.formatted_tb = None
def __del__(self, is_finalizing=is_finalizing):
if not is_finalizing() and self.formatted_tb:
app_log.error('Future exception was never retrieved: %s',
''.join(self.formatted_tb).rstrip())
class Future(object):
"""Placeholder for an asynchronous result.
A ``Future`` encapsulates the result of an asynchronous
operation. In synchronous applications ``Futures`` are used
to wait for the result from a thread or process pool; in
Tornado they are normally used with `.IOLoop.add_future` or by
yielding them in a `.gen.coroutine`.
`tornado.concurrent.Future` is similar to
`concurrent.futures.Future`, but not thread-safe (and therefore
faster for use with single-threaded event loops).
In addition to ``exception`` and ``set_exception``, methods ``exc_info``
and ``set_exc_info`` are supported to capture tracebacks in Python 2.
The traceback is automatically available in Python 3, but in the
Python 2 futures backport this information is discarded.
This functionality was previously available in a separate class
``TracebackFuture``, which is now a deprecated alias for this class.
.. versionchanged:: 4.0
`tornado.concurrent.Future` is always a thread-unsafe ``Future``
with support for the ``exc_info`` methods. Previously it would
be an alias for the thread-safe `concurrent.futures.Future`
if that package was available and fall back to the thread-unsafe
implementation if it was not.
.. versionchanged:: 4.1
If a `.Future` contains an error but that error is never observed
(by calling ``result()``, ``exception()``, or ``exc_info()``),
a stack trace will be logged when the `.Future` is garbage collected.
This normally indicates an error in the application, but in cases
where it results in undesired logging it may be necessary to
suppress the logging by ensuring that the exception is observed:
``f.add_done_callback(lambda f: f.exception())``.
"""
def __init__(self):
self._done = False
self._result = None
self._exc_info = None
self._log_traceback = False # Used for Python >= 3.4
self._tb_logger = None # Used for Python <= 3.3
self._callbacks = []
# Implement the Python 3.5 Awaitable protocol if possible
# (we can't use return and yield together until py33).
if sys.version_info >= (3, 3):
exec(textwrap.dedent("""
def __await__(self):
return (yield self)
"""))
else:
# Py2-compatible version for use with cython.
def __await__(self):
result = yield self
# StopIteration doesn't take args before py33,
# but Cython recognizes the args tuple.
e = StopIteration()
e.args = (result,)
raise e
def cancel(self):
"""Cancel the operation, if possible.
Tornado ``Futures`` do not support cancellation, so this method always
returns False.
"""
return False
def cancelled(self):
"""Returns True if the operation has been cancelled.
Tornado ``Futures`` do not support cancellation, so this method
always returns False.
"""
return False
def running(self):
"""Returns True if this operation is currently running."""
return not self._done
def done(self):
"""Returns True if the future has finished running."""
return self._done
def _clear_tb_log(self):
self._log_traceback = False
if self._tb_logger is not None:
self._tb_logger.clear()
self._tb_logger = None
def result(self, timeout=None):
"""If the operation succeeded, return its result. If it failed,
re-raise its exception.
This method takes a ``timeout`` argument for compatibility with
`concurrent.futures.Future` but it is an error to call it
before the `Future` is done, so the ``timeout`` is never used.
"""
self._clear_tb_log()
if self._result is not None:
return self._result
if self._exc_info is not None:
raise_exc_info(self._exc_info)
self._check_done()
return self._result
def exception(self, timeout=None):
"""If the operation raised an exception, return the `Exception`
object. Otherwise returns None.
This method takes a ``timeout`` argument for compatibility with
`concurrent.futures.Future` but it is an error to call it
before the `Future` is done, so the ``timeout`` is never used.
"""
self._clear_tb_log()
if self._exc_info is not None:
return self._exc_info[1]
else:
self._check_done()
return None
def add_done_callback(self, fn):
"""Attaches the given callback to the `Future`.
It will be invoked with the `Future` as its argument when the Future
has finished running and its result is available. In Tornado
consider using `.IOLoop.add_future` instead of calling
`add_done_callback` directly.
"""
if self._done:
fn(self)
else:
self._callbacks.append(fn)
def set_result(self, result):
"""Sets the result of a ``Future``.
It is undefined to call any of the ``set`` methods more than once
on the same object.
"""
self._result = result
self._set_done()
def set_exception(self, exception):
"""Sets the exception of a ``Future.``"""
self.set_exc_info(
(exception.__class__,
exception,
getattr(exception, '__traceback__', None)))
def exc_info(self):
"""Returns a tuple in the same format as `sys.exc_info` or None.
.. versionadded:: 4.0
"""
self._clear_tb_log()
return self._exc_info
def set_exc_info(self, exc_info):
"""Sets the exception information of a ``Future.``
Preserves tracebacks on Python 2.
.. versionadded:: 4.0
"""
self._exc_info = exc_info
self._log_traceback = True
if not _GC_CYCLE_FINALIZERS:
self._tb_logger = _TracebackLogger(exc_info)
try:
self._set_done()
finally:
# Activate the logger after all callbacks have had a
# chance to call result() or exception().
if self._log_traceback and self._tb_logger is not None:
self._tb_logger.activate()
self._exc_info = exc_info
def _check_done(self):
if not self._done:
raise Exception("DummyFuture does not support blocking for results")
def _set_done(self):
self._done = True
for cb in self._callbacks:
try:
cb(self)
except Exception:
app_log.exception('Exception in callback %r for %r',
cb, self)
self._callbacks = None
# On Python 3.3 or older, objects with a destructor part of a reference
# cycle are never destroyed. It's no longer the case on Python 3.4 thanks to
# the PEP 442.
if _GC_CYCLE_FINALIZERS:
def __del__(self, is_finalizing=is_finalizing):
if is_finalizing() or not self._log_traceback:
# set_exception() was not called, or result() or exception()
# has consumed the exception
return
tb = traceback.format_exception(*self._exc_info)
app_log.error('Future %r exception was never retrieved: %s',
self, ''.join(tb).rstrip())
TracebackFuture = Future
if futures is None:
FUTURES = Future # type: typing.Union[type, typing.Tuple[type, ...]]
else:
FUTURES = (futures.Future, Future)
def is_future(x):
return isinstance(x, FUTURES)
class DummyExecutor(object):
def submit(self, fn, *args, **kwargs):
future = TracebackFuture()
try:
future.set_result(fn(*args, **kwargs))
except Exception:
future.set_exc_info(sys.exc_info())
return future
def shutdown(self, wait=True):
pass
dummy_executor = DummyExecutor()
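# Hedged sketch: DummyExecutor runs the submitted callable synchronously and
# wraps the outcome in an already-resolved Future, e.g.
#
#     f = dummy_executor.submit(pow, 2, 10)
#     assert f.done() and f.result() == 1024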
def run_on_executor(*args, **kwargs):
"""Decorator to run a synchronous method asynchronously on an executor.
The decorated method may be called with a ``callback`` keyword
argument and returns a future.
The `.IOLoop` and executor to be used are determined by the ``io_loop``
and ``executor`` attributes of ``self``. To use different attributes,
pass keyword arguments to the decorator::
@run_on_executor(executor='_thread_pool')
def foo(self):
pass
.. versionchanged:: 4.2
Added keyword arguments to use alternative attributes.
"""
def run_on_executor_decorator(fn):
executor = kwargs.get("executor", "executor")
io_loop = kwargs.get("io_loop", "io_loop")
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
callback = kwargs.pop("callback", None)
future = getattr(self, executor).submit(fn, self, *args, **kwargs)
if callback:
getattr(self, io_loop).add_future(
future, lambda future: callback(future.result()))
return future
return wrapper
if args and kwargs:
raise ValueError("cannot combine positional and keyword args")
if len(args) == 1:
return run_on_executor_decorator(args[0])
elif len(args) != 0:
raise ValueError("expected 1 argument, got %d", len(args))
return run_on_executor_decorator
_NO_RESULT = object()
def return_future(f):
"""Decorator to make a function that returns via callback return a
`Future`.
The wrapped function should take a ``callback`` keyword argument
and invoke it with one argument when it has finished. To signal failure,
the function can simply raise an exception (which will be
captured by the `.StackContext` and passed along to the ``Future``).
From the caller's perspective, the callback argument is optional.
If one is given, it will be invoked when the function is complete
with `Future.result()` as an argument. If the function fails, the
callback will not be run and an exception will be raised into the
surrounding `.StackContext`.
If no callback is given, the caller should use the ``Future`` to
wait for the function to complete (perhaps by yielding it in a
`.gen.engine` function, or passing it to `.IOLoop.add_future`).
Usage:
.. testcode::
@return_future
def future_func(arg1, arg2, callback):
# Do stuff (possibly asynchronous)
callback(result)
@gen.engine
def caller(callback):
yield future_func(arg1, arg2)
callback()
..
Note that ``@return_future`` and ``@gen.engine`` can be applied to the
same function, provided ``@return_future`` appears first. However,
consider using ``@gen.coroutine`` instead of this combination.
"""
replacer = ArgReplacer(f, 'callback')
@functools.wraps(f)
def wrapper(*args, **kwargs):
future = TracebackFuture()
callback, args, kwargs = replacer.replace(
lambda value=_NO_RESULT: future.set_result(value),
args, kwargs)
def handle_error(typ, value, tb):
future.set_exc_info((typ, value, tb))
return True
exc_info = None
with ExceptionStackContext(handle_error):
try:
result = f(*args, **kwargs)
if result is not None:
raise ReturnValueIgnoredError(
"@return_future should not be used with functions "
"that return values")
except:
exc_info = sys.exc_info()
raise
if exc_info is not None:
# If the initial synchronous part of f() raised an exception,
# go ahead and raise it to the caller directly without waiting
# for them to inspect the Future.
future.result()
# If the caller passed in a callback, schedule it to be called
# when the future resolves. It is important that this happens
# just before we return the future, or else we risk confusing
# stack contexts with multiple exceptions (one here with the
# immediate exception, and again when the future resolves and
# the callback triggers its exception by calling future.result()).
if callback is not None:
def run_callback(future):
result = future.result()
if result is _NO_RESULT:
callback()
else:
callback(future.result())
future.add_done_callback(wrap(run_callback))
return future
return wrapper
def chain_future(a, b):
"""Chain two futures together so that when one completes, so does the other.
The result (success or failure) of ``a`` will be copied to ``b``, unless
``b`` has already been completed or cancelled by the time ``a`` finishes.
"""
def copy(future):
assert future is a
if b.done():
return
if (isinstance(a, TracebackFuture) and
isinstance(b, TracebackFuture) and
a.exc_info() is not None):
b.set_exc_info(a.exc_info())
elif a.exception() is not None:
b.set_exception(a.exception())
else:
b.set_result(a.result())
a.add_done_callback(copy)
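# Hedged usage sketch (some_async_op is a hypothetical helper returning a
# Tornado Future): chain_future forwards the outcome of one Future to another
# one the caller already holds:
#
#     outer = TracebackFuture()
#     inner = some_async_op()
#     chain_future(inner, outer)   # outer resolves when inner does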
|
|
"""The tests for the Template Binary sensor platform."""
from datetime import timedelta
import unittest
from unittest import mock
from homeassistant.const import MATCH_ALL, EVENT_HOMEASSISTANT_START
from homeassistant import setup
from homeassistant.components.template import binary_sensor as template
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template as template_hlpr
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as dt_util
from tests.common import (
get_test_home_assistant,
assert_setup_component,
async_fire_time_changed,
)
class TestBinarySensorTemplate(unittest.TestCase):
"""Test for Binary sensor template platform."""
hass = None
# pylint: disable=invalid-name
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_setup(self):
"""Test the setup."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ foo }}",
"device_class": "motion",
}
},
}
}
with assert_setup_component(1):
assert setup.setup_component(self.hass, "binary_sensor", config)
def test_setup_no_sensors(self):
"""Test setup with no sensors."""
with assert_setup_component(0):
assert setup.setup_component(
self.hass, "binary_sensor", {"binary_sensor": {"platform": "template"}}
)
def test_setup_invalid_device(self):
"""Test the setup with invalid devices."""
with assert_setup_component(0):
assert setup.setup_component(
self.hass,
"binary_sensor",
{"binary_sensor": {"platform": "template", "sensors": {"foo bar": {}}}},
)
def test_setup_invalid_device_class(self):
"""Test setup with invalid sensor class."""
with assert_setup_component(0):
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"value_template": "{{ foo }}",
"device_class": "foobarnotreal",
}
},
}
},
)
def test_setup_invalid_missing_template(self):
"""Test setup with invalid and missing template."""
with assert_setup_component(0):
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "template",
"sensors": {"test": {"device_class": "motion"}},
}
},
)
def test_icon_template(self):
"""Test icon template."""
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_template_sensor": {
"value_template": "{{ states.sensor.xyz.state }}",
"icon_template": "{% if "
"states.binary_sensor.test_state.state == "
"'Works' %}"
"mdi:check"
"{% endif %}",
}
},
}
},
)
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes.get("icon") == ""
self.hass.states.set("binary_sensor.test_state", "Works")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes["icon"] == "mdi:check"
def test_entity_picture_template(self):
"""Test entity_picture template."""
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_template_sensor": {
"value_template": "{{ states.sensor.xyz.state }}",
"entity_picture_template": "{% if "
"states.binary_sensor.test_state.state == "
"'Works' %}"
"/local/sensor.png"
"{% endif %}",
}
},
}
},
)
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes.get("entity_picture") == ""
self.hass.states.set("binary_sensor.test_state", "Works")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes["entity_picture"] == "/local/sensor.png"
@mock.patch(
"homeassistant.components.template.binary_sensor."
"BinarySensorTemplate._async_render"
)
def test_match_all(self, _async_render):
"""Test MATCH_ALL in template."""
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "template",
"sensors": {
"match_all_template_sensor": {"value_template": "{{ 42 }}"}
},
}
},
)
self.hass.start()
self.hass.block_till_done()
init_calls = len(_async_render.mock_calls)
self.hass.states.set("sensor.any_state", "update")
self.hass.block_till_done()
assert len(_async_render.mock_calls) == init_calls
def test_attributes(self):
"""Test the attributes."""
vs = run_callback_threadsafe(
self.hass.loop,
template.BinarySensorTemplate,
self.hass,
"parent",
"Parent",
"motion",
template_hlpr.Template("{{ 1 > 1 }}", self.hass),
None,
None,
MATCH_ALL,
None,
None,
).result()
assert not vs.should_poll
assert "motion" == vs.device_class
assert "Parent" == vs.name
run_callback_threadsafe(self.hass.loop, vs.async_check_state).result()
assert not vs.is_on
# pylint: disable=protected-access
vs._template = template_hlpr.Template("{{ 2 > 1 }}", self.hass)
run_callback_threadsafe(self.hass.loop, vs.async_check_state).result()
assert vs.is_on
def test_event(self):
"""Test the event."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
}
},
}
}
with assert_setup_component(1):
assert setup.setup_component(self.hass, "binary_sensor", config)
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test")
assert state.state == "off"
self.hass.states.set("sensor.test_state", "on")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test")
assert state.state == "on"
@mock.patch("homeassistant.helpers.template.Template.render")
def test_update_template_error(self, mock_render):
"""Test the template update error."""
vs = run_callback_threadsafe(
self.hass.loop,
template.BinarySensorTemplate,
self.hass,
"parent",
"Parent",
"motion",
template_hlpr.Template("{{ 1 > 1 }}", self.hass),
None,
None,
MATCH_ALL,
None,
None,
).result()
mock_render.side_effect = TemplateError("foo")
run_callback_threadsafe(self.hass.loop, vs.async_check_state).result()
mock_render.side_effect = TemplateError(
"UndefinedError: 'None' has no attribute"
)
run_callback_threadsafe(self.hass.loop, vs.async_check_state).result()
async def test_template_delay_on(hass):
"""Test binary sensor template delay on."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_on": 5,
}
},
}
}
await setup.async_setup_component(hass, "binary_sensor", config)
await hass.async_start()
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
future = dt_util.utcnow() + timedelta(seconds=5)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
# check with time changes
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
future = dt_util.utcnow() + timedelta(seconds=5)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
async def test_template_delay_off(hass):
"""Test binary sensor template delay off."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_off": 5,
}
},
}
}
hass.states.async_set("sensor.test_state", "on")
await setup.async_setup_component(hass, "binary_sensor", config)
await hass.async_start()
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
future = dt_util.utcnow() + timedelta(seconds=5)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
# check with time changes
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
future = dt_util.utcnow() + timedelta(seconds=5)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
async def test_no_update_template_match_all(hass, caplog):
"""Test that we do not update sensors that match on all."""
hass.states.async_set("binary_sensor.test_sensor", "true")
await setup.async_setup_component(
hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "template",
"sensors": {
"all_state": {"value_template": '{{ "true" }}'},
"all_icon": {
"value_template": "{{ states.binary_sensor.test_sensor.state }}",
"icon_template": "{{ 1 + 1 }}",
},
"all_entity_picture": {
"value_template": "{{ states.binary_sensor.test_sensor.state }}",
"entity_picture_template": "{{ 1 + 1 }}",
},
},
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 4
assert (
"Template binary sensor all_state has no entity ids "
"configured to track nor were we able to extract the entities to "
"track from the value template"
) in caplog.text
assert (
"Template binary sensor all_icon has no entity ids "
"configured to track nor were we able to extract the entities to "
"track from the icon template"
) in caplog.text
assert (
"Template binary sensor all_entity_picture has no entity ids "
"configured to track nor were we able to extract the entities to "
"track from the entity_picture template"
) in caplog.text
assert hass.states.get("binary_sensor.all_state").state == "off"
assert hass.states.get("binary_sensor.all_icon").state == "off"
assert hass.states.get("binary_sensor.all_entity_picture").state == "off"
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.all_state").state == "on"
assert hass.states.get("binary_sensor.all_icon").state == "on"
assert hass.states.get("binary_sensor.all_entity_picture").state == "on"
hass.states.async_set("binary_sensor.test_sensor", "false")
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.all_state").state == "on"
assert hass.states.get("binary_sensor.all_icon").state == "on"
assert hass.states.get("binary_sensor.all_entity_picture").state == "on"
await hass.helpers.entity_component.async_update_entity("binary_sensor.all_state")
await hass.helpers.entity_component.async_update_entity("binary_sensor.all_icon")
await hass.helpers.entity_component.async_update_entity(
"binary_sensor.all_entity_picture"
)
assert hass.states.get("binary_sensor.all_state").state == "on"
assert hass.states.get("binary_sensor.all_icon").state == "off"
assert hass.states.get("binary_sensor.all_entity_picture").state == "off"
|
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/monitoring/v3/metric_service.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.monitoring.v3 MetricService API."""
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.api import metric_pb2 as api_metric_pb2
from google.cloud.gapic.monitoring.v3 import enums
from google.monitoring.v3 import common_pb2
from google.monitoring.v3 import metric_pb2 as v3_metric_pb2
from google.monitoring.v3 import metric_service_pb2
_PageDesc = google.gax.PageDescriptor
class MetricServiceApi(object):
"""
Manages metric descriptors, monitored resource descriptors, and
time series data.
"""
SERVICE_ADDRESS = 'monitoring.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
_CODE_GEN_NAME_VERSION = 'gapic/0.1.0'
_GAX_VERSION = pkg_resources.get_distribution('google-gax').version
_PAGE_DESCRIPTORS = {
'list_monitored_resource_descriptors':
_PageDesc('page_token', 'next_page_token', 'resource_descriptors'),
'list_metric_descriptors': _PageDesc('page_token', 'next_page_token',
'metric_descriptors'),
'list_time_series': _PageDesc('page_token', 'next_page_token',
'time_series')
}
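# Each page descriptor names the request field that carries the page token, the
# response field that carries the next page token, and the repeated response field
# whose elements the returned PageIterator yields.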
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = ()
_PROJECT_PATH_TEMPLATE = path_template.PathTemplate('projects/{project}')
_METRIC_DESCRIPTOR_PATH_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/metricDescriptors/{metric_descriptor_path=**}')
_MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}')
@classmethod
def project_path(cls, project):
"""Returns a fully-qualified project resource name string."""
return cls._PROJECT_PATH_TEMPLATE.render({'project': project, })
@classmethod
def metric_descriptor_path_path(cls, project, metric_descriptor_path):
"""Returns a fully-qualified metric_descriptor_path resource name string."""
return cls._METRIC_DESCRIPTOR_PATH_PATH_TEMPLATE.render({
'project': project,
'metric_descriptor_path': metric_descriptor_path,
})
@classmethod
def monitored_resource_descriptor_path(cls, project,
monitored_resource_descriptor):
"""Returns a fully-qualified monitored_resource_descriptor resource name string."""
return cls._MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE.render({
'project': project,
'monitored_resource_descriptor': monitored_resource_descriptor,
})
@classmethod
def match_project_from_project_name(cls, project_name):
"""Parses the project from a project resource.
Args:
project_name (string): A fully-qualified path representing a project
resource.
Returns:
A string representing the project.
"""
return cls._PROJECT_PATH_TEMPLATE.match(project_name).get('project')
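# Illustration with hypothetical values: given the templates defined above, one
# would expect
#   MetricServiceApi.project_path('my-project')
#       -> 'projects/my-project'
#   MetricServiceApi.match_project_from_project_name('projects/my-project')
#       -> 'my-project'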
@classmethod
def match_project_from_metric_descriptor_path_name(
cls, metric_descriptor_path_name):
"""Parses the project from a metric_descriptor_path resource.
Args:
metric_descriptor_path_name (string): A fully-qualified path representing a metric_descriptor_path
resource.
Returns:
A string representing the project.
"""
return cls._METRIC_DESCRIPTOR_PATH_PATH_TEMPLATE.match(
metric_descriptor_path_name).get('project')
@classmethod
def match_metric_descriptor_path_from_metric_descriptor_path_name(
cls, metric_descriptor_path_name):
"""Parses the metric_descriptor_path from a metric_descriptor_path resource.
Args:
metric_descriptor_path_name (string): A fully-qualified path representing a metric_descriptor_path
resource.
Returns:
A string representing the metric_descriptor_path.
"""
return cls._METRIC_DESCRIPTOR_PATH_PATH_TEMPLATE.match(
metric_descriptor_path_name).get('metric_descriptor_path')
@classmethod
def match_project_from_monitored_resource_descriptor_name(
cls, monitored_resource_descriptor_name):
"""Parses the project from a monitored_resource_descriptor resource.
Args:
monitored_resource_descriptor_name (string): A fully-qualified path representing a monitored_resource_descriptor
resource.
Returns:
A string representing the project.
"""
return cls._MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE.match(
monitored_resource_descriptor_name).get('project')
@classmethod
def match_monitored_resource_descriptor_from_monitored_resource_descriptor_name(
cls, monitored_resource_descriptor_name):
"""Parses the monitored_resource_descriptor from a monitored_resource_descriptor resource.
Args:
monitored_resource_descriptor_name (string): A fully-qualified path representing a monitored_resource_descriptor
resource.
Returns:
A string representing the monitored_resource_descriptor.
"""
return cls._MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE.match(
monitored_resource_descriptor_name).get(
'monitored_resource_descriptor')
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
metadata_transformer=None,
ssl_creds=None,
scopes=None,
client_config=None,
app_name='gax',
app_version=_GAX_VERSION):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
ssl_creds (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
metadata_transformer (Callable[[], list]): A function that creates
the metadata for requests.
app_name (string): The codename of the calling service.
app_version (string): The version of the calling service.
Returns:
A MetricServiceApi object.
"""
if scopes is None:
scopes = self._ALL_SCOPES
if client_config is None:
client_config = {}
goog_api_client = '{}/{} {} gax/{} python/{}'.format(
app_name, app_version, self._CODE_GEN_NAME_VERSION,
self._GAX_VERSION, platform.python_version())
metadata = [('x-goog-api-client', goog_api_client)]
default_client_config = json.loads(
pkg_resources.resource_string(
__name__, 'metric_service_client_config.json').decode())
defaults = api_callable.construct_settings(
'google.monitoring.v3.MetricService',
default_client_config,
client_config,
config.STATUS_CODE_NAMES,
kwargs={'metadata': metadata},
page_descriptors=self._PAGE_DESCRIPTORS)
self.metric_service_stub = config.create_stub(
metric_service_pb2.MetricServiceStub,
service_path,
port,
ssl_creds=ssl_creds,
channel=channel,
metadata_transformer=metadata_transformer,
scopes=scopes)
self._list_monitored_resource_descriptors = api_callable.create_api_call(
self.metric_service_stub.ListMonitoredResourceDescriptors,
settings=defaults['list_monitored_resource_descriptors'])
self._get_monitored_resource_descriptor = api_callable.create_api_call(
self.metric_service_stub.GetMonitoredResourceDescriptor,
settings=defaults['get_monitored_resource_descriptor'])
self._list_metric_descriptors = api_callable.create_api_call(
self.metric_service_stub.ListMetricDescriptors,
settings=defaults['list_metric_descriptors'])
self._get_metric_descriptor = api_callable.create_api_call(
self.metric_service_stub.GetMetricDescriptor,
settings=defaults['get_metric_descriptor'])
self._create_metric_descriptor = api_callable.create_api_call(
self.metric_service_stub.CreateMetricDescriptor,
settings=defaults['create_metric_descriptor'])
self._delete_metric_descriptor = api_callable.create_api_call(
self.metric_service_stub.DeleteMetricDescriptor,
settings=defaults['delete_metric_descriptor'])
self._list_time_series = api_callable.create_api_call(
self.metric_service_stub.ListTimeSeries,
settings=defaults['list_time_series'])
self._create_time_series = api_callable.create_api_call(
self.metric_service_stub.CreateTimeSeries,
settings=defaults['create_time_series'])
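# Illustrative usage, mirroring the per-method examples below: the client is
# normally constructed with its defaults, e.g. ``api = MetricServiceApi()``, which
# targets SERVICE_ADDRESS on DEFAULT_SERVICE_PORT and takes its call settings from
# the packaged metric_service_client_config.json.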
# Service calls
def list_monitored_resource_descriptors(self,
name,
filter_='',
page_size=0,
options=None):
"""
Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_api
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = metric_service_api.MetricServiceApi()
>>> name = api.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in api.list_monitored_resource_descriptors(name):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_monitored_resource_descriptors(name, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
filter_ (string): An optional `filter <https://cloud.google.com/monitoring/api/v3/filters>`_ describing
the descriptors to be returned. The filter can reference
the descriptor's type and labels. For example, the
following filter returns only Google Compute Engine descriptors
that have an ``id`` label:
resource.type = starts_with(\"gce_\") AND resource.label:id
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g. timeout, retries, etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.api.monitored_resource_pb2.MonitoredResourceDescriptor` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = metric_service_pb2.ListMonitoredResourceDescriptorsRequest(
name=name, filter=filter_, page_size=page_size)
return self._list_monitored_resource_descriptors(request, options)
def get_monitored_resource_descriptor(self, name, options=None):
"""
Gets a single monitored resource descriptor. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_api
>>> api = metric_service_api.MetricServiceApi()
>>> name = api.monitored_resource_descriptor_path('[PROJECT]', '[MONITORED_RESOURCE_DESCRIPTOR]')
>>> response = api.get_monitored_resource_descriptor(name)
Args:
name (string): The monitored resource descriptor to get. The format is
``\"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}\"``.
The ``{resource_type}`` is a predefined type, such as
``cloudsql_database``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g. timeout, retries, etc.
Returns:
A :class:`google.api.monitored_resource_pb2.MonitoredResourceDescriptor` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = metric_service_pb2.GetMonitoredResourceDescriptorRequest(
name=name)
return self._get_monitored_resource_descriptor(request, options)
def list_metric_descriptors(self,
name,
filter_='',
page_size=0,
options=None):
"""
Lists metric descriptors that match a filter. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_api
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = metric_service_api.MetricServiceApi()
>>> name = api.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in api.list_metric_descriptors(name):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_metric_descriptors(name, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
filter_ (string): If this field is empty, all custom and
system-defined metric descriptors are returned.
Otherwise, the `filter <https://cloud.google.com/monitoring/api/v3/filters>`_
specifies which metric descriptors are to be
returned. For example, the following filter matches all
`custom metrics <https://cloud.google.com/monitoring/custom-metrics>`_:
metric.type = starts_with(\"custom.googleapis.com/\")
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g. timeout, retries, etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.api.metric_pb2.MetricDescriptor` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = metric_service_pb2.ListMetricDescriptorsRequest(
name=name, filter=filter_, page_size=page_size)
return self._list_metric_descriptors(request, options)
def get_metric_descriptor(self, name, options=None):
"""
Gets a single metric descriptor. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_api
>>> api = metric_service_api.MetricServiceApi()
>>> name = api.metric_descriptor_path_path('[PROJECT]', '[METRIC_DESCRIPTOR_PATH]')
>>> response = api.get_metric_descriptor(name)
Args:
name (string): The metric descriptor on which to execute the request. The format is
``\"projects/{project_id_or_number}/metricDescriptors/{metric_id}\"``.
An example value of ``{metric_id}`` is
``\"compute.googleapis.com/instance/disk/read_bytes_count\"``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g. timeout, retries, etc.
Returns:
A :class:`google.api.metric_pb2.MetricDescriptor` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = metric_service_pb2.GetMetricDescriptorRequest(name=name)
return self._get_metric_descriptor(request, options)
def create_metric_descriptor(self, name, metric_descriptor, options=None):
"""
Creates a new metric descriptor.
User-created metric descriptors define
`custom metrics <https://cloud.google.com/monitoring/custom-metrics>`_.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_api
>>> from google.api import metric_pb2 as api_metric_pb2
>>> api = metric_service_api.MetricServiceApi()
>>> name = api.project_path('[PROJECT]')
>>> metric_descriptor = api_metric_pb2.MetricDescriptor()
>>> response = api.create_metric_descriptor(name, metric_descriptor)
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
metric_descriptor (:class:`google.api.metric_pb2.MetricDescriptor`): The new `custom metric <https://cloud.google.com/monitoring/custom-metrics>`_
descriptor.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g. timeout, retries, etc.
Returns:
A :class:`google.api.metric_pb2.MetricDescriptor` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = metric_service_pb2.CreateMetricDescriptorRequest(
name=name, metric_descriptor=metric_descriptor)
return self._create_metric_descriptor(request, options)
def delete_metric_descriptor(self, name, options=None):
"""
Deletes a metric descriptor. Only user-created
`custom metrics <https://cloud.google.com/monitoring/custom-metrics>`_ can be deleted.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_api
>>> api = metric_service_api.MetricServiceApi()
>>> name = api.metric_descriptor_path_path('[PROJECT]', '[METRIC_DESCRIPTOR_PATH]')
>>> api.delete_metric_descriptor(name)
Args:
name (string): The metric descriptor on which to execute the request. The format is
``\"projects/{project_id_or_number}/metricDescriptors/{metric_id}\"``.
An example of ``{metric_id}`` is:
``\"custom.googleapis.com/my_test_metric\"``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g. timeout, retries, etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = metric_service_pb2.DeleteMetricDescriptorRequest(name=name)
self._delete_metric_descriptor(request, options)
def list_time_series(self,
name,
filter_,
interval,
view,
aggregation=None,
order_by='',
page_size=0,
options=None):
"""
Lists time series that match a filter. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_api
>>> from google.cloud.gapic.monitoring.v3 import enums
>>> from google.monitoring.v3 import common_pb2
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = metric_service_api.MetricServiceApi()
>>> name = api.project_path('[PROJECT]')
>>> filter_ = ''
>>> interval = common_pb2.TimeInterval()
>>> view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL
>>>
>>> # Iterate over all results
>>> for element in api.list_time_series(name, filter_, interval, view):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_time_series(name, filter_, interval, view, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The project on which to execute the request. The format is
\"projects/{project_id_or_number}\".
filter_ (string): A `monitoring filter <https://cloud.google.com/monitoring/api/v3/filters>`_ that specifies which time
series should be returned. The filter must specify a single metric type,
and can additionally specify metric labels and other information. For
example:
metric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND
metric.label.instance_name = \"my-instance-name\"
interval (:class:`google.monitoring.v3.common_pb2.TimeInterval`): The time interval for which results should be returned. Only time series
that contain data points in the specified interval are included
in the response.
aggregation (:class:`google.monitoring.v3.common_pb2.Aggregation`): By default, the raw time series data is returned.
Use this field to combine multiple time series for different
views of the data.
order_by (string): Specifies the order in which the points of the time series should
be returned. By default, results are not ordered. Currently,
this field must be left blank.
view (enum :class:`google.cloud.gapic.monitoring.v3.enums.ListTimeSeriesRequest.TimeSeriesView`): Specifies which information is returned about the time series.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g. timeout, retries, etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.monitoring.v3.metric_pb2.TimeSeries` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
if aggregation is None:
aggregation = common_pb2.Aggregation()
request = metric_service_pb2.ListTimeSeriesRequest(
name=name,
filter=filter_,
interval=interval,
view=view,
aggregation=aggregation,
order_by=order_by,
page_size=page_size)
return self._list_time_series(request, options)
def create_time_series(self, name, time_series, options=None):
"""
Creates or adds data to one or more time series.
The response is empty if all time series in the request were written.
If any time series could not be written, a corresponding failure message is
included in the error response.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_api
>>> from google.monitoring.v3 import metric_pb2 as v3_metric_pb2
>>> api = metric_service_api.MetricServiceApi()
>>> name = api.project_path('[PROJECT]')
>>> time_series = []
>>> api.create_time_series(name, time_series)
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
time_series (list[:class:`google.monitoring.v3.metric_pb2.TimeSeries`]): The new data to be added to a list of time series.
Adds at most one data point to each of several time series. The new data
point must be more recent than any other point in its time series. Each
``TimeSeries`` value must fully specify a unique time series by supplying
all label values for the metric and the monitored resource.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g. timeout, retries, etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = metric_service_pb2.CreateTimeSeriesRequest(
name=name, time_series=time_series)
self._create_time_series(request, options)
|
|
#!/usr/bin/python
import os
import pickle
import pygame
import sys
import time
from pygame.locals import *
from itertools import cycle
from random import randint
###############################################################################
Black = (0,0,0)
White = (255,255,255)
Red = (255,0,0)
Green = (0,255,0)
Blue = (0,0,255)
Rainbow = cycle([Red,Green,Blue, (255,255,0),(255,0,255),(0,255,255)])
###############################################################################
###############################################################################
###############################################################################
class Snake():
def __init__(self):
pygame.init()
self.mFrameRate = 20
self.mLineThickness = 10
self.mWindowWidth = 800
self.mWindowHeight = 700
self.mFont = pygame.font.Font('freesansbold.ttf',20)
self.mBigFont = pygame.font.Font('freesansbold.ttf',40)
self.mDisplay = \
pygame.display.set_mode((self.mWindowWidth,self.mWindowHeight))
self.mClock = pygame.time.Clock()
pygame.display.set_caption('Snake')
self.mGameOver = True
#############################################################################
def StartGame(self):
MiddleX, MiddleY = self.mWindowWidth/2, self.mWindowHeight/2
self.mSnakeLength = 3
self.mSnake = [(MiddleX, MiddleY), \
(MiddleX-self.mLineThickness,MiddleY), \
(MiddleX-2*self.mLineThickness,MiddleY)]
self.mSnakeXDirection = 1
self.mSnakeYDirection = 0
self.mFoodPosition = self.GetNextFoodPosition()
self.mTimeSinceLastIncrease = time.time()
self.mTimeSinceLastFood = time.time()
self.mScore = 0
#############################################################################
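# Returns True when the point (x, y) lies inside any snake segment (each segment
# covers a square of side mLineThickness); used for food collisions and to avoid
# spawning food on top of the snake.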
def IsInsideOfSnake(self, x, y):
for SnakeX, SnakeY in self.mSnake:
SnakeXRange = range(SnakeX,SnakeX + self.mLineThickness)
SnakeYRange = range(SnakeY,SnakeY + self.mLineThickness)
if x in SnakeXRange and y in SnakeYRange:
return True
return False
#############################################################################
def GetHighScoreList(self):
try:
return pickle.load(\
open(os.path.expanduser('~/') + '.SnakeHighScore.pickle','rb'))
except IOError:
return [0,0,0,0,0]
#############################################################################
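# Inserts the current score into the persisted top-5 list: when the score beats an
# entry it takes that slot and the displaced entry is carried down to the next one.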
def SaveHighScores(self):
NewHighScoreList = []
OldHighScores = self.GetHighScoreList()
for HighScore in OldHighScores:
if self.mScore > HighScore:
NewHighScoreList.append(self.mScore)
self.mScore = HighScore
else:
NewHighScoreList.append(HighScore)
pickle.dump( \
NewHighScoreList, \
open(os.path.expanduser('~/') + '.SnakeHighScore.pickle','wb'))
#############################################################################
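# Picks a random, grid-aligned food position (multiples of mLineThickness) inside
# the window, re-rolling until it does not land on the snake, and chooses a random
# food colour.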
def GetNextFoodPosition(self):
while True:
x = randint( \
self.mLineThickness, \
(self.mWindowWidth-2*self.mLineThickness)/self.mLineThickness)*self.mLineThickness
y = randint( \
self.mLineThickness, \
(self.mWindowHeight-2*self.mLineThickness)/self.mLineThickness)*self.mLineThickness
self.mFoodColor = self.GetRandomColor()
if not self.IsInsideOfSnake(x, y):
return x, y
#############################################################################
def DrawMenu(self):
NewGameSurface = \
self.mBigFont.render('Press s to start a new game!', True, Red)
NewGameRectangle = NewGameSurface.get_rect()
NewGameRectangle.midbottom = (self.mWindowWidth/2,(self.mWindowHeight/4))
HighScoreSurface = \
self.mBigFont.render('Press h to see high scores!', True, Blue)
HighScoreRectangle = HighScoreSurface.get_rect()
HighScoreRectangle.midtop = NewGameRectangle.midbottom
EscapeSurface = \
self.mBigFont.render('Press Esc or Q to exit!', True, Green)
EscapeRectangle = EscapeSurface.get_rect()
EscapeRectangle.midtop = HighScoreRectangle.midbottom
self.mDisplay.blit(NewGameSurface, NewGameRectangle)
self.mDisplay.blit(HighScoreSurface, HighScoreRectangle)
self.mDisplay.blit(EscapeSurface, EscapeRectangle)
#############################################################################
def DrawColorBorder(self):
for xPos in range(self.mWindowWidth/10, 9*self.mWindowWidth/10, self.mLineThickness):
TopRectangle = pygame.Rect(xPos,self.mWindowHeight/10, self.mLineThickness, self.mLineThickness)
BottomRectangle = pygame.Rect(xPos,9*self.mWindowHeight/10, self.mLineThickness, self.mLineThickness)
pygame.draw.rect(self.mDisplay, self.GetRandomColor(),TopRectangle)
pygame.draw.rect(self.mDisplay, self.GetRandomColor(),BottomRectangle)
for yPos in range(self.mWindowHeight/10, 9*self.mWindowHeight/10+self.mLineThickness,self.mLineThickness):
LeftRectangle = pygame.Rect(self.mWindowWidth/10,yPos, self.mLineThickness, self.mLineThickness)
RightRectangle = pygame.Rect(9*self.mWindowWidth/10,yPos, self.mLineThickness, self.mLineThickness)
pygame.draw.rect(self.mDisplay, self.GetRandomColor(),LeftRectangle)
pygame.draw.rect(self.mDisplay, self.GetRandomColor(),RightRectangle)
#############################################################################
def DrawStartScreen(self):
self.DrawBorder()
self.DrawMenu()
self.DrawColorBorder()
pygame.display.update()
#############################################################################
def DrawGame(self):
self.DrawBorder()
self.DrawFood()
self.DrawSnake()
self.DrawScore()
pygame.display.update()
#############################################################################
def DrawBorder(self):
self.mDisplay.fill(Black)
pygame.draw.rect( \
self.mDisplay, \
White,\
((0,0),(self.mWindowWidth,self.mWindowHeight)), \
self.mLineThickness*2)
#############################################################################
def DrawSnake(self):
for x,y in self.mSnake:
Rect = pygame.Rect(x, y, self.mLineThickness, self.mLineThickness)
pygame.draw.rect(self.mDisplay, next(Rainbow), Rect)
##############################################################################
def GetRandomColor(self):
return randint(0,255),randint(0,255),randint(0,255)
##############################################################################
def DrawFood(self):
x,y = self.mFoodPosition
Food = pygame.Rect(x,y, self.mLineThickness, self.mLineThickness)
pygame.draw.rect(self.mDisplay, self.mFoodColor, Food)
##############################################################################
def GetNextHead(self):
x,y = self.mSnake[0]
x = x + self.mSnakeXDirection * self.mLineThickness
y = y + self.mSnakeYDirection * self.mLineThickness
return x, y
##############################################################################
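# Advances the snake one cell in its current direction; roughly every 5 seconds the
# tail is kept so the snake grows by one segment, otherwise the last cell is dropped.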
def MoveSnake(self):
if time.time() - self.mTimeSinceLastIncrease > 5:
self.mTimeSinceLastIncrease = time.time()
self.mSnake = [self.GetNextHead()] + self.mSnake
else:
self.mSnake = [self.GetNextHead()] + self.mSnake[:-1]
##############################################################################
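# Eating the food scores 100 * current length and forces growth on the next move by
# pushing mTimeSinceLastIncrease far into the past, then a new food position is drawn.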
def CheckForFoodCollision(self):
if self.IsInsideOfSnake(self.mFoodPosition[0], self.mFoodPosition[1]):
self.mScore += 100 * len(self.mSnake)
self.mTimeSinceLastIncrease = -100
self.mFoodPosition = self.GetNextFoodPosition()
##############################################################################
def DisplayGameOver(self):
GameOverSurface = self.mBigFont.render('GAME OVER', True, Red)
GameOverRectangle = GameOverSurface.get_rect()
GameOverRectangle.midbottom = (self.mWindowWidth/2,self.mWindowHeight/2)
self.mDisplay.blit(GameOverSurface, GameOverRectangle)
ScoreSurface = self.mBigFont.render('Score = %s' %(self.mScore), True, Red)
ScoreRectangle = ScoreSurface.get_rect()
ScoreRectangle.midtop = GameOverRectangle.midbottom
self.mDisplay.blit(ScoreSurface, ScoreRectangle)
pygame.display.update()
time.sleep(3)
##############################################################################
def Fail(self):
self.DisplayGameOver()
self.SaveHighScores()
self.mGameOver = True
##############################################################################
def CheckForSnakeCollision(self):
if len(set(self.mSnake)) < len(self.mSnake):
self.Fail()
##############################################################################
def CheckForWallCollision(self):
x, y = self.mSnake[0]
if x <= 0 or x + self.mLineThickness >= self.mWindowWidth:
self.Fail()
elif y <= 0 or y + self.mLineThickness >= self.mWindowHeight:
self.Fail()
##############################################################################
def HandleKeyPress(self, Event):
if Event.key == K_DOWN:
if self.mSnakeYDirection == 0:
self.mSnakeXDirection = 0
self.mSnakeYDirection = 1
elif Event.key == K_UP:
if self.mSnakeYDirection == 0:
self.mSnakeXDirection = 0
self.mSnakeYDirection = -1
elif Event.key == K_RIGHT:
if self.mSnakeXDirection == 0:
self.mSnakeXDirection = 1
self.mSnakeYDirection = 0
elif Event.key == K_LEFT:
if self.mSnakeXDirection == 0:
self.mSnakeXDirection = -1
self.mSnakeYDirection = 0
elif Event.key == K_ESCAPE or Event.key == K_q:
pygame.quit()
exit()
elif Event.key == K_s:
self.StartGame()
self.mGameOver = False
elif Event.key == K_h:
self.DrawHighScoreScreen()
##############################################################################
def DrawHighScore(self, Place, Score):
ScoreSurface = self.mFont.render('%s. %s' %(Place +1, Score), True, next(Rainbow))
ScoreRectangle = ScoreSurface.get_rect()
ScoreRectangle.midbottom = \
(self.mWindowWidth/2,self.mWindowHeight/4+ Place*self.mWindowHeight/8)
self.mDisplay.blit(ScoreSurface,ScoreRectangle)
pygame.display.update()
##############################################################################
def DrawHighScoreScreen(self):
self.DrawBorder()
for Place, Score in enumerate(self.GetHighScoreList()):
delay = time.time()
while time.time() -delay < 1:
self.DrawColorBorder()
self.DrawHighScore(Place,Score)
delay = time.time()
while time.time() -delay < 3:
self.DrawColorBorder()
pygame.display.update()
##############################################################################
def DrawScore(self):
Surface = self.mFont.render('Score = %s' %(self.mScore), True, Green)
Rectangle = Surface.get_rect()
Rectangle.topleft = (self.mLineThickness*3, self.mLineThickness*2)
self.mDisplay.blit(Surface,Rectangle)
##############################################################################
def CheckIfStuck(self):
if time.time() - self.mTimeSinceLastIncrease > 10:
self.mFoodPosition = self.GetNextFoodPosition()
##############################################################################
def Run(self):
while True:
try:
for Event in pygame.event.get():
if Event.type == QUIT:
pygame.quit()
exit()
elif Event.type == KEYDOWN:
self.HandleKeyPress(Event)
if self.mGameOver:
self.DrawStartScreen()
else:
self.DrawGame()
self.mClock.tick(self.mFrameRate)
self.MoveSnake()
self.CheckForFoodCollision()
self.CheckForWallCollision()
self.CheckForSnakeCollision()
self.CheckIfStuck()
except KeyboardInterrupt:
pygame.quit()
exit()
pygame.quit()
exit()
################################################################################
################################################################################
if __name__ == '__main__':
SnakeGame = Snake()
SnakeGame.Run()
|
|
bl_info = {
"name": "RenderWare importer/exporter for GTA III/VC/SA (.dff)",
"author": "Ago Allikmaa (maxorator)",
"version": (0, 9, 2),
"blender": (2, 6, 3),
"location": "File > Import-Export > Renderware (.dff) ",
"description": "RenderWare importer/exporter for GTA III/VC/SA",
"category": "Import-Export" }
import struct
import os
import zlib
import base64
from collections import deque
import bpy
import math
import mathutils
from bpy.props import *
class RwTypes():
ANY = -1
STRUCT = 0x0001
STRING = 0x0002
EXTENSION = 0x0003
TEXTURE = 0x0006
MATERIAL = 0x0007
MATERIALLIST = 0x0008
FRAMELIST = 0x000E
GEOMETRY = 0x000F
CLUMP = 0x0010
ATOMIC = 0x0014
GEOMETRYLIST = 0x001A
RENDERRIGHTS = 0x001F
MORPHPLG = 0x0105
SKINPLG = 0x116
HANIMPLG = 0x11E
MATEFFECTS = 0x0120
BINMESHPLG = 0x050E
FRAMENAME = 0x253F2FE
COLLISION = 0x253F2FA
MATSPECULAR = 0x253F2F6
NIGHTCOLS = 0x253F2F9
MATREFLECTION = 0x253F2FC
MESHEXTENSION = 0x253F2FD
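# Converts a section header's version stamp into a comparable value: stamps with a
# zero high word are simply shifted up, newer packed stamps have their version bits
# extracted and normalised to the 0x3xxxx form used in checks such as
# metaHeader[2] < 0x34001 below.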
def decodeVersion(version):
if (version & 0xFFFF0000) == 0:
return version << 8
else:
p1 = ((version >> 14) & 0x3FF00) + 0x30000
p2 = (version >> 16) & 0x3F
return p1 | p2
class RpGeomFlag:
TRISTRIP = 0x0001
POSITIONS = 0x0002
TEXTURED = 0x0004
PRELIT = 0x0008
NORMALS = 0x0010
LIGHT = 0x0020
MODULATEMATERIALCOLOR = 0x0040
TEXTURED2 = 0x0080
class ImportRenderware:
class RwTriangle:
def __init__(self, verts, mat):
self.verts = verts
self.mat = mat
def desc(self):
return (self.verts[0], self.verts[1], self.verts[2])
class RwVertex:
def __init__(self, coords, normal):
self.coords = coords
self.normal = normal
self.uv = None
self.uv_env = None
def desc(self):
return (self.coords[0], self.coords[1], self.coords[2])
class RwFrame:
def __init__(self, loader, index, rot, pos, parent):
self.loader = loader
self.index = index
self.geometry = None
self.atomic = None
self.blobj = None
self.bldata = None
self.hanimdata = None
self.name = None
rmatrix = mathutils.Matrix.Identity(3)
rmatrix[0] = rot[0], rot[1], rot[2]
rmatrix[1] = rot[3], rot[4], rot[5]
rmatrix[2] = rot[6], rot[7], rot[8]
rmatrix.resize_4x4()
rmatrix.translation = pos[0], pos[1], pos[2]
self.matrix = rmatrix
self.parent = parent
self.loader.childrenOf[parent+1].append(self.index)
def setAtomic(self, atomic):
self.atomic = atomic
self.geometry = atomic.geometry
def build(self):
if self.name is None:
self.name = "noname_" + str(self.index);
if self.geometry:
self.bldata = self.geometry.build(self.name)
self.blobj = bpy.data.objects.new(self.name, self.bldata)
if self.parent >= 0:
self.blobj.parent = self.loader.frames[self.parent].blobj
self.blobj.matrix_local = self.matrix
bpy.context.scene.objects.link(self.blobj)
for frame in self.loader.childrenOf[self.index+1]:
self.loader.frames[frame].build()
if "_vlo" in self.name or "_dam" in self.name:
self.blobj.hide = True
self.blobj.hide_render = True
if self.loader.colhex and self.index == self.loader.childrenOf[0][0]:
textobj = bpy.data.texts.new(name = ("zrwcoll_" + self.name))
textobj.from_string(self.loader.colhex)
self.blobj.collhex = textobj.name
if self.hanimdata:
textobj = bpy.data.texts.new(name = ("zrwhanim" + str(self.index) + "_" + self.name))
textobj.from_string(self.hanimdata)
self.blobj.rw_hanimdata = textobj.name
if self.geometry and self.geometry.skindata:
textobj = bpy.data.texts.new(name = ("zrwskin_" + self.name))
textobj.from_string(self.geometry.skindata)
self.blobj.rw_skindata = textobj.name
if self.atomic and self.atomic.renderPlugin != None and self.atomic.renderExtra != None:
self.blobj.renderright = self.atomic.renderPlugin
self.blobj.renderextra = self.atomic.renderExtra
if self.atomic and self.atomic.matfxpipe:
self.blobj.matfxpipe = True
class RpGeometry:
def __init__(self, loader, index):
self.loader = loader
self.index = index
self.vertices = []
self.triangles = []
self.materials = []
self.mesh = None
self.atomic = None
self.skindata = None
self.hasEnvUV = False
self.vertCol = None
self.nightVertCol = None
self.hasNormals = False
def setAtomic(self, atomic):
self.atomic = atomic
def addMaterial(self, material):
material.setIndex(len(self.materials))
self.materials.append(material)
def addVertex(self, vertex):
self.vertices.append(vertex)
def addTriangle(self, triangle):
self.triangles.append(triangle)
def build(self, name):
self.mesh = bpy.data.meshes.new(name)
pyverts = []
pypolys = []
for vertex in self.vertices:
pyverts.append(vertex.desc())
for triangle in self.triangles:
pypolys.append(triangle.desc())
self.mesh.from_pydata(pyverts, [], pypolys)
self.mesh.update()
if self.vertCol:
vcol = self.mesh.vertex_colors.new("Normal")
self.mesh.vertex_colors.active = vcol
for i in range(len(self.vertices)):
vcol.data[i].color = (self.vertCol[i][0], self.vertCol[i][1], self.vertCol[i][2])
if self.nightVertCol:
nvcol = self.mesh.vertex_colors.new("Night")
self.mesh.vertex_colors.active = nvcol
for i in range(len(self.vertices)):
nvcol.data[i].color = (self.nightVertCol[i][0], self.nightVertCol[i][1], self.nightVertCol[i][2])
uvtexture = self.mesh.uv_textures.new()
uvtexture.name = "MainUV"
uvlayer = self.mesh.uv_layers[-1]
for i in range(len(self.triangles)):
for j in range(3):
uvlayer.data[3*i + j].uv = self.vertices[self.triangles[i].verts[j]].uv
if self.hasEnvUV:
euvtexture = self.mesh.uv_textures.new()
euvtexture.name = "EnvUV"
euvlayer = self.mesh.uv_layers[-1]
for i in range(len(self.triangles)):
for j in range(3):
euvlayer.data[3*i + j].uv = self.vertices[self.triangles[i].verts[j]].uv_env
for material in self.materials:
material.build()
for i in range(len(self.triangles)):
self.mesh.polygons[i].material_index = self.triangles[i].mat
return self.mesh
class RpMaterial:
def __init__(self, geometry, flags=None, col=None, textured=None, ambient=None, specular=None, diffuse=None):
self.index = None
self.name = "g" + str(geometry.index) + "m"
self.geometry = geometry
self.flags = flags
self.col = col
self.ambient = ambient
self.specular = specular
self.diffuse = diffuse
self.textured = textured
self.texture = None
self.blmat = None
self.envtex = None
self.readenvmap = False
self.envIntensity = 1
self.reflectColour = None
self.reflectIntensity = None
self.spectex = None
def setIndex(self, index):
self.index = index
self.name = "g" + str(self.geometry.index) + "m" + str(index)
def setTexture(self, texture):
self.texture = texture
def setEnvTexture(self, texture):
self.envtex = texture
def setSpecTexture(self, texture):
self.spectex = texture
def setReflection(self, colour, intensity):
self.reflectColour = colour
self.reflectIntensity = intensity
def build(self):
self.blmat = bpy.data.materials.new(self.name)
self.blmat.diffuse_color = (self.col[0]/255, self.col[1]/255, self.col[2]/255)
self.blmat.diffuse_intensity = self.diffuse
self.blmat.ambient = self.ambient
self.blmat.specular_intensity = self.specular
if self.geometry.vertCol:
self.blmat.use_vertex_color_light = True
if self.col[3] < 255:
self.blmat.use_transparency = True
self.blmat.alpha = self.col[3]/255
if self.envtex:
self.envtex.build()
if self.spectex:
self.spectex.build()
if self.texture:
self.texture.build()
self.blmat.active_texture_index = 0
if self.reflectColour and self.reflectIntensity:
self.blmat.mirror_color = self.reflectColour
self.blmat.raytrace_mirror.use = True
self.blmat.raytrace_mirror.reflect_factor = self.reflectIntensity
self.geometry.mesh.materials.append(self.blmat)
class RwTexture:
def __init__(self, loader, material, name, texType, intensity=1):
self.material = material
self.bltex = None
self.bltexslot = None
self.name = name
self.loader = loader
self.texType = texType
self.intensity = intensity
def build(self):
if self.texType == 1 and self.name in self.loader.envtexpool:
self.bltex = self.loader.envtexpool[self.name]
elif self.texType != 1 and self.name in self.loader.texpool:
self.bltex = self.loader.texpool[self.name]
else:
if self.texType == 1:
self.bltex = bpy.data.textures.new(self.name, "ENVIRONMENT_MAP")
self.bltex.__class__ = bpy.types.EnvironmentMapTexture
self.bltex.environment_map.source = "IMAGE_FILE"
self.loader.envtexpool[self.name] = self.bltex
else:
self.bltex = bpy.data.textures.new(self.name, "IMAGE")
self.bltex.__class__ = bpy.types.ImageTexture
self.loader.texpool[self.name] = self.bltex
imgfile = os.path.join(self.loader.filename + "_tex", self.name + ".png")
if os.path.isfile(imgfile):
self.bltex.image = bpy.data.images.load(imgfile)
self.bltexslot = self.material.blmat.texture_slots.create(self.texType)
self.bltexslot.texture_coords = "UV"
self.bltexslot.texture = self.bltex
if (self.texType == 1 or self.texType == 2) and self.material.geometry.hasEnvUV:
self.bltexslot.uv_layer = "EnvUV"
else:
self.bltexslot.uv_layer = "MainUV"
if self.texType == 1:
self.bltexslot.diffuse_factor = self.intensity
elif self.texType == 2:
self.bltexslot.use_map_diffuse = False
self.bltexslot.use_map_color_diffuse = False
self.bltexslot.use_map_color_spec = True
self.bltexslot.specular_color_factor = self.intensity
class RpAtomic:
def __init__(self, loader, frame, geometry, flags):
self.loader = loader
self.frame = frame
self.geometry = geometry
self.flags = flags
self.renderPlugin = None
self.renderExtra = None
self.matfxpipe = False
frame.setAtomic(self)
geometry.setAtomic(self)
def setRenderRights(self, plugin, extra):
self.renderPlugin = plugin
self.renderExtra = extra
def __init__(self, filename):
self.filename = filename
self.texpool = {}
self.envtexpool = {}
self.colhex = None
self.childrenOf = None
self.frames = []
self.geoms = []
self.f = open(filename, "rb")
self.readSection(RwTypes.CLUMP)
self.f.close()
for frame in self.childrenOf[0]:
self.frames[frame].build()
def writeDebug(self, text):
g = open(self.filename + ".txt", "a")
g.write(text + "\n")
g.close()
def readFormat(self, format):
return struct.unpack(format, self.f.read(struct.calcsize(format)))
def readSlice(self, format, slice):
size = struct.calcsize(format)
if len(slice) < size:
raise Exception("Failed to read slice, buffer is too small.")
return struct.unpack(format, slice[:size]), slice[size:]
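# Generic chunk reader: every RenderWare section starts with a 12-byte header of
# (type, payload size, version stamp). The payload is dispatched to a type-specific
# handler and the file position is then moved past the declared size, so unknown or
# only partially parsed sections are skipped safely.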
def readSection(self, type, extra = None):
header = self.readFormat("III")
header = (header[0], header[1], RwTypes.decodeVersion(header[2]))
if type >= 0 and header[0] != type:
raise Exception("Expected type " + str(type) + ", found " + str(header[0]))
curPos = self.f.tell()
res = None
if header[0] == RwTypes.STRUCT: res = self.readSectionStruct(header)
elif header[0] == RwTypes.STRING: res = self.readSectionString(header)
elif header[0] == RwTypes.EXTENSION: res = self.readSectionExtension(header, extra)
elif header[0] == RwTypes.TEXTURE: res = self.readSectionTexture(header, extra)
elif header[0] == RwTypes.MATERIAL: res = self.readSectionMaterial(header, extra)
elif header[0] == RwTypes.MATERIALLIST: res = self.readSectionMaterialList(header, extra)
elif header[0] == RwTypes.FRAMELIST: res = self.readSectionFrameList(header)
elif header[0] == RwTypes.GEOMETRY: res = self.readSectionGeometry(header, extra)
elif header[0] == RwTypes.CLUMP: res = self.readSectionClump(header)
elif header[0] == RwTypes.ATOMIC: res = self.readSectionAtomic(header)
elif header[0] == RwTypes.GEOMETRYLIST: res = self.readSectionGeometryList(header)
elif header[0] == RwTypes.MORPHPLG: res = self.readSectionMorphPLG(header, extra)
elif header[0] == RwTypes.BINMESHPLG: res = self.readSectionBinMeshPLG(header, extra)
elif header[0] == RwTypes.FRAMENAME: res = self.readSectionFrameName(header, extra)
elif header[0] == RwTypes.COLLISION: res = self.readSectionCollision(header, extra)
elif header[0] == RwTypes.MATEFFECTS: res = self.readSectionMatEffects(header, extra)
elif header[0] == RwTypes.MATSPECULAR: res = self.readSectionMatSpecular(header, extra)
elif header[0] == RwTypes.MATREFLECTION: res = self.readSectionMatReflection(header, extra)
elif header[0] == RwTypes.MESHEXTENSION: res = self.readSectionMeshExtension(header, extra)
elif header[0] == RwTypes.RENDERRIGHTS: res = self.readSectionRenderRights(header, extra)
elif header[0] == RwTypes.HANIMPLG: res = self.readSectionHAnimPLG(header, extra)
elif header[0] == RwTypes.SKINPLG: res = self.readSectionSkinPLG(header, extra)
elif header[0] == RwTypes.NIGHTCOLS: res = self.readSectionNightCols(header, extra)
elif type >= 0: raise Exception("Missing read function for section type " + str(type))
else: print("Ignoring extension data of type " + hex(header[0]))
self.f.seek(curPos + header[1])
return res
def readSectionStruct(self, header):
return header, self.f.read(header[1])
def readSectionString(self, header):
byteList = b""
for i in range(header[1]):
newByte = self.f.read(1)
if newByte[0] == 0:
break
byteList += newByte
return header, byteList.decode("ascii")
def readSectionExtension(self, header, extra):
endPos = self.f.tell() + header[1]
while self.f.tell() < endPos:
self.readSection(RwTypes.ANY, extra)
return header, None
def readSectionTexture(self, header, material):
metaHeader, slice = self.readSection(RwTypes.STRUCT)
(flags, x), slice = self.readSlice("HH", slice)
x, texName = self.readSection(RwTypes.STRING)
x, alphaName = self.readSection(RwTypes.STRING)
if material.readenvmap:
texture = self.RwTexture(self, material, texName, 1, material.envIntensity)
material.setEnvTexture(texture)
else:
texture = self.RwTexture(self, material, texName, 0, 1)
material.setTexture(texture)
self.readSection(RwTypes.EXTENSION)
return header, None
def readSectionMaterial(self, header, geometry):
metaHeader, slice = self.readSection(RwTypes.STRUCT)
(flags,), slice = self.readSlice("I", slice)
col, slice = self.readSlice("BBBB", slice)
(x, textured, ambient, specular, diffuse), slice = self.readSlice("iifff", slice)
material = self.RpMaterial(geometry, flags, col, textured, ambient, specular, diffuse)
geometry.addMaterial(material)
if textured > 0:
self.readSection(RwTypes.TEXTURE, material)
self.readSection(RwTypes.EXTENSION, material)
return header, None
def readSectionMaterialList(self, header, geometry):
metaHeader, slice = self.readSection(RwTypes.STRUCT)
(matCount,), slice = self.readSlice("i", slice)
for i in range(matCount):
junk, slice = self.readSlice("i", slice)
for i in range(matCount):
self.readSection(RwTypes.MATERIAL, geometry)
return header, None
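# The frame list stores, for each frame, a 3x3 rotation matrix, a position and a
# parent index (-1 for roots); childrenOf is indexed by parent+1 so childrenOf[0]
# collects the root frames.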
def readSectionFrameList(self, header):
metaHeader, slice = self.readSection(RwTypes.STRUCT)
(frameCount,), slice = self.readSlice("i", slice)
self.childrenOf = []
for i in range(frameCount+1):
self.childrenOf.append([])
for i in range(frameCount):
rot, slice = self.readSlice("fffffffff", slice)
pos, slice = self.readSlice("fff", slice)
(parent, flags), slice = self.readSlice("ii", slice)
self.frames.append(self.RwFrame(self, i, rot, pos, parent))
for i in range(frameCount):
self.readSection(RwTypes.EXTENSION, self.frames[i])
return header, None
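# Geometry payload, as parsed below: flags and counts, surface lighting values for
# pre-3.4.0.1 files, optional prelit vertex colours, one or more UV sets, triangles
# stored as four uint16s (two vertex indices, the material index, the third vertex),
# and a single morph target with the bounding sphere plus optional positions and normals.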
def readSectionGeometry(self, header, index):
metaHeader, slice = self.readSection(RwTypes.STRUCT)
(flags, texCount, triCount, vertCount, morphCount), slice = self.readSlice("HHiii", slice)
geometry = self.RpGeometry(self, index)
self.geoms.append(geometry)
geometry.flags = flags
if metaHeader[2] < 0x34001:
(surfAmbient, surfSpecular, surfDiffuse), slice = self.readSlice("fff", slice)
for i in range(vertCount):
geometry.addVertex(self.RwVertex(None, None))
if flags & RpGeomFlag.PRELIT:
geometry.vertCol = []
for i in range(vertCount):
(vcr, vcg, vcb, vca), slice = self.readSlice("BBBB", slice)
geometry.vertCol.append((vcr / 255, vcg / 255, vcb / 255))
for i in range(vertCount):
uv, slice = self.readSlice("ff", slice)
geometry.vertices[i].uv = (uv[0], 1-uv[1])
if texCount > 1:
geometry.hasEnvUV = True
for i in range(vertCount):
uv_env, slice = self.readSlice("ff", slice)
geometry.vertices[i].uv_env = (uv_env[0], 1-uv_env[1])
if texCount > 2:
slice = slice[struct.calcsize("ff")*(texCount-2)*(vertCount):]
for i in range(triCount):
(c, b, mat, a), slice = self.readSlice("HHHH", slice)
if a >= vertCount or b >= vertCount or c >= vertCount:
raise Exception("Vertex indices out of range for triangle.")
geometry.addTriangle(self.RwTriangle((a, b, c), mat))
if morphCount != 1:
raise Exception("Multiple frames not supported")
for i in range(morphCount):
(bx, by, bz, br, hasVerts, hasNormals), slice = self.readSlice("ffffii", slice)
if hasVerts > 0:
for j in range(vertCount):
coords, slice = self.readSlice("fff", slice)
geometry.vertices[j].coords = coords
if hasNormals > 0:
geometry.hasNormals = True
for j in range(vertCount):
normal, slice = self.readSlice("fff", slice)
geometry.vertices[j].normal = normal
self.readSection(RwTypes.MATERIALLIST, geometry)
self.readSection(RwTypes.EXTENSION, geometry)
return header, None
def readSectionClump(self, header):
metaHeader, slice = self.readSection(RwTypes.STRUCT)
(atomicCount,), slice = self.readSlice("i", slice)
if metaHeader[2] > 0x33000:
(lightCount, cameraCount), slice = self.readSlice("ii", slice)
self.readSection(RwTypes.FRAMELIST)
self.readSection(RwTypes.GEOMETRYLIST)
for i in range(atomicCount):
self.readSection(RwTypes.ATOMIC)
self.readSection(RwTypes.EXTENSION)
return header, None
def readSectionAtomic(self, header):
metaHeader, slice = self.readSection(RwTypes.STRUCT)
(frameIndex, geomIndex, flags, x, x, x, x), slice = self.readSlice("iiBBBBi", slice)
atomic = self.RpAtomic(self, self.frames[frameIndex], self.geoms[geomIndex], flags)
self.readSection(RwTypes.EXTENSION, atomic)
return header, None
def readSectionGeometryList(self, header):
metaHeader, slice = self.readSection(RwTypes.STRUCT)
(geomCount,), slice = self.readSlice("i", slice)
for i in range(geomCount):
self.readSection(RwTypes.GEOMETRY, i)
def readSectionMorphPLG(self, header, geometry):
return header, None
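# The bin mesh plugin stores per-material index lists, either triangle lists (type 0)
# or triangle strips (type 1). The triangles themselves were already built from the
# geometry section, so this handler only recovers each triangle's material index by
# matching sorted vertex triples from the split data against the existing triangles.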
def readSectionBinMeshPLG(self, header, geometry):
slice = self.f.read(header[1])
(type, splits, total), slice = self.readSlice("iii", slice)
if type != 0 and type != 1:
print("Morph PLG section in unknown type - ignoring.")
return header, None
lookup = {}
for i in range(len(geometry.triangles)):
v = geometry.triangles[i].verts
v = list(v)
v.sort()
lookup[tuple(v)] = i
totals = 0
for i in range(splits):
(sub, mat), slice = self.readSlice("ii", slice)
if type == 0:
for j in range(sub//3):
vx, slice = self.readSlice("iii", slice)
vx = list(vx)
vx.sort()
vx = tuple(vx)
if vx in lookup:
geometry.triangles[lookup[vx]].mat = mat
else:
elems = deque()
for j in range(sub):
if len(elems) > 2:
elems.popleft()
(item,), slice = self.readSlice("i", slice)
if len(elems) > 1:
checklist = [elems[0], elems[1], item]
checklist.sort()
check = tuple(checklist)
if check in lookup:
geometry.triangles[lookup[check]].mat = mat
elems.append(item)
return header, None
def readSectionFrameName(self, header, frame):
frame.name = self.f.read(header[1]).decode("ascii")
return header, None
def readSectionCollision(self, header, geometry):
if not self.childrenOf or len(self.childrenOf[0]) == 0:
print("Collision extension - no frame to attach to.")
return header, None
binary = self.f.read(header[1])
self.colhex = base64.b64encode(zlib.compress(binary)).decode("ascii")
return header, None
def readSectionMatEffects(self, header, parent):
if parent.__class__ == self.RpMaterial:
return self.readSectionMaterialMatEffects(header, parent)
elif parent.__class__ == self.RpAtomic:
return self.readSectionAtomicMatEffects(header, parent)
return header, None
def readSectionMaterialMatEffects(self, header, material):
(flags,) = self.readFormat("I")
for i in range(2):
(effectType,) = self.readFormat("I")
if effectType == 0:
continue
elif effectType != 2:
print("Unknown material effect type.")
return header, None
(coefficient, frameBufferAlpha, textured) = self.readFormat("fii")
if textured:
material.readenvmap = True
material.envIntensity = coefficient
self.readSection(RwTypes.TEXTURE, material)
def readSectionAtomicMatEffects(self, header, atomic):
(check,) = self.readFormat("i")
if check != 0:
atomic.matfxpipe = True
return header, None
def readSectionMatSpecular(self, header, material):
slice = self.f.read(header[1])
(intensity,), slice = self.readSlice("f", slice)
specName = ""
for i in range(len(slice)):
if int(slice[i]) == 0:
break
specName += slice[i:i+1].decode("ascii")
texture = self.RwTexture(self, material, specName, 2, intensity)
material.setSpecTexture(texture)
return header, None
def readSectionMatReflection(self, header, material):
slice = self.f.read(header[1])
colour, slice = self.readSlice("fff", slice)
(x, intensity), slice = self.readSlice("ff", slice)
material.setReflection(colour, intensity)
return header, None
def readSectionMeshExtension(self, header, geometry):
slice = self.f.read(header[1])
(hasData,), slice = self.readSlice("i", slice)
if hasData:
print("Mesh extension extension actually has data. Not sure what to do with it.")
return header, None
def readSectionRenderRights(self, header, atomic):
if not hasattr(atomic, "__class__") or atomic.__class__ != self.RpAtomic:
print("Render rights extension is not in the right section, should be in atomic.")
return
slice = self.f.read(header[1])
(plugin, extra), slice = self.readSlice("ii", slice)
atomic.setRenderRights(plugin, extra)
def readSectionHAnimPLG(self, header, frame):
if not hasattr(frame, "__class__") or frame.__class__ != self.RwFrame:
print("HAnim extension is not in the right section, should be in frame.")
return
binary = self.f.read(header[1])
frame.hanimdata = base64.b64encode(zlib.compress(binary)).decode("ascii")
return header, None
def readSectionSkinPLG(self, header, geometry):
if not hasattr(geometry, "__class__") or geometry.__class__ != self.RpGeometry:
print("Skin extension is not in the right section, should be in geometry.")
return
binary = self.f.read(header[1])
geometry.skindata = base64.b64encode(zlib.compress(binary)).decode("ascii")
return header, None
def readSectionNightCols(self, header, geometry):
if not hasattr(geometry, "__class__") or geometry.__class__ != self.RpGeometry:
print("Night vertex colours extension is not in the right section, should be in geometry.")
return
slice = self.f.read(header[1])
(x,), slice = self.readSlice("I", slice)
geometry.nightVertCol = []
for i in range(len(geometry.vertices)):
(vcr, vcg, vcb, vca), slice = self.readSlice("BBBB", slice)
geometry.nightVertCol.append((vcr / 255, vcg / 255, vcb / 255))
return header, None
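# Export side: chunks are serialised bottom-up. Each helper's bin() returns its
# payload prefixed by an RwChunkHeader packing the chunk type, the payload size and
# the target version stamp (ExportRenderware.targetVer).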
class ExportRenderware:
class RwChunkHeader:
def __init__(self, type, size):
self.type = type
self.size = size
def bin(self):
return struct.pack("III", self.type, self.size, ExportRenderware.targetVer)
class RwVector3:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def bin(self):
return struct.pack("fff", self.x, self.y, self.z)
class RwRotMatrix:
def __init__(self):
self.m = [1, 0, 0, 0, 1, 0, 0, 0, 1]
def bin(self):
return struct.pack("9f", *self.m)
class RwFrameList:
def __init__(self):
self.R = ExportRenderware
self.frames = []
def bin(self):
payload = struct.pack("i", len(self.frames))
for frame in self.frames:
payload += frame.binraw()
payload = self.R.RwChunkHeader(RwTypes.STRUCT, len(payload)).bin() + payload
for frame in self.frames:
payload += frame.binext()
header = self.R.RwChunkHeader(RwTypes.FRAMELIST, len(payload)).bin()
return header + payload
class RwFrame:
def __init__(self, clump, object, parentFrame):
self.R = ExportRenderware
self.clump = clump
self.object = object
self.index = len(clump.frameList.frames)
clump.frameList.frames.append(self)
self.name = self.object.name
self.parent = parentFrame
self.rotation = self.R.RwRotMatrix()
self.position = self.R.RwVector3(0, 0, 0)
if parentFrame is not None:
ux = object.matrix_local.to_3x3()
self.rotation.m = [ux[0][0], ux[0][1], ux[0][2], ux[1][0], ux[1][1], ux[1][2], ux[2][0], ux[2][1], ux[2][2]]
self.position.x = object.matrix_local.translation[0]
self.position.y = object.matrix_local.translation[1]
self.position.z = object.matrix_local.translation[2]
if str(object.type) == "MESH":
self.atomic = self.R.RpAtomic(self)
elif str(object.type) == "EMPTY":
self.atomic = None
else:
raise Exception("Unsupported object type selected: " + str(object.type))
for child in self.object.children:
if str(object.type) != "MESH" and str(object.type) != "EMPTY":
print("Ignoring object " + object.name + ", type " + object.type)
continue
self.R.RwFrame(self.clump, child, self)
if not clump.colbin:
try:
if len(object.collhex) > 0:
textf = bpy.data.texts[object.collhex].as_string()
clump.colbin = zlib.decompress(base64.b64decode(bytes(textf, "ascii")))
except:
clump.colbin = None
def binraw(self):
payload = self.rotation.bin()
payload += self.position.bin()
payload += struct.pack("ii", -1 if self.parent is None else self.parent.index, 0)
return payload
def binext_name(self):
noname = "noname_"
if self.name[:len(noname)] == noname:
return b""
writename = self.R.unmangleName(self.name)
if len(writename) > 23:
writename = writename[:23]
print("Warning, frame name '", writename , "' truncated to 23 characters.")
payload = struct.pack(str(len(writename)) + "s", bytearray(writename, "ascii"))
header = self.R.RwChunkHeader(RwTypes.FRAMENAME, len(payload)).bin()
return header + payload
def binext_hanim(self):
object = self.object
try:
if len(object.rw_hanimdata) > 0:
textf = bpy.data.texts[object.rw_hanimdata].as_string()
rawdata = zlib.decompress(base64.b64decode(bytes(textf, "ascii")))
else:
return b""
except:
return b""
payload = rawdata
header = self.R.RwChunkHeader(RwTypes.HANIMPLG, len(payload)).bin()
return header + payload
def binext(self):
payload = self.binext_name() + self.binext_hanim()
header = self.R.RwChunkHeader(RwTypes.EXTENSION, len(payload)).bin()
return header + payload
class RpAtomicChunkInfo:
def __init__(self, frameIndex, geometryIndex, flags):
self.R = ExportRenderware
self.frameIndex = frameIndex
self.geometryIndex = geometryIndex
self.flags = flags
def bin(self):
payload = struct.pack("iiii", self.frameIndex, self.geometryIndex, self.flags, 0)
header = self.R.RwChunkHeader(RwTypes.STRUCT, len(payload)).bin()
return header + payload
class RpAtomic:
def __init__(self, frame):
self.R = ExportRenderware
self.clump = frame.clump
self.frame = frame
self.mesh = frame.object.to_mesh(self.clump.context.scene, False, "PREVIEW")
self.geometry = self.R.RpGeometry(self)
self.flags = 5
def binext_rights(self):
if self.frame.object.renderright == 0:
return b""
payload = struct.pack("ii", self.frame.object.renderright, self.frame.object.renderextra)
header = self.R.RwChunkHeader(RwTypes.RENDERRIGHTS, len(payload)).bin()
return header + payload
def binext_matfx(self):
if self.frame.object.matfxpipe != True and self.R.decodedVer > 0x34003:
return b""
payload = struct.pack("i", 1)
header = self.R.RwChunkHeader(RwTypes.MATEFFECTS, len(payload)).bin()
return header + payload
def bin(self):
payload = self.R.RpAtomicChunkInfo(self.frame.index, self.geometry.index, self.flags).bin()
extensions = self.binext_rights() + self.binext_matfx()
extensions = self.R.RwChunkHeader(RwTypes.EXTENSION, len(extensions)).bin() + extensions
payload += extensions
header = self.R.RwChunkHeader(RwTypes.ATOMIC, len(payload)).bin()
return header + payload
class RpVertex:
def __init__(self, pos, uv, uve, normal):
self.pos = pos
self.uv = uv
self.uve = uve
self.normal = normal
class RpTriangle:
def __init__(self, a, b, c, mat):
self.a = a
self.b = b
self.c = c
self.mat = mat
def bin(self):
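# Serialised as vertex a, vertex b, material index, then vertex c (the material index sits between the second and third vertex).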
return struct.pack("HHHH", self.a, self.b, self.mat, self.c)
class RwUVCoord:
def __init__(self, u, v):
self.u = u
self.v = v
def bin(self):
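# Flip the V coordinate (1 - v) to account for Blender's bottom-left UV origin.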
return struct.pack("ff", self.u, 1-self.v)
class RwTexture:
def __init__(self, material, bltexslot):
self.R = ExportRenderware
self.material = material
self.bltexslot = bltexslot
self.bltex = bltexslot.texture
def bin(self):
payload = struct.pack("HH", 0x1106, 0)
payload = self.R.RwChunkHeader(RwTypes.STRUCT, len(payload)).bin() + payload
strdata = struct.pack(str(len(self.bltex.name)) + "s", bytearray(self.bltex.name, "ascii"))
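# Pad the texture name with NUL bytes up to the next 4-byte boundary (always at least one terminator).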
for i in range(4 - (len(self.bltex.name)&3)):
strdata += struct.pack("B", 0)
payload += self.R.RwChunkHeader(RwTypes.STRING, len(strdata)).bin() + strdata
strdata = struct.pack("i", 0)
payload += self.R.RwChunkHeader(RwTypes.STRING, len(strdata)).bin() + strdata
extensions = b""
extensions = self.R.RwChunkHeader(RwTypes.EXTENSION, len(extensions)).bin() + extensions
payload += extensions
header = self.R.RwChunkHeader(RwTypes.TEXTURE, len(payload)).bin()
return header + payload
class RpMaterial:
def __init__(self, materialList, blMaterial):
self.R = ExportRenderware
self.materialList = materialList
self.index = len(materialList.mats)
self.mesh = materialList.mesh
self.blmaterial = blMaterial
self.red = min(255, max(0, blMaterial.diffuse_color[0] * 256))
self.green = min(255, max(0, blMaterial.diffuse_color[1] * 256))
self.blue = min(255, max(0, blMaterial.diffuse_color[2] * 256))
self.alpha = min(255, max(0, blMaterial.alpha * 256))
self.ambient = blMaterial.ambient
self.specular = blMaterial.specular_intensity
self.diffuse = blMaterial.diffuse_intensity
self.bltex_diffuse = self.findTexSlot("DIFFUSE")
self.bltex_specular = self.findTexSlot("SPECULAR")
self.bltex_envmap = self.findTexSlot("ENVMAP")
self.tex_diffuse = None
self.tex_envmap = None
if self.bltex_diffuse:
self.tex_diffuse = self.R.RwTexture(self, self.bltex_diffuse)
if self.bltex_diffuse.texture_coords == "UV" and len(self.bltex_diffuse.uv_layer) > 0 and not self.materialList.geometry.uvname_diff:
self.materialList.geometry.uvname_diff = self.bltex_diffuse.uv_layer
if self.bltex_envmap:
self.tex_envmap = self.R.RwTexture(self, self.bltex_envmap)
if self.bltex_envmap.texture_coords == "UV" and len(self.bltex_envmap.uv_layer) > 0 and not self.materialList.geometry.uvname_env:
self.materialList.geometry.uvname_env = self.bltex_envmap.uv_layer
def findTexSlot(self, type):
for i in range(len(self.blmaterial.texture_slots)):
textype = ""
slot = self.blmaterial.texture_slots[i]
if slot and slot.texture:
if slot.texture.type == "ENVIRONMENT_MAP":
textype = "ENVMAP"
elif slot.use_map_color_spec and not slot.use_map_color_diffuse:
textype = "SPECULAR"
elif slot.use_map_color_diffuse and not slot.use_map_color_spec:
textype = "DIFFUSE"
if textype == type:
return slot
return None
def binext_matfx(self):
if not self.tex_envmap:
return b""
payload = struct.pack("iifii", 2, 2, self.bltex_envmap.specular_color_factor, 0, 1)
payload += self.tex_envmap.bin()
payload += struct.pack("i", 0)
header = self.R.RwChunkHeader(RwTypes.MATEFFECTS, len(payload)).bin()
return header + payload
def binext_reflect(self):
if not self.blmaterial.raytrace_mirror.use and ExportRenderware.decodedVer <= 0x34003:
return b""
factor = self.blmaterial.raytrace_mirror.reflect_factor if self.blmaterial.raytrace_mirror.use else 0
colour = self.blmaterial.mirror_color
payload = struct.pack("fffffi", colour[0], colour[1], colour[2], 1, self.blmaterial.raytrace_mirror.reflect_factor, 0)
header = self.R.RwChunkHeader(RwTypes.MATREFLECTION, len(payload)).bin()
return header + payload
def binext_specular(self):
if not self.bltex_specular:
return b""
payload = struct.pack("f", self.bltex_specular.specular_color_factor)
texname = bytes(self.bltex_specular.texture.name, "ascii")
payload += texname[:23]
nullbyte = struct.pack("B", 0)
for i in range(24 - min(23, len(texname))):
payload += nullbyte
header = self.R.RwChunkHeader(RwTypes.MATSPECULAR, len(payload)).bin()
return header + payload
def bin(self):
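# Material struct: flags, RGBA colour, an unused field, a "has diffuse texture" flag, then the ambient, specular and diffuse coefficients.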
payload = struct.pack("iBBBBiIfff", 0, int(self.red), int(self.green), int(self.blue), int(self.alpha), 0, 1 if self.tex_diffuse else 0, self.ambient, self.specular, self.diffuse)
payload = self.R.RwChunkHeader(RwTypes.STRUCT, len(payload)).bin() + payload
if self.tex_diffuse:
payload += self.tex_diffuse.bin()
extensions = self.binext_matfx() + self.binext_reflect() + self.binext_specular()
extensions = self.R.RwChunkHeader(RwTypes.EXTENSION, len(extensions)).bin() + extensions
payload += extensions
header = self.R.RwChunkHeader(RwTypes.MATERIAL, len(payload)).bin()
return header + payload
class RpMaterialList:
def __init__(self, geometry):
self.R = ExportRenderware
self.geometry = geometry
self.clump = geometry.clump
self.mesh = geometry.mesh
self.mats = []
for mat in self.mesh.materials:
self.mats.append(self.R.RpMaterial(self, mat))
def bin(self):
payload = struct.pack("i", len(self.mesh.materials))
for mat in self.mats:
payload += struct.pack("i", -1)
payload = self.R.RwChunkHeader(RwTypes.STRUCT, len(payload)).bin() + payload
for mat in self.mats:
payload += mat.bin()
header = self.R.RwChunkHeader(RwTypes.MATERIALLIST, len(payload)).bin()
return header + payload
class RpGeometryList:
def __init__(self):
self.R = ExportRenderware
self.geoms = []
def bin(self):
payload = struct.pack("i", len(self.geoms))
payload = self.R.RwChunkHeader(RwTypes.STRUCT, len(payload)).bin() + payload
for geom in self.geoms:
payload += geom.bin()
header = self.R.RwChunkHeader(RwTypes.GEOMETRYLIST, len(payload)).bin()
return header + payload
class RpGeometryChunkInfo:
def __init__(self):
self.flags = RpGeomFlag.TEXTURED | RpGeomFlag.NORMALS | RpGeomFlag.LIGHT | RpGeomFlag.MODULATEMATERIALCOLOR
self.texCount = 1
self.triangleCount = 0
self.vertexCount = 0
self.frameCount = 1
def binraw(self):
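# Geometry header: format flags, texture set count, triangle count, vertex count and morph-target (frame) count.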
return struct.pack("HHiii", self.flags, self.texCount, self.triangleCount, self.vertexCount, self.frameCount)
class RpGeometry:
def __init__(self, atomic):
self.R = ExportRenderware
self.clump = atomic.clump
self.atomic = atomic
self.mesh = atomic.mesh
self.index = len(self.clump.geometryList.geoms)
self.clump.geometryList.geoms.append(self)
self.chunkInfo = self.R.RpGeometryChunkInfo()
self.uvname_diff = None
self.uvname_env = None
self.materialList = self.R.RpMaterialList(self)
self.matTris = []
for i in range(len(self.materialList.mats)):
self.matTris.append([])
mesh = self.mesh
self.vdict = []
for i in range(len(mesh.vertices)):
self.vdict.append({})
self.uvc = self.getUVData(self.uvname_diff)
self.uvce = None
if self.uvname_env and self.uvname_env != self.uvname_diff:
self.uvce = self.getUVData(self.uvname_env)
self.vertices = []
self.triangles = []
self.vertCol = None
self.nightVertCol = None
self.vertColData = None
self.nightVertColData = None
for vcol in self.mesh.vertex_colors:
if vcol.name.lower() == "night" and self.nightVertCol is None:
self.nightVertCol = []
self.nightVertColData = vcol.data
elif self.vertCol is None:
self.vertCol = []
self.vertColData = vcol.data
for poly in mesh.polygons:
self.addBlenderPoly(poly)
if len(self.vertices) > 65535:
raise Exception("Aborting export: vertex count exceeds 65535")
self.maxDist = 0
for v in self.mesh.vertices:
self.maxDist = max(self.maxDist, math.sqrt(v.co[0]*v.co[0] + v.co[1]*v.co[1] + v.co[2]*v.co[2]))
self.chunkInfo.triangleCount = len(self.triangles)
self.chunkInfo.vertexCount = len(self.vertices)
if self.uvce:
self.chunkInfo.texCount = 2
self.chunkInfo.flags = self.chunkInfo.flags & (~RpGeomFlag.TEXTURED)
self.chunkInfo.flags |= RpGeomFlag.TEXTURED2
if self.R.decodedVer > 0x34003:
self.chunkInfo.flags |= RpGeomFlag.POSITIONS
if self.vertColData:
self.chunkInfo.flags |= RpGeomFlag.PRELIT
def getUVData(self, name):
for i in range(len(self.mesh.uv_textures)):
if name and self.mesh.uv_textures[i] and self.mesh.uv_textures[i].name == name:
return self.mesh.uv_layers[i].data
return None
def newVertId(self, id, uv, uve):
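# Duplicate a Blender vertex per unique (diffuse UV, env UV) pair so every exported vertex carries exactly one set of texture coordinates.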
if (uv + uve) not in self.vdict[id]:
self.vdict[id][uv + uve] = len(self.vertices)
self.vertices.append(self.R.RpVertex(self.mesh.vertices[id].co, uv, uve, self.mesh.vertices[id].normal))
if self.vertColData:
self.vertCol.append((int(self.vertColData[id].color[0]*255), int(self.vertColData[id].color[1]*255), int(self.vertColData[id].color[2]*255)))
if self.nightVertColData:
self.nightVertCol.append((int(self.nightVertColData[id].color[0]*255), int(self.nightVertColData[id].color[1]*255), int(self.nightVertColData[id].color[2]*255)))
return self.vdict[id][(uv + uve)]
def addRawPoly(self, verts, uvs, mat):
newIds = []
for i in range(3):
uv = tuple(self.uvc[uvs[i]].uv) if self.uvc else (0, 0)
uve = tuple(self.uvce[uvs[i]].uv) if self.uvce else (0, 0)
newIds.append(self.newVertId(verts[i], uv, uve))
self.triangles.append(self.R.RpTriangle(newIds[0], newIds[1], newIds[2], mat))
if mat >= 0:
self.matTris[mat].append(newIds[0])
self.matTris[mat].append(newIds[1])
self.matTris[mat].append(newIds[2])
def addBlenderPoly(self, p):
if len(p.vertices) < 3 or len(p.vertices) > 4:
raise Exception("Aborting export: Invalid number of vertices on an edge.")
self.addRawPoly([p.vertices[0], p.vertices[1], p.vertices[2]], [p.loop_indices[0], p.loop_indices[1], p.loop_indices[2]], p.material_index)
if len(p.vertices) == 4:
self.addRawPoly([p.vertices[0], p.vertices[3], p.vertices[2]], [p.loop_indices[0], p.loop_indices[3], p.loop_indices[2]], p.material_index)
def binext_binmesh(self):
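# Bin Mesh PLG: one index list per material, prefixed with a flags word, the number of splits and the total index count.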
payload = b""
splits = 0
total = 0
for i in range(len(self.matTris)):
if len(self.matTris[i]) == 0:
continue
splits += 1
total += len(self.matTris[i])
payload += struct.pack("ii", len(self.matTris[i]), i)
for id in self.matTris[i]:
payload += struct.pack("i", id)
payload = struct.pack("iii", 0, splits, total) + payload
header = self.R.RwChunkHeader(RwTypes.BINMESHPLG, len(payload)).bin()
return header + payload
def binext_morph(self):
if self.R.decodedVer > 0x34003 or self.R.decodedVer < 0x33000:
return b""
payload = struct.pack("i", 0)
header = self.R.RwChunkHeader(RwTypes.MORPHPLG, len(payload)).bin()
return header + payload
def binext_meshext(self):
if self.R.decodedVer <= 0x34003:
return b""
payload = struct.pack("i", 0)
header = self.R.RwChunkHeader(RwTypes.MESHEXTENSION, len(payload)).bin()
return header + payload
def binext_skin(self):
object = self.atomic.frame.object
try:
if len(object.rw_skindata) > 0:
textf = bpy.data.texts[object.rw_skindata].as_string()
rawdata = zlib.decompress(base64.b64decode(bytes(textf, "ascii")))
else:
return b""
except:
return b""
payload = rawdata
header = self.R.RwChunkHeader(RwTypes.SKINPLG, len(payload)).bin()
return header + payload
def binext_nightcol(self):
if not self.nightVertCol:
return b""
payload = struct.pack("I", 1)
for col in self.nightVertCol:
payload += struct.pack("BBBB", col[0], col[1], col[2], 255)
header = self.R.RwChunkHeader(RwTypes.NIGHTCOLS, len(payload)).bin()
return header + payload
def bin(self):
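# Geometry struct layout: header, an extra float triple on older versions, optional prelit colours, UV set(s), triangle list, bounding sphere plus flags, then vertex positions and normals.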
payload = self.chunkInfo.binraw()
if self.R.decodedVer < 0x34001:
payload += struct.pack("fff", 0, 0, 1)
if self.vertCol:
for col in self.vertCol:
payload += struct.pack("BBBB", col[0], col[1], col[2], 255)
for vertex in self.vertices:
payload += self.R.RwUVCoord(vertex.uv[0], vertex.uv[1]).bin()
if self.uvce:
for vertex in self.vertices:
payload += self.R.RwUVCoord(vertex.uve[0], vertex.uve[1]).bin()
for triangle in self.triangles:
payload += triangle.bin()
payload += struct.pack("ffffii", 0, 0, 0, self.maxDist, 1, 1)
for vertex in self.vertices:
payload += self.R.RwVector3(vertex.pos[0], vertex.pos[1], vertex.pos[2]).bin()
for vertex in self.vertices:
payload += self.R.RwVector3(vertex.normal[0], vertex.normal[1], vertex.normal[2]).bin()
payload = self.R.RwChunkHeader(RwTypes.STRUCT, len(payload)).bin() + payload
payload += self.materialList.bin()
extensions = self.binext_binmesh() + self.binext_skin() + self.binext_morph() + self.binext_meshext() + self.binext_nightcol()
extensions = self.R.RwChunkHeader(RwTypes.EXTENSION, len(extensions)).bin() + extensions
payload += extensions
header = self.R.RwChunkHeader(RwTypes.GEOMETRY, len(payload)).bin()
return header + payload
class RpClumpChunkInfo:
def __init__(self, atomicCount, lightCount, cameraCount):
self.R = ExportRenderware
self.atomicCount = atomicCount
self.lightCount = lightCount
self.cameraCount = cameraCount
def bin(self):
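# Clump struct: atomic count, with light and camera counts appended for newer library versions (decodedVer > 0x33000).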
payload = struct.pack("i", self.atomicCount)
if self.R.decodedVer > 0x33000:
payload += struct.pack("ii", self.lightCount, self.cameraCount)
header = self.R.RwChunkHeader(RwTypes.STRUCT, len(payload)).bin()
return header + payload
class RpClump:
def __init__(self, context, exportVer):
self.R = ExportRenderware
self.R.targetVer = exportVer
self.R.decodedVer = RwTypes.decodeVersion(self.R.targetVer)
self.context = context
self.frameList = self.R.RwFrameList()
self.geometryList = self.R.RpGeometryList()
self.colbin = None
exportables = []
for object in context.selected_objects:
parent = object.parent
add = True
while parent:
if parent in context.selected_objects:
add = False
break
parent = parent.parent
if add:
exportables.append(object)
for object in exportables:
if str(object.type) != "MESH" and str(object.type) != "EMPTY":
print("Ignoring object " + object.name + ", type " + object.type)
continue
self.R.RwFrame(self, object, None)
if len(self.frameList.frames) == 0:
raise Exception("Aborting export: no frames selected.")
def binext_coll(self):
if not self.colbin:
return b""
payload = self.colbin
header = self.R.RwChunkHeader(RwTypes.COLLISION, len(self.colbin)).bin()
return header + payload
def bin(self):
payload = self.R.RpClumpChunkInfo(len(self.geometryList.geoms), 0, 0).bin()
payload += self.frameList.bin()
payload += self.geometryList.bin()
for geometry in self.geometryList.geoms:
payload += geometry.atomic.bin()
extensions = self.binext_coll()
extensions = self.R.RwChunkHeader(RwTypes.EXTENSION, len(extensions)).bin() + extensions
payload += extensions
header = self.R.RwChunkHeader(RwTypes.CLUMP, len(payload)).bin()
return header + payload
def __init__(self, context, exportVerIndex, filepath):
if exportVerIndex == "1":
exportVer = 0x0800FFFF
elif exportVerIndex == "2":
exportVer = 0x1003FFFF
else:
exportVer = 0x1803FFFF
outf = open(filepath, "wb")
outf.write(self.RpClump(context, exportVer).bin())
outf.close()
def unmangleName(name):
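# Strip Blender's ".001"-style duplicate suffix so objects export under their original name.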
if len(name) > 4 and name[-4] == "." and name[-3:].isnumeric():
return name[:-4]
else:
return name
class ExportRenderwareMenu(bpy.types.Operator):
expVersionValues = (("1", "GTA III", ""), ("2", "Vice City", ""), ("3", "San Andreas", ""))
bl_idname = "export_rw.dff"
bl_label = "Export Renderware (.dff)"
filename_ext = ".dff"
filepath = StringProperty(subtype = "FILE_PATH")
expVersion = EnumProperty(name = "Export version", items = expVersionValues, default="2")
def invoke(self, context, event):
wm = context.window_manager
wm.fileselect_add(self)
return {"RUNNING_MODAL"}
def execute(self, context):
setupProps()
ExportRenderware(context, self.expVersion, self.filepath)
return {"FINISHED"}
class ImportRenderwareMenu(bpy.types.Operator):
bl_idname = "import_rw.dff"
bl_label = "Import Renderware (.dff)"
filename_ext = ".dff"
filepath = StringProperty(subtype = "FILE_PATH")
def invoke(self, context, event):
wm = context.window_manager
wm.fileselect_add(self)
return {"RUNNING_MODAL"}
def execute(self, context):
setupProps()
ImportRenderware(self.filepath)
return {"FINISHED"}
def export_func(self, context):
self.layout.operator(ExportRenderwareMenu.bl_idname, text="Renderware (.dff)")
def import_func(self, context):
self.layout.operator(ImportRenderwareMenu.bl_idname, text="Renderware (.dff)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(export_func)
bpy.types.INFO_MT_file_import.append(import_func)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(export_func)
bpy.types.INFO_MT_file_import.remove(import_func)
def setupProps():
class renderwarePanel(bpy.types.Panel):
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_label = "Renderware"
def draw(self, context):
self.layout.prop(bpy.context.active_object, "renderright")
self.layout.prop(bpy.context.active_object, "renderextra")
self.layout.prop(bpy.context.active_object, "matfxpipe")
self.layout.prop(bpy.context.active_object, "collhex")
self.layout.prop(bpy.context.active_object, "rw_hanimdata")
self.layout.prop(bpy.context.active_object, "rw_skindata")
if hasattr(bpy.types.Object, "collhex"):
return
bpy.types.Object.collhex = bpy.props.StringProperty(name = "Collision", description = "Name of the text object that contains collision binary data.", maxlen = 100)
bpy.types.Object.renderright = bpy.props.IntProperty(name = "RenderRight", description = "Index of the plugin whose pipeline is used for rendering.")
bpy.types.Object.renderextra = bpy.props.IntProperty(name = "RenderExtra", description = "Extra arguments to the render pipeline.")
bpy.types.Object.matfxpipe = bpy.props.BoolProperty(name = "MatFX pipeline", description = "Whether rendering is handled by MatFX pipeline.")
bpy.types.Object.rw_hanimdata = bpy.props.StringProperty(name = "HAnimData", description = "Info for this skin bone.", maxlen = 100)
bpy.types.Object.rw_skindata = bpy.props.StringProperty(name = "SkinData", description = "Skin data (bone vertices etc) for this mesh.", maxlen = 100)
bpy.utils.register_class(renderwarePanel)
if __name__ == "__main__":
unregister()
register()
setupProps()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Do-nothing script for making a release
This idea comes from here:
https://blog.danslimmon.com/2019/07/15/do-nothing-scripting-the-key-to-gradual-automation/
Author: Gertjan van den Burg
Date: 2019-07-23
"""
import colorama
import os
import sys
import tempfile
import webbrowser
URLS = {
"CI": "https://github.com/GjjvdBurg/labella.py/actions",
}
def colored(msg, color=None, style=None):
colors = {
"red": colorama.Fore.RED,
"green": colorama.Fore.GREEN,
"cyan": colorama.Fore.CYAN,
"yellow": colorama.Fore.YELLOW,
"magenta": colorama.Fore.MAGENTA,
None: "",
}
styles = {
"bright": colorama.Style.BRIGHT,
"dim": colorama.Style.DIM,
None: "",
}
pre = colors[color] + styles[style]
post = colorama.Style.RESET_ALL
return f"{pre}{msg}{post}"
def cprint(msg, color=None, style=None):
print(colored(msg, color=color, style=style))
def wait_for_enter():
input(colored("\nPress Enter to continue", style="dim"))
print()
def get_package_name():
with open("./setup.py", "r") as fp:
nameline = next(
(l.strip() for l in fp if l.startswith("NAME = ")), None
)
return nameline.split("=")[-1].strip().strip('"')
def get_package_version(pkgname):
ctx = {}
with open(f"{pkgname}/__version__.py", "r") as fp:
exec(fp.read(), ctx)
return ctx["__version__"]
class Step:
def pre(self, context):
pass
def post(self, context):
wait_for_enter()
def run(self, context):
try:
self.pre(context)
self.action(context)
self.post(context)
except KeyboardInterrupt:
cprint("\nInterrupted.", color="red")
raise SystemExit(1)
def instruct(self, msg):
cprint(msg, color="green")
def print_run(self, msg):
cprint("Run:", color="cyan", style="bright")
self.print_cmd(msg)
def print_cmd(self, msg):
cprint("\t" + msg, color="cyan", style="bright")
def do_cmd(self, cmd):
cprint(f"Going to run: {cmd}", color="magenta", style="bright")
wait_for_enter()
os.system(cmd)
class GitToMaster(Step):
def action(self, context):
self.instruct("Make sure you're on master and changes are merged in")
self.print_run("git checkout master")
class UpdateChangelog(Step):
def action(self, context):
self.instruct(f"Update change log for version {context['version']}")
self.print_run("vi CHANGELOG.md")
class UpdateReadme(Step):
def action(self, context):
self.instruct("Update readme if necessary (check for version update)")
self.print_run("vi README.md")
class RunTests(Step):
def action(self, context):
self.do_cmd("make test")
class BumpVersionPackage(Step):
def action(self, context):
self.instruct("Update __version__.py with new version")
self.do_cmd(f"vi {context['pkgname']}/__version__.py")
def post(self, context):
wait_for_enter()
context["version"] = self._get_version(context)
def _get_version(self, context):
# Get the version from the version file
return get_package_version(context["pkgname"])
class MakeClean(Step):
def action(self, context):
self.do_cmd("make clean")
class MakeDocs(Step):
def action(self, context):
self.do_cmd("make docs")
class MakeDist(Step):
def action(self, context):
self.do_cmd("make dist")
class PushToTestPyPI(Step):
def action(self, context):
self.do_cmd(
"twine upload --repository-url https://test.pypi.org/legacy/ dist/*"
)
class InstallFromTestPyPI(Step):
def action(self, context):
tmpvenv = tempfile.mkdtemp(prefix="p2r_venv_")
self.do_cmd(
f"python -m venv {tmpvenv} && source {tmpvenv}/bin/activate && "
"pip install --no-cache-dir --index-url "
"https://test.pypi.org/simple/ "
"--extra-index-url https://pypi.org/simple "
f"{context['pkgname']}=={context['version']}"
)
context["tmpvenv"] = tmpvenv
class TestPackage(Step):
def action(self, context):
self.instruct(
f"Ensure that the following command gives version {context['version']}"
)
self.do_cmd(
f"source {context['tmpvenv']}/bin/activate && pip list | grep {context['pkgname']}"
)
class RemoveVenv(Step):
def action(self, context):
self.do_cmd(f"rm -rf {context['tmpvenv']}")
class GitTagVersion(Step):
def action(self, context):
self.do_cmd(f"git tag v{context['version']}")
class GitAdd(Step):
def action(self, context):
self.instruct("Add everything to git and commit")
self.print_run("git gui")
class GitAddRelease(Step):
def action(self, context):
self.instruct("Add Changelog & Readme to git")
self.instruct(
f"Commit with title: {context['pkgname']} Release {context['version']}"
)
self.instruct("Embed changelog in body commit message")
self.print_run("git gui")
class PushToPyPI(Step):
def action(self, context):
self.do_cmd("twine upload dist/*")
class PushToGitHub(Step):
def action(self, context):
self.do_cmd("git push -u --tags origin master")
class WaitForCI(Step):
def action(self, context):
webbrowser.open(URLS["CI"])
self.instruct("Wait for CI to complete and verify that its successful")
def main(target=None):
colorama.init()
procedure = [
("gittomaster", GitToMaster()),
("gitadd1", GitAdd()),
("clean1", MakeClean()),
("runtests", RunTests()),
# trigger CI to run tests on all platforms
("push1", PushToGitHub()),
("ci1", WaitForCI()),
("bumpversion", BumpVersionPackage()),
("gitadd2", GitAdd()),
("changelog", UpdateChangelog()),
("readme", UpdateReadme()),
("dist", MakeDist()),
("testpypi", PushToTestPyPI()),
("install", InstallFromTestPyPI()),
("testpkg", TestPackage()),
("remove_venv", RemoveVenv()),
("addrelease", GitAddRelease()),
("pypi", PushToPyPI()),
("tagfinal", GitTagVersion()),
# triggers CI to build with cibw and push to PyPI
("push3", PushToGitHub()),
("ci3", WaitForCI()),
]
context = {}
context["pkgname"] = get_package_name()
context["version"] = get_package_version(context["pkgname"])
skip = bool(target)
for name, step in procedure:
if skip and name != target:
continue
skip = False
step.run(context)
cprint("\nDone!", color="yellow", style="bright")
if __name__ == "__main__":
target = sys.argv[1] if len(sys.argv) > 1 else None
main(target=target)
|
|
"""The tests for the emulated Hue component."""
import asyncio
import json
from ipaddress import ip_address
from unittest.mock import patch
from aiohttp.hdrs import CONTENT_TYPE
import pytest
from tests.common import get_test_instance_port
from homeassistant import core, const, setup
import homeassistant.components as core_components
from homeassistant.components import (
fan, http, light, script, emulated_hue, media_player, cover, climate)
from homeassistant.components.emulated_hue import Config
from homeassistant.components.emulated_hue.hue_api import (
HUE_API_STATE_ON, HUE_API_STATE_BRI, HueUsernameView, HueOneLightStateView,
HueAllLightsStateView, HueOneLightChangeView, HueAllGroupsStateView)
from homeassistant.const import STATE_ON, STATE_OFF
import homeassistant.util.dt as dt_util
from datetime import timedelta
from tests.common import async_fire_time_changed
HTTP_SERVER_PORT = get_test_instance_port()
BRIDGE_SERVER_PORT = get_test_instance_port()
BRIDGE_URL_BASE = 'http://127.0.0.1:{}'.format(BRIDGE_SERVER_PORT) + '{}'
JSON_HEADERS = {CONTENT_TYPE: const.CONTENT_TYPE_JSON}
@pytest.fixture
def hass_hue(loop, hass):
"""Set up a Home Assistant instance for these tests."""
# We need to do this to get access to homeassistant/turn_(on,off)
loop.run_until_complete(
core_components.async_setup(hass, {core.DOMAIN: {}}))
loop.run_until_complete(setup.async_setup_component(
hass, http.DOMAIN,
{http.DOMAIN: {http.CONF_SERVER_PORT: HTTP_SERVER_PORT}}))
with patch('homeassistant.components'
'.emulated_hue.UPNPResponderThread'):
loop.run_until_complete(
setup.async_setup_component(hass, emulated_hue.DOMAIN, {
emulated_hue.DOMAIN: {
emulated_hue.CONF_LISTEN_PORT: BRIDGE_SERVER_PORT,
emulated_hue.CONF_EXPOSE_BY_DEFAULT: True
}
}))
loop.run_until_complete(
setup.async_setup_component(hass, light.DOMAIN, {
'light': [
{
'platform': 'demo',
}
]
}))
loop.run_until_complete(
setup.async_setup_component(hass, script.DOMAIN, {
'script': {
'set_kitchen_light': {
'sequence': [
{
'service_template':
"light.turn_{{ requested_state }}",
'data_template': {
'entity_id': 'light.kitchen_lights',
'brightness': "{{ requested_level }}"
}
}
]
}
}
}))
loop.run_until_complete(
setup.async_setup_component(hass, climate.DOMAIN, {
'climate': [
{
'platform': 'demo',
}
]
}))
loop.run_until_complete(
setup.async_setup_component(hass, media_player.DOMAIN, {
'media_player': [
{
'platform': 'demo',
}
]
}))
loop.run_until_complete(
setup.async_setup_component(hass, fan.DOMAIN, {
'fan': [
{
'platform': 'demo',
}
]
}))
loop.run_until_complete(
setup.async_setup_component(hass, cover.DOMAIN, {
'cover': [
{
'platform': 'demo',
}
]
}))
# Kitchen light is explicitly excluded from being exposed
kitchen_light_entity = hass.states.get('light.kitchen_lights')
attrs = dict(kitchen_light_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE] = False
hass.states.async_set(
kitchen_light_entity.entity_id, kitchen_light_entity.state,
attributes=attrs)
# Ceiling Fan is explicitly excluded from being exposed
ceiling_fan_entity = hass.states.get('fan.ceiling_fan')
attrs = dict(ceiling_fan_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE_HIDDEN] = True
hass.states.async_set(
ceiling_fan_entity.entity_id, ceiling_fan_entity.state,
attributes=attrs)
# Expose the script
script_entity = hass.states.get('script.set_kitchen_light')
attrs = dict(script_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE] = True
hass.states.async_set(
script_entity.entity_id, script_entity.state, attributes=attrs
)
# Expose cover
cover_entity = hass.states.get('cover.living_room_window')
attrs = dict(cover_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE_HIDDEN] = False
hass.states.async_set(
cover_entity.entity_id, cover_entity.state, attributes=attrs
)
# Expose Hvac
hvac_entity = hass.states.get('climate.hvac')
attrs = dict(hvac_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE_HIDDEN] = False
hass.states.async_set(
hvac_entity.entity_id, hvac_entity.state, attributes=attrs
)
# Expose HeatPump
hp_entity = hass.states.get('climate.heatpump')
attrs = dict(hp_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE_HIDDEN] = False
hass.states.async_set(
hp_entity.entity_id, hp_entity.state, attributes=attrs
)
return hass
@pytest.fixture
def hue_client(loop, hass_hue, aiohttp_client):
"""Create web client for emulated hue api."""
web_app = hass_hue.http.app
config = Config(None, {
emulated_hue.CONF_TYPE: emulated_hue.TYPE_ALEXA,
emulated_hue.CONF_ENTITIES: {
'light.bed_light': {
emulated_hue.CONF_ENTITY_HIDDEN: True
},
'cover.living_room_window': {
emulated_hue.CONF_ENTITY_HIDDEN: False
}
}
})
HueUsernameView().register(web_app, web_app.router)
HueAllLightsStateView(config).register(web_app, web_app.router)
HueOneLightStateView(config).register(web_app, web_app.router)
HueOneLightChangeView(config).register(web_app, web_app.router)
HueAllGroupsStateView(config).register(web_app, web_app.router)
return loop.run_until_complete(aiohttp_client(web_app))
@asyncio.coroutine
def test_discover_lights(hue_client):
"""Test the discovery of lights."""
result = yield from hue_client.get('/api/username/lights')
assert result.status == 200
assert 'application/json' in result.headers['content-type']
result_json = yield from result.json()
devices = set(val['uniqueid'] for val in result_json.values())
# Make sure the lights we added to the config are there
assert 'light.ceiling_lights' in devices
assert 'light.bed_light' not in devices
assert 'script.set_kitchen_light' in devices
assert 'light.kitchen_lights' not in devices
assert 'media_player.living_room' in devices
assert 'media_player.bedroom' in devices
assert 'media_player.walkman' in devices
assert 'media_player.lounge_room' in devices
assert 'fan.living_room_fan' in devices
assert 'fan.ceiling_fan' not in devices
assert 'cover.living_room_window' in devices
assert 'climate.hvac' in devices
assert 'climate.heatpump' in devices
assert 'climate.ecobee' not in devices
@asyncio.coroutine
def test_get_light_state(hass_hue, hue_client):
"""Test the getting of light state."""
# Turn office light on and set to 127 brightness
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_ON,
{
const.ATTR_ENTITY_ID: 'light.ceiling_lights',
light.ATTR_BRIGHTNESS: 127
},
blocking=True)
office_json = yield from perform_get_light_state(
hue_client, 'light.ceiling_lights', 200)
assert office_json['state'][HUE_API_STATE_ON] is True
assert office_json['state'][HUE_API_STATE_BRI] == 127
# Check all lights view
result = yield from hue_client.get('/api/username/lights')
assert result.status == 200
assert 'application/json' in result.headers['content-type']
result_json = yield from result.json()
assert 'light.ceiling_lights' in result_json
assert result_json['light.ceiling_lights']['state'][HUE_API_STATE_BRI] == \
127
# Turn office light off
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_OFF,
{
const.ATTR_ENTITY_ID: 'light.ceiling_lights'
},
blocking=True)
office_json = yield from perform_get_light_state(
hue_client, 'light.ceiling_lights', 200)
assert office_json['state'][HUE_API_STATE_ON] is False
assert office_json['state'][HUE_API_STATE_BRI] == 0
# Make sure bedroom light isn't accessible
yield from perform_get_light_state(
hue_client, 'light.bed_light', 404)
# Make sure kitchen light isn't accessible
yield from perform_get_light_state(
hue_client, 'light.kitchen_lights', 404)
@asyncio.coroutine
def test_put_light_state(hass_hue, hue_client):
"""Test the setting of light states."""
yield from perform_put_test_on_ceiling_lights(hass_hue, hue_client)
# Turn the ceiling lights on first
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_ON,
{const.ATTR_ENTITY_ID: 'light.ceiling_lights',
light.ATTR_BRIGHTNESS: 153},
blocking=True)
ceiling_lights = hass_hue.states.get('light.ceiling_lights')
assert ceiling_lights.state == STATE_ON
assert ceiling_lights.attributes[light.ATTR_BRIGHTNESS] == 153
# Go through the API to turn it off
ceiling_result = yield from perform_put_light_state(
hass_hue, hue_client,
'light.ceiling_lights', False)
ceiling_result_json = yield from ceiling_result.json()
assert ceiling_result.status == 200
assert 'application/json' in ceiling_result.headers['content-type']
assert len(ceiling_result_json) == 1
# Check to make sure the state changed
ceiling_lights = hass_hue.states.get('light.ceiling_lights')
assert ceiling_lights.state == STATE_OFF
# Make sure we can't change the bedroom light state
bedroom_result = yield from perform_put_light_state(
hass_hue, hue_client,
'light.bed_light', True)
assert bedroom_result.status == 404
# Make sure we can't change the kitchen light state
kitchen_result = yield from perform_put_light_state(
hass_hue, hue_client,
'light.kitchen_light', True)
assert kitchen_result.status == 404
@asyncio.coroutine
def test_put_light_state_script(hass_hue, hue_client):
"""Test the setting of script variables."""
# Turn the kitchen light off first
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'light.kitchen_lights'},
blocking=True)
# Emulated hue converts 0-100% to 0-255.
level = 23
brightness = round(level * 255 / 100)
script_result = yield from perform_put_light_state(
hass_hue, hue_client,
'script.set_kitchen_light', True, brightness)
script_result_json = yield from script_result.json()
assert script_result.status == 200
assert len(script_result_json) == 2
kitchen_light = hass_hue.states.get('light.kitchen_lights')
assert kitchen_light.state == 'on'
assert kitchen_light.attributes[light.ATTR_BRIGHTNESS] == level
@asyncio.coroutine
def test_put_light_state_climate_set_temperature(hass_hue, hue_client):
"""Test setting climate temperature."""
brightness = 19
temperature = round(brightness / 255 * 100)
hvac_result = yield from perform_put_light_state(
hass_hue, hue_client,
'climate.hvac', True, brightness)
hvac_result_json = yield from hvac_result.json()
assert hvac_result.status == 200
assert len(hvac_result_json) == 2
hvac = hass_hue.states.get('climate.hvac')
assert hvac.state == climate.const.STATE_COOL
assert hvac.attributes[climate.ATTR_TEMPERATURE] == temperature
assert hvac.attributes[climate.ATTR_OPERATION_MODE] == \
climate.const.STATE_COOL
# Make sure we can't change the ecobee temperature since it's not exposed
ecobee_result = yield from perform_put_light_state(
hass_hue, hue_client,
'climate.ecobee', True)
assert ecobee_result.status == 404
@asyncio.coroutine
def test_put_light_state_climate_turn_on(hass_hue, hue_client):
"""Test inability to turn climate on."""
yield from hass_hue.services.async_call(
climate.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'climate.heatpump'},
blocking=True)
# Somehow after calling the above service the device gets unexposed,
# so we need to expose it again
hp_entity = hass_hue.states.get('climate.heatpump')
attrs = dict(hp_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE_HIDDEN] = False
hass_hue.states.async_set(
hp_entity.entity_id, hp_entity.state, attributes=attrs
)
hp_result = yield from perform_put_light_state(
hass_hue, hue_client,
'climate.heatpump', True)
hp_result_json = yield from hp_result.json()
assert hp_result.status == 200
assert len(hp_result_json) == 1
hp = hass_hue.states.get('climate.heatpump')
assert hp.state == STATE_OFF
assert hp.attributes[climate.ATTR_OPERATION_MODE] == \
climate.const.STATE_HEAT
@asyncio.coroutine
def test_put_light_state_climate_turn_off(hass_hue, hue_client):
"""Test inability to turn climate off."""
hp_result = yield from perform_put_light_state(
hass_hue, hue_client,
'climate.heatpump', False)
hp_result_json = yield from hp_result.json()
assert hp_result.status == 200
assert len(hp_result_json) == 1
hp = hass_hue.states.get('climate.heatpump')
assert hp.state == climate.const.STATE_HEAT
assert hp.attributes[climate.ATTR_OPERATION_MODE] == \
climate.const.STATE_HEAT
@asyncio.coroutine
def test_put_light_state_media_player(hass_hue, hue_client):
"""Test turning on media player and setting volume."""
# Turn the music player off first
yield from hass_hue.services.async_call(
media_player.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'media_player.walkman'},
blocking=True)
# Emulated hue converts 0.0-1.0 to 0-255.
level = 0.25
brightness = round(level * 255)
mp_result = yield from perform_put_light_state(
hass_hue, hue_client,
'media_player.walkman', True, brightness)
mp_result_json = yield from mp_result.json()
assert mp_result.status == 200
assert len(mp_result_json) == 2
walkman = hass_hue.states.get('media_player.walkman')
assert walkman.state == 'playing'
assert walkman.attributes[media_player.ATTR_MEDIA_VOLUME_LEVEL] == level
async def test_close_cover(hass_hue, hue_client):
"""Test opening cover ."""
COVER_ID = "cover.living_room_window"
# Close the cover first
await hass_hue.services.async_call(
cover.DOMAIN, const.SERVICE_CLOSE_COVER,
{const.ATTR_ENTITY_ID: COVER_ID},
blocking=True)
cover_test = hass_hue.states.get(COVER_ID)
assert cover_test.state == 'closing'
for _ in range(7):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass_hue, future)
await hass_hue.async_block_till_done()
cover_test = hass_hue.states.get(COVER_ID)
assert cover_test.state == 'closed'
# Go through the API to turn it on
cover_result = await perform_put_light_state(
hass_hue, hue_client,
COVER_ID, True, 100)
assert cover_result.status == 200
assert 'application/json' in cover_result.headers['content-type']
for _ in range(7):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass_hue, future)
await hass_hue.async_block_till_done()
cover_result_json = await cover_result.json()
assert len(cover_result_json) == 2
# Check to make sure the state changed
cover_test_2 = hass_hue.states.get(COVER_ID)
assert cover_test_2.state == 'open'
async def test_set_position_cover(hass_hue, hue_client):
"""Test setting postion cover ."""
COVER_ID = "cover.living_room_window"
# Close the cover first
await hass_hue.services.async_call(
cover.DOMAIN, const.SERVICE_CLOSE_COVER,
{const.ATTR_ENTITY_ID: COVER_ID},
blocking=True)
cover_test = hass_hue.states.get(COVER_ID)
assert cover_test.state == 'closing'
for _ in range(7):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass_hue, future)
await hass_hue.async_block_till_done()
cover_test = hass_hue.states.get(COVER_ID)
assert cover_test.state == 'closed'
level = 20
brightness = round(level/100*255)
# Go through the API to open
cover_result = await perform_put_light_state(
hass_hue, hue_client,
COVER_ID, False, brightness)
assert cover_result.status == 200
assert 'application/json' in cover_result.headers['content-type']
cover_result_json = await cover_result.json()
assert len(cover_result_json) == 2
assert cover_result_json[0]['success'][
'/lights/cover.living_room_window/state/on']
assert cover_result_json[1]['success'][
'/lights/cover.living_room_window/state/bri'] == level
for _ in range(100):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass_hue, future)
await hass_hue.async_block_till_done()
# Check to make sure the state changed
cover_test_2 = hass_hue.states.get(COVER_ID)
assert cover_test_2.state == 'open'
assert cover_test_2.attributes.get('current_position') == level
@asyncio.coroutine
def test_put_light_state_fan(hass_hue, hue_client):
"""Test turning on fan and setting speed."""
# Turn the fan off first
yield from hass_hue.services.async_call(
fan.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'fan.living_room_fan'},
blocking=True)
# Emulated hue converts 0-100% to 0-255.
level = 43
brightness = round(level * 255 / 100)
fan_result = yield from perform_put_light_state(
hass_hue, hue_client,
'fan.living_room_fan', True, brightness)
fan_result_json = yield from fan_result.json()
assert fan_result.status == 200
assert len(fan_result_json) == 2
living_room_fan = hass_hue.states.get('fan.living_room_fan')
assert living_room_fan.state == 'on'
assert living_room_fan.attributes[fan.ATTR_SPEED] == fan.SPEED_MEDIUM
# pylint: disable=invalid-name
@asyncio.coroutine
def test_put_with_form_urlencoded_content_type(hass_hue, hue_client):
"""Test the form with urlencoded content."""
# Needed for Alexa
yield from perform_put_test_on_ceiling_lights(
hass_hue, hue_client, 'application/x-www-form-urlencoded')
# Make sure we fail gracefully when we can't parse the data
data = {'key1': 'value1', 'key2': 'value2'}
result = yield from hue_client.put(
'/api/username/lights/light.ceiling_lights/state',
headers={
'content-type': 'application/x-www-form-urlencoded'
},
data=data,
)
assert result.status == 400
@asyncio.coroutine
def test_entity_not_found(hue_client):
"""Test for entity which are not found."""
result = yield from hue_client.get(
'/api/username/lights/not.existant_entity')
assert result.status == 404
result = yield from hue_client.put(
'/api/username/lights/not.existant_entity/state')
assert result.status == 404
@asyncio.coroutine
def test_allowed_methods(hue_client):
"""Test the allowed methods."""
result = yield from hue_client.get(
'/api/username/lights/light.ceiling_lights/state')
assert result.status == 405
result = yield from hue_client.put(
'/api/username/lights/light.ceiling_lights')
assert result.status == 405
result = yield from hue_client.put(
'/api/username/lights')
assert result.status == 405
@asyncio.coroutine
def test_proper_put_state_request(hue_client):
"""Test the request to set the state."""
# Test proper on value parsing
result = yield from hue_client.put(
'/api/username/lights/{}/state'.format(
'light.ceiling_lights'),
data=json.dumps({HUE_API_STATE_ON: 1234}))
assert result.status == 400
# Test proper brightness value parsing
result = yield from hue_client.put(
'/api/username/lights/{}/state'.format(
'light.ceiling_lights'),
data=json.dumps({
HUE_API_STATE_ON: True,
HUE_API_STATE_BRI: 'Hello world!'
}))
assert result.status == 400
@asyncio.coroutine
def test_get_empty_groups_state(hue_client):
"""Test the request to get groups endpoint."""
# Test proper on value parsing
result = yield from hue_client.get(
'/api/username/groups')
assert result.status == 200
result_json = yield from result.json()
assert result_json == {}
# pylint: disable=invalid-name
async def perform_put_test_on_ceiling_lights(hass_hue, hue_client,
content_type='application/json'):
"""Test the setting of a light."""
# Turn the office light off first
await hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'light.ceiling_lights'},
blocking=True)
ceiling_lights = hass_hue.states.get('light.ceiling_lights')
assert ceiling_lights.state == STATE_OFF
# Go through the API to turn it on
office_result = await perform_put_light_state(
hass_hue, hue_client,
'light.ceiling_lights', True, 56, content_type)
assert office_result.status == 200
assert 'application/json' in office_result.headers['content-type']
office_result_json = await office_result.json()
assert len(office_result_json) == 2
# Check to make sure the state changed
ceiling_lights = hass_hue.states.get('light.ceiling_lights')
assert ceiling_lights.state == STATE_ON
assert ceiling_lights.attributes[light.ATTR_BRIGHTNESS] == 56
@asyncio.coroutine
def perform_get_light_state(client, entity_id, expected_status):
"""Test the getting of a light state."""
result = yield from client.get('/api/username/lights/{}'.format(entity_id))
assert result.status == expected_status
if expected_status == 200:
assert 'application/json' in result.headers['content-type']
return (yield from result.json())
return None
@asyncio.coroutine
def perform_put_light_state(hass_hue, client, entity_id, is_on,
brightness=None, content_type='application/json'):
"""Test the setting of a light state."""
req_headers = {'Content-Type': content_type}
data = {HUE_API_STATE_ON: is_on}
if brightness is not None:
data[HUE_API_STATE_BRI] = brightness
result = yield from client.put(
'/api/username/lights/{}/state'.format(entity_id), headers=req_headers,
data=json.dumps(data).encode())
# Wait until state change is complete before continuing
yield from hass_hue.async_block_till_done()
return result
async def test_external_ip_blocked(hue_client):
"""Test external IP blocked."""
with patch('homeassistant.components.http.real_ip.ip_address',
return_value=ip_address('45.45.45.45')):
result = await hue_client.get('/api/username/lights')
assert result.status == 400
|
|
import math
import numpy
import pylab
import skimage.color, skimage.io
# Remove the background around a text bounding box: flood-fill from the box boundary with the background colour, then return the binarized text box. Input is the original colour image (converted to float).
def remove_background(image, box_start_row, box_start_col, box_width, box_height, is_text_inverse):
image = skimage.img_as_float(image)
height = image.shape[0]
width = image.shape[1]
original_image = numpy.zeros((height, width, 3))
for j in range(height):
for i in range(width):
original_image[j, i] = image[j, i]
threshold_seedfill = 0.32
box_end_row = box_start_row + box_height - 1
box_end_col = box_start_col + box_width - 1
# Increase text bounding box
width_expansion = int(box_width * 0.13)
if box_start_col <= width_expansion:
box_start_col = 0
else:
box_start_col = box_start_col - width_expansion
if box_end_col + width_expansion >= width:
box_end_col = width - 1
else:
box_end_col = box_end_col + width_expansion
height_expansion = int(box_height * 0.1)
if box_start_row <= height_expansion:
box_start_row = 0
else:
box_start_row = box_start_row - height_expansion
if box_end_row + height_expansion >= height:
box_end_row = height - 1
else:
box_end_row = box_end_row + height_expansion
# Background color
color = [1, 1, 1]
if is_text_inverse:
color = [0, 0, 0]
box_height = box_end_row - box_start_row + 1
box_width = box_end_col - box_start_col + 1
box = numpy.zeros((box_height, box_width))
# Use each boundary pixel of the box as a seed and flood-fill with the background colour every connected pixel whose colour differs from the seed by less than the threshold
# left
for i in range(box_height):
pixels_to_check = [(i, 0)]
boundary_color = []
boundary_color.append(image[i + box_start_row, box_start_col, 0])
boundary_color.append(image[i + box_start_row, box_start_col, 1])
boundary_color.append(image[i + box_start_row, box_start_col, 2])
box = numpy.zeros((box_height, box_width))
box[i, 0] = 1
while len(pixels_to_check) > 0:
pixel = pixels_to_check.pop(0)
if calc_rgb_distance(image[pixel[0] + box_start_row, pixel[1] + box_start_col], boundary_color) < threshold_seedfill:
original_image[pixel[0] + box_start_row, pixel[1] + box_start_col] = color
# left
if pixel[1] - 1 >= 0 and box[pixel[0], pixel[1] - 1] == 0 and not color_match(original_image[pixel[0] + box_start_row, pixel[1] - 1 + box_start_col], color):
pixels_to_check.append((pixel[0], pixel[1] - 1))
box[pixel[0], pixel[1] - 1] = 1
# top
if pixel[0] - 1 >= 0 and box[pixel[0] - 1, pixel[1]] == 0 and not color_match(original_image[pixel[0] - 1 + box_start_row, pixel[1] + box_start_col], color):
pixels_to_check.append((pixel[0] - 1, pixel[1]))
box[pixel[0] - 1, pixel[1]] = 1
# right
if pixel[1] + 1 < box_width and box[pixel[0], pixel[1] + 1] == 0 and not color_match(original_image[pixel[0] + box_start_row, pixel[1] + 1 + box_start_col], color):
pixels_to_check.append((pixel[0], pixel[1] + 1))
box[pixel[0], pixel[1] + 1] = 1
# bottom
if pixel[0] + 1 < box_height and box[pixel[0] + 1, pixel[1]] == 0 and not color_match(original_image[pixel[0] + 1 + box_start_row, pixel[1] + box_start_col], color):
pixels_to_check.append((pixel[0] + 1, pixel[1]))
box[pixel[0] + 1, pixel[1]] = 1
# top
for j in range(box_width):
pixels_to_check = [(0, j)]
boundary_color = []
boundary_color.append(image[box_start_row, box_start_col + j, 0])
boundary_color.append(image[box_start_row, box_start_col + j, 1])
boundary_color.append(image[box_start_row, box_start_col + j, 2])
box = numpy.zeros((box_height, box_width))
box[0, j] = 1
while len(pixels_to_check) > 0:
pixel = pixels_to_check.pop(0)
if calc_rgb_distance(image[pixel[0] + box_start_row, pixel[1] + box_start_col],
boundary_color) < threshold_seedfill:
original_image[pixel[0] + box_start_row, pixel[1] + box_start_col] = color
# left
if pixel[1] - 1 >= 0 and box[pixel[0], pixel[1] - 1] == 0 and not color_match(original_image[pixel[0] + box_start_row, pixel[1] - 1 + box_start_col], color):
pixels_to_check.append((pixel[0], pixel[1] - 1))
box[pixel[0], pixel[1] - 1] = 1
# top
if pixel[0] - 1 >= 0 and box[pixel[0] - 1, pixel[1]] == 0 and not color_match(original_image[pixel[0] - 1 + box_start_row, pixel[1] + box_start_col], color):
pixels_to_check.append((pixel[0] - 1, pixel[1]))
box[pixel[0] - 1, pixel[1]] = 1
# right
if pixel[1] + 1 < box_width and box[pixel[0], pixel[1] + 1] == 0 and not color_match(original_image[pixel[0] + box_start_row, pixel[1] + 1 + box_start_col], color):
pixels_to_check.append((pixel[0], pixel[1] + 1))
box[pixel[0], pixel[1] + 1] = 1
# bottom
if pixel[0] + 1 < box_height and box[pixel[0] + 1, pixel[1]] == 0 and not color_match(original_image[pixel[0] + 1 + box_start_row, pixel[1] + box_start_col], color):
pixels_to_check.append((pixel[0] + 1, pixel[1]))
box[pixel[0] + 1, pixel[1]] = 1
# right
for i in range(box_height):
pixels_to_check = [(i, box_width - 1)]
boundary_color = []
boundary_color.append(image[i + box_start_row, box_start_col + box_width - 1, 0])
boundary_color.append(image[i + box_start_row, box_start_col + box_width - 1, 1])
boundary_color.append(image[i + box_start_row, box_start_col + box_width - 1, 2])
box = numpy.zeros((box_height, box_width))
box[i, box_width - 1] = 1
while len(pixels_to_check) > 0:
pixel = pixels_to_check.pop(0)
if calc_rgb_distance(image[pixel[0] + box_start_row, pixel[1] + box_start_col],
boundary_color) < threshold_seedfill:
original_image[pixel[0] + box_start_row, pixel[1] + box_start_col] = color
# left
if pixel[1] - 1 >= 0 and box[pixel[0], pixel[1] - 1] == 0 and not color_match(original_image[pixel[0] + box_start_row, pixel[1] - 1 + box_start_col], color):
pixels_to_check.append((pixel[0], pixel[1] - 1))
box[pixel[0], pixel[1] - 1] = 1
# top
if pixel[0] - 1 >= 0 and box[pixel[0] - 1, pixel[1]] == 0 and not color_match(original_image[pixel[0] - 1 + box_start_row, pixel[1] + box_start_col], color):
pixels_to_check.append((pixel[0] - 1, pixel[1]))
box[pixel[0] - 1, pixel[1]] = 1
# right
if pixel[1] + 1 < box_width and box[pixel[0], pixel[1] + 1] == 0 and not color_match(original_image[pixel[0] + box_start_row, pixel[1] + 1 + box_start_col], color):
pixels_to_check.append((pixel[0], pixel[1] + 1))
box[pixel[0], pixel[1] + 1] = 1
# bottom
if pixel[0] + 1 < box_height and box[pixel[0] + 1, pixel[1]] == 0 and not color_match(original_image[pixel[0] + 1 + box_start_row, pixel[1] + box_start_col], color):
pixels_to_check.append((pixel[0] + 1, pixel[1]))
box[pixel[0] + 1, pixel[1]] = 1
# bottom
for j in range(box_width):
pixels_to_check = [(box_height - 1, j)]
boundary_color = []
boundary_color.append(image[box_start_row + box_height - 1, box_start_col + j, 0])
boundary_color.append(image[box_start_row + box_height - 1, box_start_col + j, 1])
boundary_color.append(image[box_start_row + box_height - 1, box_start_col + j, 2])
box = numpy.zeros((box_height, box_width))
box[box_height - 1, j] = 1
while len(pixels_to_check) > 0:
pixel = pixels_to_check.pop(0)
if calc_rgb_distance(image[pixel[0] + box_start_row, pixel[1] + box_start_col],
boundary_color) < threshold_seedfill:
original_image[pixel[0] + box_start_row, pixel[1] + box_start_col] = color
# left
if pixel[1] - 1 >= 0 and box[pixel[0], pixel[1] - 1] == 0 and not color_match(original_image[pixel[0] + box_start_row, pixel[1] - 1 + box_start_col], color):
pixels_to_check.append((pixel[0], pixel[1] - 1))
box[pixel[0], pixel[1] - 1] = 1
# top
if pixel[0] - 1 >= 0 and box[pixel[0] - 1, pixel[1]] == 0 and not color_match(original_image[pixel[0] - 1 + box_start_row, pixel[1] + box_start_col], color):
pixels_to_check.append((pixel[0] - 1, pixel[1]))
box[pixel[0] - 1, pixel[1]] = 1
# right
if pixel[1] + 1 < box_width and box[pixel[0], pixel[1] + 1] == 0 and not color_match(original_image[pixel[0] + box_start_row, pixel[1] + 1 + box_start_col], color):
pixels_to_check.append((pixel[0], pixel[1] + 1))
box[pixel[0], pixel[1] + 1] = 1
# bottom
if pixel[0] + 1 < box_height and box[pixel[0] + 1, pixel[1]] == 0 and not color_match(original_image[pixel[0] + 1 + box_start_row, pixel[1] + box_start_col], color):
pixels_to_check.append((pixel[0] + 1, pixel[1]))
box[pixel[0] + 1, pixel[1]] = 1
return binarize_bitmap(original_image, box_start_row, box_start_col, box_width, box_height, is_text_inverse)
def color_match(color1, color2):
return color1[0] == color2[0] and color1[1] == color2[1] and color1[2] == color2[2]
def calc_rgb_distance(color1, color2):
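# Euclidean distance between two RGB colours.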
return math.sqrt((color1[0] - color2[0]) * (color1[0] - color2[0]) + (color1[1] - color2[1]) * (color1[1] - color2[1]) + (color1[2] - color2[2]) * (color1[2] - color2[2]))
def binarize_bitmap(original_image, box_start_row, box_start_col, box_width, box_height, is_text_inverse):
grayscale = skimage.color.rgb2gray(original_image)
text_box_bitmap = numpy.zeros((box_height, box_width))
binarization_threshold = 0.6
for y in range(box_height):
for x in range(box_width):
if is_text_inverse:
# text is white
if grayscale[y + box_start_row, x + box_start_col] < binarization_threshold:
text_box_bitmap[y, x] = 1
else:
text_box_bitmap[y, x] = 0
else:
if grayscale[y + box_start_row, x + box_start_col] < binarization_threshold:
text_box_bitmap[y, x] = 0
else:
text_box_bitmap[y, x] = 1
return text_box_bitmap
if __name__ == "__main__":
box = remove_background(skimage.img_as_float(skimage.io.imread("example1.jpg")), 0, 0, 100, 50, True)
pylab.imshow(skimage.color.gray2rgb(box))
pylab.show()
pass
|
|
# -*- coding: utf-8 -
#
# This file is part of restkit released under the MIT license.
# See the NOTICE for more information.
import cgi
import copy
import mimetypes
import os
from StringIO import StringIO
import types
import urlparse
import uuid
from .datastructures import MultiDict
from .errors import AlreadyRead, RequestError
from .forms import multipart_form_encode, form_encode
from .tee import ResponseTeeInput
from .util import to_bytestring
class Request(object):
def __init__(self, url, method='GET', body=None, headers=None):
headers = headers or []
self.url = url
self.initial_url = url
self.method = method
self._headers = None
self._body = None
# set parsed uri
self.headers = headers
if body is not None:
self.body = body
def _headers__get(self):
if not isinstance(self._headers, MultiDict):
self._headers = MultiDict(self._headers or [])
return self._headers
def _headers__set(self, value):
self._headers = MultiDict(copy.copy(value))
headers = property(_headers__get, _headers__set, doc=_headers__get.__doc__)
def _parsed_url(self):
if self.url is None:
raise ValueError("url isn't set")
return urlparse.urlparse(self.url)
parsed_url = property(_parsed_url, doc="parsed url")
def _path__get(self):
parsed_url = self.parsed_url
path = parsed_url.path or '/'
return urlparse.urlunparse(('','', path, parsed_url.params,
parsed_url.query, parsed_url.fragment))
path = property(_path__get)
def _host__get(self):
try:
h = self.parsed_url.netloc.encode('ascii')
except UnicodeEncodeError:
h = self.parsed_url.netloc.encode('idna')
hdr_host = self.headers.iget("host")
if not hdr_host:
return h
return hdr_host
host = property(_host__get)
def is_chunked(self):
te = self.headers.iget("transfer-encoding")
return (te is not None and te.lower() == "chunked")
def is_ssl(self):
return self.parsed_url.scheme == "https"
def _set_body(self, body):
ctype = self.headers.ipop('content-type', None)
clen = self.headers.ipop('content-length', None)
if isinstance(body, dict):
if ctype is not None and \
ctype.startswith("multipart/form-data"):
type_, opts = cgi.parse_header(ctype)
boundary = opts.get('boundary', uuid.uuid4().hex)
self._body, self.headers = multipart_form_encode(body,
self.headers, boundary)
else:
ctype = "application/x-www-form-urlencoded; charset=utf-8"
self._body = form_encode(body)
elif hasattr(body, "boundary"):
ctype = "multipart/form-data; boundary=%s" % self.body.boundary
clen = body.get_size()
self._body = body
else:
self._body = body
if not ctype:
ctype = 'application/octet-stream'
if hasattr(self.body, 'name'):
ctype = mimetypes.guess_type(body.name)[0]
if not clen:
if hasattr(self._body, 'fileno'):
try:
self._body.flush()
except IOError:
pass
try:
fno = self._body.fileno()
clen = str(os.fstat(fno)[6])
except IOError:
if not self.is_chunked():
clen = len(self._body.read())
elif hasattr(self._body, 'getvalue') and not \
self.is_chunked():
clen = len(self._body.getvalue())
elif isinstance(self._body, types.StringTypes):
self._body = to_bytestring(self._body)
clen = len(self._body)
if clen is not None:
self.headers['Content-Length'] = clen
if ctype is not None:
self.headers['Content-Type'] = ctype
def _get_body(self):
return self._body
body = property(_get_body, _set_body, doc="request body")
class BodyWrapper(object):
def __init__(self, resp, connection):
self.resp = resp
self.body = resp._body
self.connection = connection
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, traceback):
self.close()
def close(self):
""" release connection """
self.connection.release(self.resp.should_close)
def __iter__(self):
return self
def next(self):
try:
return self.body.next()
except StopIteration:
self.close()
raise
def read(self, size=None):
data = self.body.read(size=size)
if not data:
self.close()
return data
def readline(self, size=None):
line = self.body.readline(size=size)
if not line:
self.close()
return line
def readlines(self, size=None):
lines = self.body.readlines(size=size)
        # readlines() exhausts the remaining body, so release the connection.
        self.close()
return lines
class Response(object):
charset = "utf8"
unicode_errors = 'strict'
def __init__(self, connection, request, resp):
self.request = request
self.connection = connection
self._body = resp.body
# response infos
self.headers = resp.headers
self.status = resp.status
self.status_int = resp.status_int
self.version = resp.version
self.headerslist = resp.headers.items()
self.location = resp.headers.iget('location')
self.final_url = request.url
self.should_close = resp.should_close()
self._closed = False
self._already_read = False
if request.method == "HEAD":
""" no body on HEAD, release the connection now """
self.connection.release()
self._body = StringIO()
def __getitem__(self, key):
try:
return getattr(self, key)
except AttributeError:
pass
return self.headers.iget(key)
def __contains__(self, key):
return (self.headers.iget(key) is not None)
def __iter__(self):
return self.headers.iteritems()
def can_read(self):
return not self._already_read
def body_string(self, charset=None, unicode_errors="strict"):
""" return body string, by default in bytestring """
if not self.can_read():
raise AlreadyRead()
body = self._body.read()
self._already_read = True
# release connection
self.connection.release(self.should_close)
if charset is not None:
try:
body = body.decode(charset, unicode_errors)
except UnicodeDecodeError:
pass
return body
def body_stream(self):
""" stream body """
if not self.can_read():
raise AlreadyRead()
self._already_read = True
return BodyWrapper(self, self.connection)
def tee(self):
""" copy response input to standard output or a file if length >
        sock.MAX_BODY. This makes it possible to reuse it in your
        application. When all the input has been read, the connection is
released """
if not hasattr(self._body, "reader"):
# head case
return self._body
return ResponseTeeInput(self, self.connection,
should_close=self.should_close)
ClientResponse = Response
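# Illustrative usage sketch (not part of restkit; the URL is a placeholder):
# assigning to `Request.body` drives the Content-Type / Content-Length headers
# computed in `_set_body` above.
def _demo_request_body():
    req = Request("http://example.com/upload", method="POST")
    req.body = {"name": "value"}  # a dict body gets form-encoded
    # Both headers are filled in by _set_body (MultiDict is assumed to expose a
    # dict-style get()).
    return req.headers.get("Content-Type"), req.headers.get("Content-Length")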
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The setup script is the centre of all activity in building, distributing,
and installing modules using the Distutils. It is required for ``pip install``.
See more: https://docs.python.org/2/distutils/setupscript.html
"""
from __future__ import print_function
import os
from datetime import date
from setuptools import setup, find_packages
# --- import your package ---
import constant2 as package
if __name__ == "__main__":
# --- Automatically generate setup parameters ---
# Your package name
PKG_NAME = package.__name__
# Your GitHub user name
try:
GITHUB_USERNAME = package.__github_username__
except:
GITHUB_USERNAME = "Unknown-Github-Username"
# Short description will be the description on PyPI
try:
SHORT_DESCRIPTION = package.__short_description__ # GitHub Short Description
except:
print(
"'__short_description__' not found in '%s.__init__.py'!" % PKG_NAME)
SHORT_DESCRIPTION = "No short description!"
# Long description will be the body of content on PyPI page
try:
LONG_DESCRIPTION = open("README.rst", "rb").read().decode("utf-8")
except:
LONG_DESCRIPTION = "No long description!"
# Version number, VERY IMPORTANT!
VERSION = package.__version__
# Author and Maintainer
try:
AUTHOR = package.__author__
except:
AUTHOR = "Unknown"
try:
AUTHOR_EMAIL = package.__author_email__
except:
AUTHOR_EMAIL = None
try:
MAINTAINER = package.__maintainer__
except:
MAINTAINER = "Unknown"
try:
MAINTAINER_EMAIL = package.__maintainer_email__
except:
MAINTAINER_EMAIL = None
PACKAGES, INCLUDE_PACKAGE_DATA, PACKAGE_DATA, PY_MODULES = (
None, None, None, None,
)
    # It's a directory style package
    # (__file__[:-8] strips the trailing "setup.py" to get the project directory)
    if os.path.exists(__file__[:-8] + PKG_NAME):
# Include all sub packages in package directory
PACKAGES = [PKG_NAME] + ["%s.%s" % (PKG_NAME, i)
for i in find_packages(PKG_NAME)]
# Include everything in package directory
INCLUDE_PACKAGE_DATA = True
PACKAGE_DATA = {
"": ["*.*"],
}
# It's a single script style package
elif os.path.exists(__file__[:-8] + PKG_NAME + ".py"):
PY_MODULES = [PKG_NAME, ]
# The project directory name is the GitHub repository name
repository_name = os.path.basename(os.path.dirname(__file__))
# Project Url
URL = "https://github.com/{0}/{1}".format(GITHUB_USERNAME, repository_name)
    # Use today's date as the GitHub release tag
github_release_tag = str(date.today())
# Source code download url
DOWNLOAD_URL = "https://pypi.python.org/pypi/{0}/{1}#downloads".format(
PKG_NAME, VERSION)
try:
LICENSE = package.__license__
except:
print("'__license__' not found in '%s.__init__.py'!" % PKG_NAME)
LICENSE = ""
PLATFORMS = [
"Windows",
"MacOS",
"Unix",
]
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
]
"""
Full list can be found at: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"""
# Read requirements.txt, ignore comments
try:
REQUIRES = list()
f = open("requirements.txt", "rb")
for line in f.read().decode("utf-8").split("\n"):
line = line.strip()
if "#" in line:
line = line[:line.find("#")].strip()
if line:
REQUIRES.append(line)
except:
print("'requirements.txt' not found!")
REQUIRES = list()
setup(
name=PKG_NAME,
description=SHORT_DESCRIPTION,
long_description=LONG_DESCRIPTION,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
packages=PACKAGES,
include_package_data=INCLUDE_PACKAGE_DATA,
package_data=PACKAGE_DATA,
py_modules=PY_MODULES,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
license=LICENSE,
install_requires=REQUIRES,
)
"""
Appendix
--------
::
Frequent used classifiers List = [
"Development Status :: 1 - Planning",
"Development Status :: 2 - Pre-Alpha",
"Development Status :: 3 - Alpha",
"Development Status :: 4 - Beta",
"Development Status :: 5 - Production/Stable",
"Development Status :: 6 - Mature",
"Development Status :: 7 - Inactive",
"Intended Audience :: Customer Service",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Financial and Insurance Industry",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Information Technology",
"Intended Audience :: Legal Industry",
"Intended Audience :: Manufacturing",
"Intended Audience :: Other Audience",
"Intended Audience :: Religion",
"Intended Audience :: Science/Research",
"Intended Audience :: System Administrators",
"Intended Audience :: Telecommunications Industry",
"License :: OSI Approved :: BSD License",
"License :: OSI Approved :: MIT License",
"License :: OSI Approved :: Apache Software License",
"License :: OSI Approved :: GNU General Public License (GPL)",
"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
"Natural Language :: English",
"Natural Language :: Chinese (Simplified)",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 2 :: Only",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3 :: Only",
]
"""
|
|
'''
FileName:
Author:KWJ(kyson)
UpdateTime:2016/10/10
Introduction:
'''
from __future__ import division
import copy
from operator import attrgetter
from ryu.base import app_manager
from ryu.base.app_manager import lookup_service_brick
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib import hub
from ryu.lib.packet import packet
from ryu.controller import ofp_event
from ryu.lib.packet import ethernet
from ryu.lib.packet import arp
from ryu.lib.packet import ether_types
import setting
import redis
class NetworkMonitor(app_manager.RyuApp):
"""
NetworkMonitor is a Ryu app for collecting traffic information.
"""
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(NetworkMonitor, self).__init__(*args, **kwargs)
self.name = 'monitor'
self.datapaths = {}
self.port_stats = {}
self.port_speed = {}
self.flow_stats = {}
self.flow_speed = {}
self.stats = {}
self.port_features = {}
self.free_bandwidth = {}
self.mac_to_port = {}
self.ip_to_port = {}
self.ipfreebw = {}
        # Start a green thread to monitor traffic and calculate
        # the free bandwidth of links.
self.monitor_thread = hub.spawn(self._monitor)
@set_ev_cls(ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
"""
Record datapath's info
"""
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
if not datapath.id in self.datapaths:
self.logger.debug('register datapath: %016x', datapath.id)
self.datapaths[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapaths:
self.logger.debug('unregister datapath: %016x', datapath.id)
del self.datapaths[datapath.id]
def _monitor(self):
"""
Main entry method of monitoring traffic.
"""
while True:
self.stats['flow'] = {}
self.stats['port'] = {}
self._save_ipfreebw(self.free_bandwidth,self.ip_to_port,self.ipfreebw)
pool = redis.ConnectionPool(host='127.0.0.1',port=6379,db=0)
r = redis.StrictRedis(connection_pool=pool)
for key in self.ipfreebw.keys():
r.set(key,self.ipfreebw[key])
print(self.free_bandwidth)
print (self.ip_to_port)
print (self.ipfreebw)
for dp in self.datapaths.values():
self.port_features.setdefault(dp.id, {})
self._request_stats(dp)
# refresh data.
hub.sleep(setting.MONITOR_PERIOD)
if self.stats['flow'] or self.stats['port']:
self.show_stat('flow')
self.show_stat('port')
hub.sleep(1)
def _request_stats(self, datapath):
"""
Sending request msg to datapath
"""
self.logger.debug('send stats request: %016x', datapath.id)
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
req = parser.OFPPortDescStatsRequest(datapath, 0)
datapath.send_msg(req)
req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
datapath.send_msg(req)
req = parser.OFPFlowStatsRequest(datapath)
datapath.send_msg(req)
def _save_freebandwidth(self, dpid, port_no, speed):
# Calculate free bandwidth of port and save it.
port_state = self.port_features.get(dpid).get(port_no)
if port_state:
capacity = port_state[2]
curr_bw = self._get_free_bw(capacity, speed)
key = (dpid,port_no)
if key not in setting.SW_PORT:
self.free_bandwidth.setdefault(key, None)
self.free_bandwidth[(dpid, port_no)] = curr_bw
else:
self.logger.info("Fail in getting port state")
def _save_stats(self, _dict, key, value, length):
if key not in _dict:
_dict[key] = []
_dict[key].append(value)
if len(_dict[key]) > length:
_dict[key].pop(0)
def _get_speed(self, now, pre, period):
if period:
return (now - pre) / (period)
else:
return 0
def _get_free_bw(self, capacity, speed):
        # capacity comes from OFPPortDescStatsReply; its default unit is kbit/s
return max(capacity*10**3 - speed * 8, 0)
def _get_time(self, sec, nsec):
return sec + nsec / (10 ** 9)
def _get_period(self, n_sec, n_nsec, p_sec, p_nsec):
return self._get_time(n_sec, n_nsec) - self._get_time(p_sec, p_nsec)
def _save_ipfreebw(self,freebw,ip_port,ipfreebw):
for key in ip_port.keys():
ipfreebw[ip_port[key]]=freebw[key]
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# install table-miss flow entry
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
# ignore lldp packet
return
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
#self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = in_port
# mac_of_port
for p in pkt.get_protocols(arp.arp):
key = (dpid, in_port)
value = p.src_ip
if key not in setting.SW_PORT:
self.ip_to_port.setdefault(key, value)
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port)]
# install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
# verify if we have a valid buffer_id, if yes avoid to send both
# flow_mod & packet_out
if msg.buffer_id != ofproto.OFP_NO_BUFFER:
self.add_flow(datapath, 1, match, actions, msg.buffer_id)
return
else:
self.add_flow(datapath, 1, match, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def _flow_stats_reply_handler(self, ev):
"""
Save flow stats reply info into self.flow_stats.
Calculate flow speed and Save it.
"""
body = ev.msg.body
dpid = ev.msg.datapath.id
self.stats['flow'][dpid] = body
self.flow_stats.setdefault(dpid, {})
self.flow_speed.setdefault(dpid, {})
for stat in sorted([flow for flow in body if flow.priority == 1],
key=lambda flow: (flow.match.get('in_port'),
flow.match.get('eth_dst'))):
key = (stat.match['in_port'], stat.match.get('eth_dst'),
stat.instructions[0].actions[0].port)
value = (stat.packet_count, stat.byte_count,
stat.duration_sec, stat.duration_nsec)
self._save_stats(self.flow_stats[dpid], key, value, 5)
# Get flow's speed.
pre = 0
period = setting.MONITOR_PERIOD
tmp = self.flow_stats[dpid][key]
if len(tmp) > 1:
pre = tmp[-2][1]
period = self._get_period(tmp[-1][2], tmp[-1][3],
tmp[-2][2], tmp[-2][3])
speed = self._get_speed(self.flow_stats[dpid][key][-1][1],
pre, period)
self._save_stats(self.flow_speed[dpid], key, speed, 5)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def _port_stats_reply_handler(self, ev):
"""
Save port's stats info
Calculate port's speed and save it.
"""
body = ev.msg.body
dpid = ev.msg.datapath.id
self.stats['port'][dpid] = body
for stat in sorted(body, key=attrgetter('port_no')):
port_no = stat.port_no
if port_no != ofproto_v1_3.OFPP_LOCAL:
key = (dpid, port_no)
value = (stat.tx_bytes, stat.rx_bytes, stat.rx_errors,
stat.duration_sec, stat.duration_nsec)
self._save_stats(self.port_stats, key, value, 5)
# Get port speed.
pre = 0
period = setting.MONITOR_PERIOD
tmp = self.port_stats[key]
if len(tmp) > 1:
pre = tmp[-2][0] + tmp[-2][1]
period = self._get_period(tmp[-1][3], tmp[-1][4],
tmp[-2][3], tmp[-2][4])
speed = self._get_speed(
self.port_stats[key][-1][0] + self.port_stats[key][-1][1],
pre, period)
self._save_stats(self.port_speed, key, speed, 5)
self._save_freebandwidth(dpid, port_no, speed)
@set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
def port_desc_stats_reply_handler(self, ev):
"""
Save port description info.
"""
msg = ev.msg
dpid = msg.datapath.id
ofproto = msg.datapath.ofproto
config_dict = {ofproto.OFPPC_PORT_DOWN: "Down",
ofproto.OFPPC_NO_RECV: "No Recv",
                       ofproto.OFPPC_NO_FWD: "No Forward",
ofproto.OFPPC_NO_PACKET_IN: "No Packet-in"}
state_dict = {ofproto.OFPPS_LINK_DOWN: "Down",
ofproto.OFPPS_BLOCKED: "Blocked",
ofproto.OFPPS_LIVE: "Live"}
ports = []
for p in ev.msg.body:
ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '
'state=0x%08x curr=0x%08x advertised=0x%08x '
'supported=0x%08x peer=0x%08x curr_speed=%d '
'max_speed=%d' %
(p.port_no, p.hw_addr,
p.name, p.config,
p.state, p.curr, p.advertised,
p.supported, p.peer, p.curr_speed,
p.max_speed))
if p.config in config_dict:
config = config_dict[p.config]
else:
config = "up"
if p.state in state_dict:
state = state_dict[p.state]
else:
state = "up"
port_feature = (config, state, p.curr_speed*100)
self.port_features[dpid][p.port_no] = port_feature
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def _port_status_handler(self, ev):
"""
Handle the port status changed event.
"""
msg = ev.msg
reason = msg.reason
port_no = msg.desc.port_no
dpid = msg.datapath.id
ofproto = msg.datapath.ofproto
reason_dict = {ofproto.OFPPR_ADD: "added",
ofproto.OFPPR_DELETE: "deleted",
ofproto.OFPPR_MODIFY: "modified", }
if reason in reason_dict:
print "switch%d: port %s %s" % (dpid, reason_dict[reason], port_no)
else:
print "switch%d: Illeagal port state %s %s" % (port_no, reason)
def show_stat(self, type):
'''
Show statistics info according to data type.
type: 'port' 'flow'
'''
if setting.TOSHOW is False:
return
bodys = self.stats[type]
if(type == 'flow'):
print('datapath '' in-port ip-dst '
'out-port packets bytes flow-speed(B/s)')
print('---------------- '' -------- ----------------- '
'-------- -------- -------- -----------')
for dpid in bodys.keys():
for stat in sorted(
[flow for flow in bodys[dpid] if flow.priority == 1],
key=lambda flow: (flow.match.get('in_port'),
flow.match.get('eth_dst'))):
print('%016x %8x %17s %8x %8d %8d %8.1f' % (
dpid,
stat.match['in_port'], stat.match['eth_dst'],
stat.instructions[0].actions[0].port,
stat.packet_count, stat.byte_count,
abs(self.flow_speed[dpid][
(stat.match.get('in_port'),
stat.match.get('eth_dst'),
stat.instructions[0].actions[0].port)][-1])))
print '\n'
if(type == 'port'):
print('datapath port ''rx-pkts rx-bytes rx-error '
'tx-pkts tx-bytes tx-error port-speed(B/s)'
' current-capacity(Kbps) '
'port-stat link-stat')
print('---------------- -------- ''-------- -------- -------- '
'-------- -------- -------- '
'---------------- ---------------- '
' ----------- -----------')
format = '%016x %8x %8d %8d %8d %8d %8d %8d %8.1f %16d %16s %16s'
for dpid in bodys.keys():
for stat in sorted(bodys[dpid], key=attrgetter('port_no')):
if stat.port_no != ofproto_v1_3.OFPP_LOCAL:
print(format % (
dpid, stat.port_no,
stat.rx_packets, stat.rx_bytes, stat.rx_errors,
stat.tx_packets, stat.tx_bytes, stat.tx_errors,
abs(self.port_speed[(dpid, stat.port_no)][-1]),
self.port_features[dpid][stat.port_no][2],
self.port_features[dpid][stat.port_no][0],
self.port_features[dpid][stat.port_no][1]))
print '\n'
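# Illustrative sketch (not part of the Ryu app): the unit handling behind
# _get_free_bw.  `capacity` from OFPPortDescStatsReply is in kbit/s and `speed`
# (computed from byte counters in _port_stats_reply_handler) is in bytes/s, so
# the returned free bandwidth is in bit/s.
def _demo_free_bw():
    capacity = 100000                                 # 100 Mbit/s port, reported as 100000 kbit/s
    speed = 1250000                                   # 1.25 MB/s of traffic = 10 Mbit/s
    free_bw = max(capacity * 10 ** 3 - speed * 8, 0)
    return free_bw                                    # 90000000 bit/s of headroom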
|
|
#! /usr/bin/env python2.7
from sys import argv
from operator import itemgetter
import urllib2
import csv
def print_list(list):
for l in list:
print "[ {0} ]".format(l)
def print_support(support):
for s in support:
print "[ {0} = {1} ]".format(s, support[s])
def print_freq_items(sets, support):
for ss in sets:
for s in ss:
print "Frequent set: {{ {0} }}, support: {1:.2f}".format(", ".join(s), support[s])
def print_freq_items_tofile(sets, support):
f = open('mfis.txt', 'w')
for ss in sets:
for s in ss:
f.write("Frequent set: {{ {0} }}, support: {1:.2f}\n".format(", ".join(s), support[s]))
f.close()
def print_rules(rules):
for rule in rules:
print "{{ {0} }} --> {{ {1} }}, conf: {2:.2f}".format(", ".join(rule[0]), ", ".join(rule[1]), rule[2])
def print_rules_tofile(rules):
f = open('topar.txt', 'w')
for rule in rules:
f.write("{{ {0} }} --> {{ {1} }}, conf: {2:.2f} \n".format(", ".join(rule[0]), ", ".join(rule[1]), rule[2]))
f.close()
def apriori(data, min_sup=0.3):
single_candidates = get_single_candidates(data)
#print "Single Candidates" #DEBUG
#print_list(single_candidates) #DEBUG
datasets = map(set, data)
frequent_singles, sup_cnts = prune_by_support(datasets, single_candidates, min_sup)
#print "Freq Item Sets" #DEBUG
#print_list(frequent_singles) #DEBUG
#print "Supports " #DEBUG
#print_support(sup_cnts) #DEBUG
frequent_sets = []
frequent_sets.append(frequent_singles)
k = 0
while(len(frequent_sets[k]) > 0):
candidates = get_candidates(frequent_sets[k])
#print "{0} iter".format(k) #DEBUG
#print "Candidates: " #DEBUG
#print_list(candidates) #DEBUG
freq_k_sets, sup_k_cnts = prune_by_support(datasets, candidates, min_sup)
#print "Freq Sets:" #DEBUG
#print_list(freq_k_sets) #DEBUG
#print "Supports: " #DEBUG
#print_support(sup_k_cnts) #DEBUG
sup_cnts.update(sup_k_cnts)
frequent_sets.append(freq_k_sets)
k += 1
return frequent_sets, sup_cnts
def get_single_candidates(dataset):
single_candidates = []
for transaction in dataset:
for item in transaction:
c = [item]
if not c in single_candidates:
single_candidates.append(c)
single_candidates.sort()
return map(frozenset, single_candidates)
def get_candidates(frequent_sets):
ret_frequent = []
freq_len = len(frequent_sets)
#print "get_candidates : freq_sets = {0}, len = {1}".format(frequent_sets, freq_len)
for i in range(freq_len):
for j in range(i+1, freq_len):
fli = list(frequent_sets[i])
fli.sort()
#print "fli: {0}, {1}".format(fli, i)
flj = list(frequent_sets[j])
flj.sort()
#print "flj: {0}, {1}".format(flj, j)
if (len(fli) < 2):
fsi = fli[0]
fsj = flj[0]
#print "fsi == fsj : {0} == {1}".format(fsi, fsj)
ret_frequent.append(frequent_sets[i] | frequent_sets[j])
else:
fsi = fli[:-1]
fsj = flj[:-1]
#print "fsi == fsj : {0} == {1}".format(fsi, fsj)
if fsi == fsj:
ret_frequent.append(frequent_sets[i] | frequent_sets[j])
return ret_frequent
def prune_by_support(datasets, candidates, min_sup):
items_cnts = {}
data_len = float(len(datasets))
prunned_items = []
support_cnts = {}
for transaction in datasets:
for candidate in candidates:
if candidate.issubset(transaction):
items_cnts.setdefault(candidate, 0)
items_cnts[candidate] += 1
for candidate_set in items_cnts:
support = items_cnts[candidate_set] / data_len
if support >= min_sup:
prunned_items.append(candidate_set)
support_cnts[candidate_set] = support
return prunned_items, support_cnts
def get_candidate_rules(frequent_sets):
ret_frequent = []
freq_len = len(frequent_sets)
#print "get_candidates : freq_sets = {0}, len = {1}".format(frequent_sets, freq_len)
for i in range(freq_len):
for j in range(i+1, freq_len):
fli = list(frequent_sets[i])
fli.sort()
#print "fli: {0}, {1}".format(fli, i)
flj = list(frequent_sets[j])
flj.sort()
#print "flj: {0}, {1}".format(flj, j)
if (len(fli) < 2):
fsi = fli[0]
fsj = flj[0]
#print "fsi == fsj : {0} == {1}".format(fsi, fsj)
ret_frequent.append(frequent_sets[i] | frequent_sets[j])
else:
fsi = fli[:-1]
fsj = flj[:-1]
#print "fsi == fsj : {0} == {1}".format(fsi, fsj)
if fsi == fsj:
ret_frequent.append(frequent_sets[i] | frequent_sets[j])
return ret_frequent
def generate_rules(f_set, Hm, sup_cnts, min_conf, rules):
k = len(f_set)
m = len(Hm[0])
if (k > m + 1):
Hm1 = get_candidate_rules(Hm)
prune_by_confidence(f_set, Hm1, sup_cnts, min_conf, rules)
#print "Pruned rules: {0}:".format(Hm1)
if len(Hm1) > 1:
generate_rules(f_set, Hm1, sup_cnts, min_conf, rules)
def prune_by_confidence(f_set, H, sup_cnts, min_conf, rules):
for consequence in H:
rule = f_set - consequence
confidence = sup_cnts[f_set] / sup_cnts[rule]
if confidence >= min_conf:
#print "** Rule: {0} \nconsequence: {1}\n confidence: {2}".format(rule, consequence, confidence)
rules.append((rule, consequence, confidence))
def get_rules(f, sup_cnts, min_conf = 0.6):
rules = []
for i in range(1, len(f)):
for f_set in f[i]:
Hm = [frozenset([itemset]) for itemset in f_set]
generate_rules(f_set, Hm, sup_cnts, min_conf, rules)
#print rules
return rules
def get_data_from_url(url):
response = urllib2.urlopen(url)
cr = csv.reader(response)
return cr
def get_data_from_file(file_name):
    file_data = []
    with open(file_name, 'rb') as csvfile:
        # Materialise the rows while the file is still open; a csv.reader over a
        # closed file fails once it is iterated later on.
        file_data = list(csv.reader(csvfile))
    return file_data
def transform_data(data):
ret = []
transformer = [
{'democrat':'Democrat', 'republican':'Republican'},
{'y': 'Handicapped Infants', 'n': 'Not Handicapped Infants', '?':'HI N/A'},
{'y': 'Water Project Cost Sharing', 'n': 'Not Water Project Cost Sharing', '?':' WPCS N/A'},
{'y': 'Adoption of the Budget Resolution', 'n':'Not Adoption of the Budget Resolution', '?':'ABR N/A'},
{'y': 'Physician Fee Freeze', 'n':'Not Physician Fee Freeze', '?':'PFF N/A'},
{'y': 'El Salvador Aid', 'n':'Not El Salvador Aid', '?':'ESA N/A'},
{'y': 'Religious Groups in Schools', 'n':'Not Religious Groups in Schools', '?':'RGS N/A'},
        {'y': 'Anti Satellite Test Band', 'n':'Not Anti Satellite Test Band', '?':'ASTB N/A'},
{'y': 'Aid to Nicaraguan Contras', 'n':'Not Aid to Nicaraguan Contras', '?':'ANC N/A'},
{'y': 'MX Missile', 'n': 'Not MX Missile', '?':'MXM N/A'},
{'y': 'Immigration', 'n': 'Not Immigration', '?':'IM N/A'},
        {'y': 'Synfuels Corporation Cutback', 'n':'Not Synfuels Corporation Cutback', '?': 'SCC N/A'},
        {'y': 'Education Spending', 'n': 'Not Education Spending', '?': 'ES N/A'},
{'y': 'Superfund Right to Sue', 'n':'Not Superfund Right to Sue', '?' :'SRS N/A'},
{'y': 'Crime', 'n':'Not Crime', '?': 'CR N/A'},
{'y': 'Duty Free Exports', 'n': 'Not Duty Free Exports', '?':'DFE N/A'},
{'y': 'Export Administration Act South Africa', 'n': 'Not Export Administration Act South Africa', '?':'EAASA N/A'}
]
for d in data:
ret.append(
[
transformer[0][d[0]],
transformer[1][d[1]],
transformer[2][d[2]],
transformer[3][d[3]],
transformer[4][d[4]],
transformer[5][d[5]],
transformer[6][d[6]],
transformer[7][d[7]],
transformer[8][d[8]],
transformer[9][d[9]],
transformer[10][d[10]],
transformer[11][d[11]],
transformer[12][d[12]],
transformer[13][d[13]],
transformer[14][d[14]],
transformer[15][d[15]],
transformer[16][d[16]]
]
)
return ret
if __name__ == '__main__':
args = argv[1:]
if len(args) > 0:
if (args[0] != 'url'):
data_str = args[0]
i = data_str.find('//')
if i > 1:
url = args[0]
data = get_data_from_url(url)
elif (data_str.find('.csv') == (len(data_str) - 4)):
file_name = args[0]
data = get_data_from_file(file_name)
else:
print "I only know how to read data from an URI or a CSV file"
else:
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data'
data = get_data_from_url(url)
data = transform_data(data)
else:
print "Using test data"
data = [
['Bread', 'Milk'],
['Bread', 'Diapers', 'Beer', 'Eggs'],
['Milk', 'Diapers', 'Beer', 'Coke'],
['Bread', 'Milk', 'Diapers', 'Beer'],
['Bread', 'Milk', 'Diapers', 'Coke']
]
if len(args) >= 3:
        # Support and confidence thresholds are fractions, so parse them as floats.
        min_sup = float(args[1])
        min_conf = float(args[2])
else:
min_sup = .3
min_conf = .6
frequent_sets, sup_cnts = apriori(data, min_sup)
frequent_sets = frequent_sets[:-1]
#print "Final Frequent Sets:"
#print_list(frequent_sets)
#print_support(sup_cnts)
print_freq_items_tofile(frequent_sets, sup_cnts)
rules = get_rules(frequent_sets, sup_cnts, min_conf)
#print "Rules: "
rules.sort(key = itemgetter(2), reverse=True)
print_rules_tofile(rules)
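# Illustrative worked example (not part of the original script): with the built-in
# test data above, support and confidence come out as
#
#   support({Bread, Milk})  = 3/5 = 0.6   (transactions 1, 4 and 5)
#   support({Milk})         = 4/5 = 0.8
#   conf({Milk} -> {Bread}) = 0.6 / 0.8 = 0.75
#
# and a confidence of 0.75 is above the default min_conf of 0.6.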
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Perturb a `LinearOperator` with a rank `K` update."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"LinearOperatorLowRankUpdate",
]
@tf_export("linalg.LinearOperatorLowRankUpdate")
@linear_operator.make_composite_tensor
class LinearOperatorLowRankUpdate(linear_operator.LinearOperator):
"""Perturb a `LinearOperator` with a rank `K` update.
This operator acts like a [batch] matrix `A` with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `M x N` matrix.
`LinearOperatorLowRankUpdate` represents `A = L + U D V^H`, where
```
L, is a LinearOperator representing [batch] M x N matrices
U, is a [batch] M x K matrix. Typically K << M.
D, is a [batch] K x K matrix.
V, is a [batch] N x K matrix. Typically K << N.
V^H is the Hermitian transpose (adjoint) of V.
```
If `M = N`, determinants and solves are done using the matrix determinant
lemma and Woodbury identities, and thus require L and D to be non-singular.
Solves and determinants will be attempted unless the "is_non_singular"
property of L and D is False.
In the event that L and D are positive-definite, and U = V, solves and
determinants can be done using a Cholesky factorization.
```python
# Create a 3 x 3 diagonal linear operator.
diag_operator = LinearOperatorDiag(
diag_update=[1., 2., 3.], is_non_singular=True, is_self_adjoint=True,
is_positive_definite=True)
# Perturb with a rank 2 perturbation
operator = LinearOperatorLowRankUpdate(
operator=diag_operator,
u=[[1., 2.], [-1., 3.], [0., 0.]],
diag_update=[11., 12.],
v=[[1., 2.], [-1., 3.], [10., 10.]])
operator.shape
==> [3, 3]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [3, 4] Tensor
operator.matmul(x)
==> Shape [3, 4] Tensor
```
### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
### Performance
Suppose `operator` is a `LinearOperatorLowRankUpdate` of shape `[M, N]`,
made from a rank `K` update of `base_operator` which performs `.matmul(x)` on
`x` having `x.shape = [N, R]` with `O(L_matmul*N*R)` complexity (and similarly
  for `solve`, `determinant`). Then, if `x.shape = [N, R]`,
* `operator.matmul(x)` is `O(L_matmul*N*R + K*N*R)`
and if `M = N`,
* `operator.solve(x)` is `O(L_matmul*N*R + N*K*R + K^2*R + K^3)`
* `operator.determinant()` is `O(L_determinant + L_solve*N*K + K^2*N + K^3)`
If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular`, `self_adjoint`, `positive_definite`,
`diag_update_positive` and `square`. These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
base_operator,
u,
diag_update=None,
v=None,
is_diag_update_positive=None,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorLowRankUpdate"):
"""Initialize a `LinearOperatorLowRankUpdate`.
This creates a `LinearOperator` of the form `A = L + U D V^H`, with
`L` a `LinearOperator`, `U, V` both [batch] matrices, and `D` a [batch]
diagonal matrix.
If `L` is non-singular, solves and determinants are available.
Solves/determinants both involve a solve/determinant of a `K x K` system.
In the event that L and D are self-adjoint positive-definite, and U = V,
this can be done using a Cholesky factorization. The user should set the
`is_X` matrix property hints, which will trigger the appropriate code path.
Args:
base_operator: Shape `[B1,...,Bb, M, N]`.
u: Shape `[B1,...,Bb, M, K]` `Tensor` of same `dtype` as `base_operator`.
This is `U` above.
diag_update: Optional shape `[B1,...,Bb, K]` `Tensor` with same `dtype`
as `base_operator`. This is the diagonal of `D` above.
Defaults to `D` being the identity operator.
v: Optional `Tensor` of same `dtype` as `u` and shape `[B1,...,Bb, N, K]`
Defaults to `v = u`, in which case the perturbation is symmetric.
If `M != N`, then `v` must be set since the perturbation is not square.
is_diag_update_positive: Python `bool`.
If `True`, expect `diag_update > 0`.
is_non_singular: Expect that this operator is non-singular.
Default is `None`, unless `is_positive_definite` is auto-set to be
`True` (see below).
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. Default is `None`, unless `base_operator` is self-adjoint
and `v = None` (meaning `u=v`), in which case this defaults to `True`.
is_positive_definite: Expect that this operator is positive definite.
Default is `None`, unless `base_operator` is positive-definite
`v = None` (meaning `u=v`), and `is_diag_update_positive`, in which case
this defaults to `True`.
Note that we say an operator is positive definite when the quadratic
form `x^H A x` has positive real part for all nonzero `x`.
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
ValueError: If `is_X` flags are set in an inconsistent way.
"""
parameters = dict(
base_operator=base_operator,
u=u,
diag_update=diag_update,
v=v,
is_diag_update_positive=is_diag_update_positive,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
dtype = base_operator.dtype
if diag_update is not None:
if is_diag_update_positive and dtype.is_complex:
logging.warn("Note: setting is_diag_update_positive with a complex "
"dtype means that diagonal is real and positive.")
if diag_update is None:
if is_diag_update_positive is False:
raise ValueError(
"Default diagonal is the identity, which is positive. However, "
"user set 'is_diag_update_positive' to False.")
is_diag_update_positive = True
# In this case, we can use a Cholesky decomposition to help us solve/det.
self._use_cholesky = (
base_operator.is_positive_definite and base_operator.is_self_adjoint
and is_diag_update_positive
and v is None)
# Possibly auto-set some characteristic flags from None to True.
# If the Flags were set (by the user) incorrectly to False, then raise.
if base_operator.is_self_adjoint and v is None and not dtype.is_complex:
if is_self_adjoint is False:
raise ValueError(
"A = L + UDU^H, with L self-adjoint and D real diagonal. Since"
" UDU^H is self-adjoint, this must be a self-adjoint operator.")
is_self_adjoint = True
# The condition for using a cholesky is sufficient for SPD, and
    # no weaker choice of these hints leads to SPD. Therefore,
# the following line reads "if hints indicate SPD..."
if self._use_cholesky:
if (
is_positive_definite is False
or is_self_adjoint is False
or is_non_singular is False):
raise ValueError(
"Arguments imply this is self-adjoint positive-definite operator.")
is_positive_definite = True
is_self_adjoint = True
values = base_operator.graph_parents + [u, diag_update, v]
with ops.name_scope(name, values=values):
# Create U and V.
self._u = linear_operator_util.convert_nonref_to_tensor(u, name="u")
if v is None:
self._v = self._u
else:
self._v = linear_operator_util.convert_nonref_to_tensor(v, name="v")
if diag_update is None:
self._diag_update = None
else:
self._diag_update = linear_operator_util.convert_nonref_to_tensor(
diag_update, name="diag_update")
# Create base_operator L.
self._base_operator = base_operator
graph_parents = base_operator.graph_parents + [
self.u, self._diag_update, self.v]
graph_parents = [p for p in graph_parents if p is not None]
super(LinearOperatorLowRankUpdate, self).__init__(
dtype=self._base_operator.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
self._set_graph_parents(graph_parents)
# Create the diagonal operator D.
self._set_diag_operators(diag_update, is_diag_update_positive)
self._is_diag_update_positive = is_diag_update_positive
self._check_shapes()
def _check_shapes(self):
"""Static check that shapes are compatible."""
# Broadcast shape also checks that u and v are compatible.
uv_shape = array_ops.broadcast_static_shape(
self.u.shape, self.v.shape)
batch_shape = array_ops.broadcast_static_shape(
self.base_operator.batch_shape, uv_shape[:-2])
tensor_shape.Dimension(
self.base_operator.domain_dimension).assert_is_compatible_with(
uv_shape[-2])
if self._diag_update is not None:
tensor_shape.dimension_at_index(uv_shape, -1).assert_is_compatible_with(
self._diag_update.shape[-1])
array_ops.broadcast_static_shape(
batch_shape, self._diag_update.shape[:-1])
def _set_diag_operators(self, diag_update, is_diag_update_positive):
"""Set attributes self._diag_update and self._diag_operator."""
if diag_update is not None:
self._diag_operator = linear_operator_diag.LinearOperatorDiag(
self._diag_update, is_positive_definite=is_diag_update_positive)
else:
if tensor_shape.dimension_value(self.u.shape[-1]) is not None:
r = tensor_shape.dimension_value(self.u.shape[-1])
else:
r = array_ops.shape(self.u)[-1]
self._diag_operator = linear_operator_identity.LinearOperatorIdentity(
num_rows=r, dtype=self.dtype)
@property
def u(self):
"""If this operator is `A = L + U D V^H`, this is the `U`."""
return self._u
@property
def v(self):
"""If this operator is `A = L + U D V^H`, this is the `V`."""
return self._v
@property
def is_diag_update_positive(self):
"""If this operator is `A = L + U D V^H`, this hints `D > 0` elementwise."""
return self._is_diag_update_positive
@property
def diag_update(self):
"""If this operator is `A = L + U D V^H`, this is the diagonal of `D`."""
return self._diag_update
@property
def diag_operator(self):
"""If this operator is `A = L + U D V^H`, this is `D`."""
return self._diag_operator
@property
def base_operator(self):
"""If this operator is `A = L + U D V^H`, this is the `L`."""
return self._base_operator
def _assert_self_adjoint(self):
# Recall this operator is:
# A = L + UDV^H.
# So in one case self-adjoint depends only on L
if self.u is self.v and self.diag_update is None:
return self.base_operator.assert_self_adjoint()
# In all other cases, sufficient conditions for self-adjoint can be found
# efficiently. However, those conditions are not necessary conditions.
return super(LinearOperatorLowRankUpdate, self).assert_self_adjoint()
def _shape(self):
batch_shape = array_ops.broadcast_static_shape(
self.base_operator.batch_shape,
self.diag_operator.batch_shape)
batch_shape = array_ops.broadcast_static_shape(
batch_shape,
self.u.shape[:-2])
batch_shape = array_ops.broadcast_static_shape(
batch_shape,
self.v.shape[:-2])
return batch_shape.concatenate(self.base_operator.shape[-2:])
def _shape_tensor(self):
batch_shape = array_ops.broadcast_dynamic_shape(
self.base_operator.batch_shape_tensor(),
self.diag_operator.batch_shape_tensor())
batch_shape = array_ops.broadcast_dynamic_shape(
batch_shape,
array_ops.shape(self.u)[:-2])
batch_shape = array_ops.broadcast_dynamic_shape(
batch_shape,
array_ops.shape(self.v)[:-2])
return array_ops.concat(
[batch_shape, self.base_operator.shape_tensor()[-2:]], axis=0)
def _get_uv_as_tensors(self):
"""Get (self.u, self.v) as tensors (in case they were refs)."""
u = ops.convert_to_tensor_v2_with_dispatch(self.u)
if self.v is self.u:
v = u
else:
v = ops.convert_to_tensor_v2_with_dispatch(self.v)
return u, v
def _matmul(self, x, adjoint=False, adjoint_arg=False):
u, v = self._get_uv_as_tensors()
l = self.base_operator
d = self.diag_operator
leading_term = l.matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
if adjoint:
uh_x = math_ops.matmul(u, x, adjoint_a=True, adjoint_b=adjoint_arg)
d_uh_x = d.matmul(uh_x, adjoint=adjoint)
v_d_uh_x = math_ops.matmul(v, d_uh_x)
return leading_term + v_d_uh_x
else:
vh_x = math_ops.matmul(v, x, adjoint_a=True, adjoint_b=adjoint_arg)
d_vh_x = d.matmul(vh_x, adjoint=adjoint)
u_d_vh_x = math_ops.matmul(u, d_vh_x)
return leading_term + u_d_vh_x
def _determinant(self):
if self.is_positive_definite:
return math_ops.exp(self.log_abs_determinant())
# The matrix determinant lemma gives
# https://en.wikipedia.org/wiki/Matrix_determinant_lemma
# det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
# = det(C) det(D) det(L)
# where C is sometimes known as the capacitance matrix,
# C := D^{-1} + V^H L^{-1} U
u, v = self._get_uv_as_tensors()
det_c = linalg_ops.matrix_determinant(self._make_capacitance(u=u, v=v))
det_d = self.diag_operator.determinant()
det_l = self.base_operator.determinant()
return det_c * det_d * det_l
def _diag_part(self):
# [U D V^T]_{ii} = sum_{jk} U_{ij} D_{jk} V_{ik}
# = sum_{j} U_{ij} D_{jj} V_{ij}
u, v = self._get_uv_as_tensors()
product = u * math_ops.conj(v)
if self.diag_update is not None:
product *= array_ops.expand_dims(self.diag_update, axis=-2)
return (
math_ops.reduce_sum(product, axis=-1) + self.base_operator.diag_part())
def _log_abs_determinant(self):
u, v = self._get_uv_as_tensors()
# Recall
# det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
# = det(C) det(D) det(L)
log_abs_det_d = self.diag_operator.log_abs_determinant()
log_abs_det_l = self.base_operator.log_abs_determinant()
if self._use_cholesky:
chol_cap_diag = array_ops.matrix_diag_part(
linalg_ops.cholesky(self._make_capacitance(u=u, v=v)))
log_abs_det_c = 2 * math_ops.reduce_sum(
math_ops.log(chol_cap_diag), axis=[-1])
else:
det_c = linalg_ops.matrix_determinant(self._make_capacitance(u=u, v=v))
log_abs_det_c = math_ops.log(math_ops.abs(det_c))
if self.dtype.is_complex:
log_abs_det_c = math_ops.cast(log_abs_det_c, dtype=self.dtype)
return log_abs_det_c + log_abs_det_d + log_abs_det_l
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
if self.base_operator.is_non_singular is False:
raise ValueError(
"Solve not implemented unless this is a perturbation of a "
"non-singular LinearOperator.")
# The Woodbury formula gives:
# https://en.wikipedia.org/wiki/Woodbury_matrix_identity
# (L + UDV^H)^{-1}
# = L^{-1} - L^{-1} U (D^{-1} + V^H L^{-1} U)^{-1} V^H L^{-1}
# = L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
# where C is the capacitance matrix, C := D^{-1} + V^H L^{-1} U
# Note also that, with ^{-H} being the inverse of the adjoint,
# (L + UDV^H)^{-H}
# = L^{-H} - L^{-H} V C^{-H} U^H L^{-H}
l = self.base_operator
if adjoint:
# If adjoint, U and V have flipped roles in the operator.
v, u = self._get_uv_as_tensors()
# Capacitance should still be computed with u=self.u and v=self.v, which
# after the "flip" on the line above means u=v, v=u. I.e. no need to
# "flip" in the capacitance call, since the call to
# matrix_solve_with_broadcast below is done with the `adjoint` argument,
# and this takes care of things.
capacitance = self._make_capacitance(u=v, v=u)
else:
u, v = self._get_uv_as_tensors()
capacitance = self._make_capacitance(u=u, v=v)
# L^{-1} rhs
linv_rhs = l.solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
# V^H L^{-1} rhs
vh_linv_rhs = math_ops.matmul(v, linv_rhs, adjoint_a=True)
# C^{-1} V^H L^{-1} rhs
if self._use_cholesky:
capinv_vh_linv_rhs = linalg_ops.cholesky_solve(
linalg_ops.cholesky(capacitance), vh_linv_rhs)
else:
capinv_vh_linv_rhs = linear_operator_util.matrix_solve_with_broadcast(
capacitance, vh_linv_rhs, adjoint=adjoint)
    # U C^{-1} V^H L^{-1} rhs
u_capinv_vh_linv_rhs = math_ops.matmul(u, capinv_vh_linv_rhs)
# L^{-1} U C^{-1} V^H L^{-1} rhs
linv_u_capinv_vh_linv_rhs = l.solve(u_capinv_vh_linv_rhs, adjoint=adjoint)
# L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
return linv_rhs - linv_u_capinv_vh_linv_rhs
def _make_capacitance(self, u, v):
# C := D^{-1} + V^H L^{-1} U
# which is sometimes known as the "capacitance" matrix.
# L^{-1} U
linv_u = self.base_operator.solve(u)
# V^H L^{-1} U
vh_linv_u = math_ops.matmul(v, linv_u, adjoint_a=True)
    # D^{-1} + V^H L^{-1} U
capacitance = self._diag_operator.inverse().add_to_tensor(vh_linv_u)
return capacitance
@property
def _composite_tensor_fields(self):
return ("base_operator", "u", "diag_update", "v", "is_diag_update_positive")
|
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation, MultiSource)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer, BLSTMLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter, RectangularOutputPlotter, StartEndMeanPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from neuralnilm.rectangulariser import rectangularise
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity, softmax
from lasagne.objectives import squared_error, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer,
DimshuffleLayer, DropoutLayer, ConcatLayer, PadLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
Max powers:
microwave = 3000W
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
# PATH = "/home/jack/experiments/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 25000
UKDALE_FILENAME = '/data/dk3810/ukdale.h5'
MAX_TARGET_POWER = 3000
ON_POWER_THRESHOLD = 200
MIN_ON_DURATION = 18
MIN_OFF_DURATION = 30
TARGET_APPLIANCE = 'microwave'
SEQ_LENGTH = 256
N_SEQ_PER_BATCH = 64
TRAIN_BUILDINGS = [1] #, 2]
VALIDATION_BUILDINGS = [1] # 5
SKIP_PROBABILITY_FOR_TARGET = 0.5
INDEPENDENTLY_CENTER_INPUTS = True
WINDOW_PER_BUILDING = {
1: ("2013-03-17", "2014-12-01"),
2: ("2013-05-22", "2013-10-01"),
3: ("2013-02-27", "2013-04-01"),
4: ("2013-03-09", "2013-09-20"),
5: ("2014-06-29", "2014-08-27")
}
INPUT_STATS = {
'mean': np.array([297.87216187], dtype=np.float32),
'std': np.array([374.43884277], dtype=np.float32)
}
def only_train_on_real_data(net, iteration):
net.logger.info(
"Iteration {}: Now only training on real data.".format(iteration))
net.source.sources[0]['train_probability'] = 0.0
net.source.sources[1]['train_probability'] = 1.0
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=lambda x, t: squared_error(x, t).mean(),
updates_func=nesterov_momentum,
learning_rate=1e-1,
learning_rate_changes_by_iteration={
1000: 1e-2,
2000: 1e-3
},
epoch_callbacks={
350000: only_train_on_real_data
},
do_save_activations=True,
auto_reshape=True,
layers_config=[
{
'type': BLSTMLayer,
'num_units': 60,
'merge_mode': 'concatenate'
},
{
'type': BLSTMLayer,
'num_units': 80,
'merge_mode': 'concatenate'
},
{
'type': DenseLayer,
'num_units': 1,
'nonlinearity': None
}
]
)
def exp_a(name):
logger = logging.getLogger(name)
real_appliance_source1 = RealApplianceSource(
logger=logger,
filename=UKDALE_FILENAME,
appliances=[
TARGET_APPLIANCE,
['fridge freezer', 'fridge', 'freezer'],
'dish washer',
'kettle',
['washer dryer', 'washing machine']
],
max_appliance_powers=[MAX_TARGET_POWER, 300, 2500, 2600, 2400],
on_power_thresholds=[ON_POWER_THRESHOLD] + [10] * 4,
min_on_durations=[MIN_ON_DURATION, 60, 1800, 12, 1800],
min_off_durations=[MIN_OFF_DURATION, 12, 1800, 12, 600],
divide_input_by_max_input_power=False,
window_per_building=WINDOW_PER_BUILDING,
seq_length=SEQ_LENGTH,
output_one_appliance=True,
train_buildings=TRAIN_BUILDINGS,
validation_buildings=VALIDATION_BUILDINGS,
n_seq_per_batch=N_SEQ_PER_BATCH,
skip_probability=0.75,
skip_probability_for_first_appliance=SKIP_PROBABILITY_FOR_TARGET,
# target_is_start_and_end_and_mean=True,
standardise_input=True,
input_stats=INPUT_STATS,
independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS
)
# same_location_source1 = SameLocation(
# logger=logger,
# filename=UKDALE_FILENAME,
# target_appliance=TARGET_APPLIANCE,
# window_per_building=WINDOW_PER_BUILDING,
# seq_length=SEQ_LENGTH,
# train_buildings=TRAIN_BUILDINGS,
# validation_buildings=VALIDATION_BUILDINGS,
# n_seq_per_batch=N_SEQ_PER_BATCH,
# skip_probability=SKIP_PROBABILITY_FOR_TARGET,
# # target_is_start_and_end_and_mean=True,
# standardise_input=True,
# offset_probability=1,
# divide_target_by=MAX_TARGET_POWER,
# input_stats=INPUT_STATS,
# independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS,
# on_power_threshold=ON_POWER_THRESHOLD,
# min_on_duration=MIN_ON_DURATION,
# min_off_duration=MIN_OFF_DURATION
# )
# multi_source = MultiSource(
# sources=[
# {
# 'source': real_appliance_source1,
# 'train_probability': 0.5,
# 'validation_probability': 0
# },
# {
# 'source': same_location_source1,
# 'train_probability': 0.5,
# 'validation_probability': 1
# }
# ],
# standardisation_source=same_location_source1
# )
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=real_appliance_source1,
plotter=Plotter(
n_seq_to_plot=32,
n_training_examples_to_plot=16)
))
net = Net(**net_dict_copy)
return net
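# Note (not part of the original experiment script): with `net_dict` above, training
# starts at a learning rate of 1e-1 and drops to 1e-2 after iteration 1000 and to
# 1e-3 after iteration 2000.  The `only_train_on_real_data` callback registered for
# iteration 350000 assumes a MultiSource-style `net.source.sources` list; since the
# MultiSource block is commented out and `real_appliance_source1` is used directly,
# that callback would likely fail if it were ever reached.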
def main():
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception:
logger.exception("Exception")
# raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e557.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
|
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import sys
import textwrap
from types import ModuleType
import pytest
from pex.compatibility import WINDOWS, nested, to_bytes
from pex.installer import EggInstaller, WheelInstaller
from pex.pex import PEX
from pex.testing import (
make_installer,
named_temporary_file,
run_simple_pex_test,
temporary_dir,
write_simple_pex
)
from pex.util import DistributionHelper
try:
from unittest import mock
except ImportError:
import mock
@pytest.mark.skipif('sys.version_info > (3,)')
def test_pex_uncaught_exceptions():
body = "raise Exception('This is an exception')"
so, rc = run_simple_pex_test(body)
assert b'This is an exception' in so, 'Standard out was: %s' % so
assert rc == 1
def test_excepthook_honored():
body = textwrap.dedent("""
import sys
def excepthook(ex_type, ex, tb):
print('Custom hook called with: {0}'.format(ex))
sys.exit(42)
sys.excepthook = excepthook
raise Exception('This is an exception')
""")
so, rc = run_simple_pex_test(body)
assert so == b'Custom hook called with: This is an exception\n', 'Standard out was: %s' % so
assert rc == 42
def _test_sys_exit(arg, expected_output, expected_rc):
body = "import sys; sys.exit({arg})".format(arg=arg)
so, rc = run_simple_pex_test(body)
assert so == expected_output, 'Should not print SystemExit traceback.'
assert rc == expected_rc
def test_pex_sys_exit_does_not_print_for_numeric_value():
_test_sys_exit(2, b'', 2)
def test_pex_sys_exit_prints_non_numeric_value_no_traceback():
text = 'something went wrong'
sys_exit_arg = '"' + text + '"'
  # encode the string in a way that's compatible with both Python 2 and 3
expected_output = to_bytes(text) + b'\n'
_test_sys_exit(sys_exit_arg, expected_output, 1)
def test_pex_sys_exit_doesnt_print_none():
_test_sys_exit('', to_bytes(''), 0)
def test_pex_sys_exit_prints_objects():
_test_sys_exit('Exception("derp")', to_bytes('derp\n'), 1)
@pytest.mark.skipif('hasattr(sys, "pypy_version_info")')
def test_pex_atexit_swallowing():
body = textwrap.dedent("""
import atexit
def raise_on_exit():
raise Exception('This is an exception')
atexit.register(raise_on_exit)
""")
so, rc = run_simple_pex_test(body)
assert so == b''
assert rc == 0
env_copy = os.environ.copy()
env_copy.update(PEX_TEARDOWN_VERBOSE='1')
so, rc = run_simple_pex_test(body, env=env_copy)
assert b'This is an exception' in so
assert rc == 0
def test_minimum_sys_modules():
# builtins stay
builtin_module = ModuleType('my_builtin')
modules = {'my_builtin': builtin_module}
new_modules = PEX.minimum_sys_modules([], modules)
assert new_modules == modules
new_modules = PEX.minimum_sys_modules(['bad_path'], modules)
assert new_modules == modules
# tainted evict
tainted_module = ModuleType('tainted_module')
tainted_module.__path__ = ['bad_path']
modules = {'tainted_module': tainted_module}
new_modules = PEX.minimum_sys_modules([], modules)
assert new_modules == modules
new_modules = PEX.minimum_sys_modules(['bad_path'], modules)
assert new_modules == {}
assert tainted_module.__path__ == []
# tainted cleaned
tainted_module = ModuleType('tainted_module')
tainted_module.__path__ = ['bad_path', 'good_path']
modules = {'tainted_module': tainted_module}
new_modules = PEX.minimum_sys_modules([], modules)
assert new_modules == modules
new_modules = PEX.minimum_sys_modules(['bad_path'], modules)
assert new_modules == modules
assert tainted_module.__path__ == ['good_path']
# If __path__ is not a list the module is removed; typically this implies
# it's a namespace package (https://www.python.org/dev/peps/pep-0420/) where
# __path__ is a _NamespacePath.
try:
from importlib._bootstrap_external import _NamespacePath
bad_path = _NamespacePath("hello", "world", None)
except ImportError:
bad_path = {"hello": "world"}
class FakeModule(object):
pass
tainted_module = FakeModule()
tainted_module.__path__ = bad_path # Not a list as expected
modules = {'tainted_module': tainted_module}
new_modules = PEX.minimum_sys_modules(['bad_path'], modules)
assert new_modules == {}
def test_site_libs():
with nested(mock.patch.object(PEX, '_get_site_packages'), temporary_dir()) as (
mock_site_packages, tempdir):
site_packages = os.path.join(tempdir, 'site-packages')
os.mkdir(site_packages)
mock_site_packages.return_value = set([site_packages])
site_libs = PEX.site_libs()
assert site_packages in site_libs
@pytest.mark.skipif(WINDOWS, reason='No symlinks on windows')
def test_site_libs_symlink():
with nested(mock.patch.object(PEX, '_get_site_packages'), temporary_dir()) as (
mock_site_packages, tempdir):
site_packages = os.path.join(tempdir, 'site-packages')
os.mkdir(site_packages)
site_packages_link = os.path.join(tempdir, 'site-packages-link')
os.symlink(site_packages, site_packages_link)
mock_site_packages.return_value = set([site_packages_link])
site_libs = PEX.site_libs()
assert os.path.realpath(site_packages) in site_libs
assert site_packages_link in site_libs
def test_site_libs_excludes_prefix():
"""Windows returns sys.prefix as part of getsitepackages(). Make sure to exclude it."""
with nested(mock.patch.object(PEX, '_get_site_packages'), temporary_dir()) as (
mock_site_packages, tempdir):
site_packages = os.path.join(tempdir, 'site-packages')
os.mkdir(site_packages)
mock_site_packages.return_value = set([site_packages, sys.prefix])
site_libs = PEX.site_libs()
assert site_packages in site_libs
assert sys.prefix not in site_libs
@pytest.mark.parametrize('zip_safe', (False, True))
@pytest.mark.parametrize('project_name', ('my_project', 'my-project'))
@pytest.mark.parametrize('installer_impl', (EggInstaller, WheelInstaller))
def test_pex_script(installer_impl, project_name, zip_safe):
kw = dict(name=project_name, installer_impl=installer_impl, zip_safe=zip_safe)
with make_installer(**kw) as installer:
bdist = DistributionHelper.distribution_from_path(installer.bdist())
env_copy = os.environ.copy()
env_copy['PEX_SCRIPT'] = 'hello_world'
so, rc = run_simple_pex_test('', env=env_copy)
assert rc == 1, so.decode('utf-8')
assert b'Could not find script hello_world' in so
so, rc = run_simple_pex_test('', env=env_copy, dists=[bdist])
assert rc == 0, so.decode('utf-8')
assert b'hello world' in so
env_copy['PEX_SCRIPT'] = 'shell_script'
so, rc = run_simple_pex_test('', env=env_copy, dists=[bdist])
assert rc == 1, so.decode('utf-8')
assert b'Unable to parse' in so
def test_pex_run():
with named_temporary_file() as fake_stdout:
with temporary_dir() as temp_dir:
pex = write_simple_pex(
temp_dir,
'import sys; sys.stdout.write("hello"); sys.stderr.write("hello"); sys.exit(0)'
)
rc = PEX(pex.path()).run(stdin=None, stdout=fake_stdout, stderr=fake_stdout)
assert rc == 0
fake_stdout.seek(0)
assert fake_stdout.read() == b'hellohello'
def test_pex_paths():
# Tests that PEX_PATH allows importing sources from the referenced pex.
with named_temporary_file() as fake_stdout:
with temporary_dir() as temp_dir:
pex1_path = os.path.join(temp_dir, 'pex1')
write_simple_pex(
pex1_path,
exe_contents='',
sources=[
('foo_pkg/__init__.py', ''),
('foo_pkg/foo_module.py', 'def foo_func():\n return "42"')
]
)
pex2_path = os.path.join(temp_dir, 'pex2')
pex2 = write_simple_pex(
pex2_path,
'import sys; from bar_pkg.bar_module import bar_func; '
'sys.stdout.write(bar_func()); sys.exit(0)',
sources=[
('bar_pkg/bar_module.py',
'from foo_pkg.foo_module import foo_func\ndef bar_func():\n return foo_func()')
]
)
rc = PEX(pex2.path()).run(stdin=None, stdout=fake_stdout, env={'PEX_PATH': pex1_path})
assert rc == 0
fake_stdout.seek(0)
assert fake_stdout.read() == b'42'
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. module:: glacier.py
:platform: Unix, Windows
:synopsis: Command line interface for amazon glacier
"""
import sys
import os
import ConfigParser
import argparse
import re
import locale
import glob
import csv
import json
import glacierexception
import constants
from prettytable import PrettyTable
from GlacierWrapper import GlacierWrapper
from functools import wraps
def output_headers(headers, output):
"""
Prints a list of headers - single item output.
:param headers: the output to be printed as {'header1':'data1',...}
:type headers: dict
"""
rows = [(k, headers[k]) for k in headers.keys()]
if output not in constants.TABLE_OUTPUT_FORMAT:
raise ValueError("Output format must be {}, got"
": {}".format(constants.TABLE_OUTPUT_FORMAT,
output))
if output == 'print':
table = PrettyTable(["Header", "Value"])
for row in rows:
if len(str(row[1])) <= 138:
table.add_row(row)
print table
if output == 'csv':
csvwriter = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)
for row in rows:
csvwriter.writerow(row)
if output == 'json':
print json.dumps(headers)
def output_table(results, output, keys=None, sort_key=None):
"""
    Pretty-prints results. Expects a list of dicts that all share the same keys.
    Uses the dict keys as headers unless keys is given;
    one line is printed for each item.
    Expected format of the data is a list of dicts:
    [{'key1':'data1.1', 'key2':'data1.2', ... },
     {'key1':'data2.1', 'key2':'data2.2', ... },
...]
keys: dict of headers to be printed for each key:
{'key1':'header1', 'key2':'header2',...}
sort_key: the key to use for sorting the table.
"""
if output not in constants.TABLE_OUTPUT_FORMAT:
raise ValueError("Output format must be {}, "
"got {}".format(constants.TABLE_OUTPUT_FORMAT,
output))
if output == 'print':
if len(results) == 0:
print 'No output!'
return
headers = [keys[k] for k in keys.keys()] if keys else results[0].keys()
table = PrettyTable(headers)
for line in results:
table.add_row([line[k] if k in line else '' for k in (keys.keys() if keys else headers)])
if sort_key:
table.sortby = keys[sort_key] if keys else sort_key
print table
if output == 'csv':
csvwriter = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)
keys = results[0].keys()
csvwriter.writerow(keys)
for row in results:
csvwriter.writerow([row[k] for k in keys])
if output == 'json':
print json.dumps(results)
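# Illustrative usage (assumed, not part of the original script):
#   results = [{'VaultName': 'photos', 'SizeInBytes': 1024},
#              {'VaultName': 'backups', 'SizeInBytes': 2048}]
#   output_table(results, 'print',
#                keys={'VaultName': 'Vault name', 'SizeInBytes': 'Size'},
#                sort_key='VaultName')
# renders a two-column PrettyTable sorted by "Vault name"; with output set to
# 'csv' or 'json' the same rows are written to stdout in that format instead.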
def output_msg(msg, output, success=True):
"""
In case of a single message output, e.g. nothing found.
:param msg: a single message to output.
:type msg: str
:param success: whether the operation was a success or not.
:type success: boolean
"""
if output not in constants.TABLE_OUTPUT_FORMAT:
raise ValueError("Output format must be {}, "
"got {}".format(constants.TABLE_OUTPUT_FORMAT,
output))
if msg is not None:
if output == 'print':
print msg
if output == 'csv':
csvwriter = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)
csvwriter.writerow(msg)
if output == 'json':
print json.dumps(msg)
if not success:
sys.exit(125)
def size_fmt(num, decimals = 1):
"""
    Formats file sizes in a human-readable format. Anything bigger than
    TB is still returned in TB. The number of decimals is optional,
    defaulting to 1.
"""
fmt = "%%3.%sf %%s"% decimals
for x in ['bytes','KB','MB','GB']:
if num < 1024.0:
return fmt % (num, x)
num /= 1024.0
return fmt % (num, 'TB')
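# Illustrative behaviour (assumed, not taken from the original source):
#   size_fmt(123456789)        -> '117.7 MB'
#   size_fmt(5 * 1024 ** 4, 2) -> '5.00 TB'
# Values of 1 TB and above fall through the loop and are always reported in TB.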
def default_glacier_wrapper(args, **kwargs):
"""
Convenience function to call an instance of GlacierWrapper
with all required arguments.
"""
return GlacierWrapper(args.aws_access_key,
args.aws_secret_key,
args.region,
args.account_id,
bookkeeping=args.bookkeeping,
no_bookkeeping=args.no_bookkeeping,
bookkeeping_domain_name=args.bookkeeping_domain_name,
sdb_access_key=args.sdb_access_key,
sdb_secret_key=args.sdb_secret_key,
sdb_region=args.sdb_region,
# sns_enable=args.sns_enable,
# sns_topic=args.sns_topic,
# sns_monitored_vaults=args.sns_monitored_vaults,
# sns_options=args.sns_options,
# config_object=args.config_object,
logfile=args.logfile,
loglevel=args.loglevel,
logtostdout=args.logtostdout)
def handle_errors(fn):
"""
Decorator for exception handling.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
except glacierexception.GlacierException as e:
# We are only interested in the error message in case
# it is a self-caused exception.
e.write(indentation='|| ', stack=False, message=True)
sys.exit(e.exitcode)
return wrapper
@handle_errors
def lsvault(args):
"""
Returns a list of vaults (if any).
"""
glacier = default_glacier_wrapper(args)
vault_list = glacier.lsvault()
keys = {'VaultName': "Vault name",
'VaultARN': "ARN",
'CreationDate': "Created",
'SizeInBytes': "Size"}
output_table(vault_list, args.output, keys=keys)
@handle_errors
def mkvault(args):
"""
Create a new vault.
"""
glacier = default_glacier_wrapper(args)
response = glacier.mkvault(args.vault)
output_headers(response, args.output)
@handle_errors
def rmvault(args):
"""
Remove a vault.
"""
glacier = default_glacier_wrapper(args)
response = glacier.rmvault(args.vault)
output_headers(response, args.output)
@handle_errors
def describevault(args):
"""
Give the description of a vault.
"""
glacier = default_glacier_wrapper(args)
response = glacier.describevault(args.vault)
headers = {'LastInventoryDate': "LastInventory",
'NumberOfArchives': "Archives",
'SizeInBytes': "Size",
'VaultARN': "ARN",
'CreationDate': "Created"}
output_table([response], args.output, keys=headers)
@handle_errors
def listmultiparts(args):
"""
Give an overview of all multipart uploads that are not finished.
"""
glacier = default_glacier_wrapper(args)
response = glacier.listmultiparts(args.vault)
if not response:
output_msg('No active multipart uploads.', args.output,
success=False)
else:
output_table(response, args.output)
@handle_errors
def abortmultipart(args):
"""
Abort a multipart upload which is in progress.
"""
glacier = default_glacier_wrapper(args)
response = glacier.abortmultipart(args.vault, args.uploadId)
output_headers(response, args.output)
@handle_errors
def listjobs(args):
"""
List all the active jobs for a vault.
"""
glacier = default_glacier_wrapper(args)
job_list = glacier.list_jobs(args.vault)
if job_list == []:
output_msg('No jobs.', args.output, success=False)
return
headers = {'Action': "Action",
'ArchiveId': "Archive ID",
'StatusCode': "Status",
'CreationDate': "Initiated",
'VaultARN': "VaultARN",
'JobId': "Job ID"}
output_table(job_list, args.output, keys=headers)
@handle_errors
def describejob(args):
"""
    Give the description of a job.
"""
glacier = default_glacier_wrapper(args)
job = glacier.describejob(args.vault, args.jobid)
output_headers(job, args.output)
@handle_errors
def download(args):
"""
Download an archive.
"""
glacier = default_glacier_wrapper(args)
response = glacier.download(args.vault, args.archive, args.partsize,
out_file_name=args.outfile,
overwrite=args.overwrite)
if args.outfile:
output_msg(response, args.output, success=True)
@handle_errors
def upload(args):
"""
Upload a file or a set of files to a Glacier vault.
"""
# See if we got a bacula-style file set.
# This is /path/to/vol001|vol002|vol003
if args.bacula:
if len(args.filename) > 1:
raise glacierexception.InputException("Bacula-style file name input can "\
"accept only one file name argument.")
fileset = args.filename[0].split('|')
if len(fileset) > 1:
dirname = os.path.dirname(fileset[0])
args.filename = [fileset[0]]
args.filename += [os.path.join(dirname, fileset[i]) for i in range(1, len(fileset))]
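            # Illustrative expansion (assumed): '/backups/vol001|vol002'
            # becomes ['/backups/vol001', '/backups/vol002'] before the
            # normal per-file processing below continues.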
glacier = default_glacier_wrapper(args)
results = []
# If we have one or more file names, they appear in a list.
# Iterate over these file names; do path expansion and wildcard expansion
# just in case the shell didn't take care of that.
# If no file name given it's an empty list, and we expect the file to
# be read over stdin.
if args.filename:
for f in args.filename:
# In case the shell does not expand wildcards, if any, do this here.
if f[0] == '~':
f = os.path.expanduser(f)
globbed = glob.glob(f)
if globbed:
for g in globbed:
response = glacier.upload(args.vault, g,
args.description,
args.region, args.stdin,
args.name, args.partsize,
args.uploadid,
args.resume)
results.append({"Uploaded file": g,
"Created archive with ID": response[0],
"Archive SHA256 tree hash": response[1],
"Description": args.description})
else:
raise glacierexception.InputException(
"File name given for upload can not "\
"be found: {}.".format(f),
code='CommandError')
elif args.stdin:
# No file name; using stdin.
response = glacier.upload(args.vault, None, args.description,
args.region, args.stdin,
args.name, args.partsize,
args.uploadid, args.resume)
results = [{"Created archive with ID": response[0],
"Archive SHA256 tree hash": response[1],
"Description": args.description}]
else:
raise glacierexception.InputException(
'''No input given. Either give a file name or file names
on the command line, or use the --stdin switch and pipe
in the data over stdin.''',
cause='No file name and no stdin pipe.',
code='CommandError')
output_table(results, args.output) if len(results) > 1 \
else output_headers(results[0], args.output)
@handle_errors
def getarchive(args):
"""
Initiate an archive retrieval job.
"""
glacier = default_glacier_wrapper(args)
status, job, jobid = glacier.getarchive(args.vault, args.archive)
output_headers(job, args.output)
@handle_errors
def rmarchive(args):
"""
Remove an archive from a vault.
"""
glacier = default_glacier_wrapper(args)
glacier.rmarchive(args.vault, args.archive)
output_msg("Archive removed.", args.output, success=True)
@handle_errors
def search(args):
"""
Search the database for file name or description.
"""
glacier = default_glacier_wrapper(args)
response = glacier.search(vault=args.vault,
region=args.region,
search_term=args.searchterm,
file_name=args.filename)
output_table(response, args.output)
@handle_errors
def inventory(args):
"""
Fetch latest inventory (or start a retrieval job if not ready).
"""
glacier = default_glacier_wrapper(args)
output = args.output
if sys.stdout.isatty() and output == 'print':
print 'Checking inventory, please wait.\r',
sys.stdout.flush()
job, inventory = glacier.inventory(args.vault, args.refresh)
if inventory:
if sys.stdout.isatty() and output == 'print':
print "Inventory of vault: {}".format(inventory["VaultARN"])
print "Inventory Date: {}\n".format(['InventoryDate'])
print "Content:"
headers = {'ArchiveDescription': 'Archive Description',
'CreationDate': 'Uploaded',
'Size': 'Size',
'ArchiveId': 'Archive ID',
'SHA256TreeHash': 'SHA256 tree hash'}
output_table(inventory['ArchiveList'],
args.output,
keys=headers)
if sys.stdout.isatty() and output == 'print':
size = 0
for item in inventory['ArchiveList']:
size += int(item['Size'])
print "This vault contains {} items, total size "\
"{}.".format(len(inventory['ArchiveList']),
size_fmt(size))
else:
result = {'Status':'Inventory retrieval in progress.',
'Job ID':job['JobId'],
'Job started (time in UTC)':job['CreationDate']}
output_headers(result, args.output)
@handle_errors
def treehash(args):
"""
Calculates the tree hash of the given file(s).
"""
glacier = default_glacier_wrapper(args)
hash_results = []
for f in args.filename:
if f:
# In case the shell does not expand wildcards,
# if any, do this here.
if f[0] == '~':
f = os.path.expanduser(f)
globbed = glob.glob(f)
if globbed:
for g in globbed:
hash_results.append(
{'File name': g,
'SHA256 tree hash': glacier.get_tree_hash(g)})
else:
raise glacierexception.InputException('No file name given.',
code='CommandError')
output_table(hash_results, args.output)
def snssync(args):
"""
    If monitored_vaults is specified in the configuration file, subscribe
    the vaults listed in it to notifications; otherwise
    subscribe all vaults.
"""
glacier = default_glacier_wrapper(args)
response = glacier.sns_sync(sns_options=args.sns_options,
output=args.output)
output_table(response, args.output)
def snssubscribe(args):
"""
Subscribe individual vaults to notifications by method
specified by user.
"""
protocol = args.protocol
endpoint = args.endpoint
vault_names = args.vault
topic = args.topic
glacier = default_glacier_wrapper(args)
response = glacier.sns_subscribe(protocol, endpoint, topic,
vault_names=vault_names,
sns_options=args.sns_options)
output_table(response, args.output)
def snslistsubscriptions(args):
"""
List subscriptions.
"""
protocol = args.protocol
endpoint = args.endpoint
topic = args.topic
glacier = default_glacier_wrapper(args)
response = glacier.sns_list_subscriptions(protocol, endpoint,
topic,
sns_options=args.sns_options)
output_table(response, args.output)
def snslisttopics(args):
glacier = default_glacier_wrapper(args)
response = glacier.sns_list_topics(sns_options=args.sns_options)
output_table(response, args.output)
def snsunsubscribe(args):
"""
Unsubscribe individual vaults from notifications for
specified protocol, endpoint and vault.
"""
protocol = args.protocol
endpoint = args.endpoint
topic = args.topic
glacier = default_glacier_wrapper(args)
response = glacier.sns_unsubscribe(protocol, endpoint,
topic,
sns_options=args.sns_options)
output_table(response, args.output)
class CustomArgParseFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
def _get_help_string(self, action):
"""
This method is identical to the base one, except that if the argument
ends in '-key', the default value is suppressed so that we don't print
out sensitive passwords (from the config file).
"""
help = action.help
if '%(default)' not in action.help:
if action.default is not argparse.SUPPRESS:
defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
if action.option_strings[0].endswith('-key'):
pass
else:
help += ' (default: %(default)s)'
return help
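# Illustrative effect (assumed): with this formatter an option such as
# '--aws-secret-key' is shown in --help without its "(default: ...)" suffix,
# while options like '--region' still display their default value.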
def main():
program_description = u"""
Command line interface for Amazon Glacier
"""
# Config parser
conf_parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False)
conf_parser.add_argument("-c",
"--conf",
default="~/.glacier-cmd",
help="Name of the file to log messages to.",
metavar="FILE")
conf_parser.add_argument('--logtostdout',
action='store_true',
help="Send log messages "\
"to stdout instead of "\
"the config file.")
args, remaining_argv = conf_parser.parse_known_args()
    # Here we parse the config from files in the home folder or in the
    # current folder. We use separate sections for aws- and glacier-
    # specific configs.
    aws, glacier, sdb = {}, {}, {}  # separate dicts, so they never alias each other
config = ConfigParser.SafeConfigParser()
sns = {'topics_present':False, 'topic':'aws-glacier-notifications'}
configs_read = config.read([constants.SYSTEM_WIDE_CONFIG_FILENAME,
os.path.expanduser(constants.USER_CONFIG_FILENAME),
args.conf])
if configs_read:
try:
aws = dict(config.items("aws"))
except ConfigParser.NoSectionError:
pass
try:
glacier = dict(config.items("glacier"))
except ConfigParser.NoSectionError:
pass
try:
sdb = dict(config.items("sdb"))
for key,value in sdb.items():
sdb["sdb_{}".format(key)]=value
del sdb[key]
except ConfigParser.NoSectionError:
pass
topics_present = any(topic for topic in config.sections() if topic.startswith("SNS:"))
if topics_present:
sns = { 'topics_present':True }
sns_topics = []
for topic in config.sections():
if topic.startswith("SNS:"):
s = {
'topic':topic.split("SNS:")[-1],
'options':dict(config.items(topic))
}
sns_topics += [s]
if sns_topics:
sns['topics'] = sns_topics
elif any(topic for topic in config.sections() if topic == "SNS"):
sns = { 'topics_present':False }
if not config.get('SNS', 'topic', vars={ "topic":None }):
sns['topic'] = "aws-glacier-notifications"
else:
sns['topic'] = config.get('SNS', 'topic')
# Join config options with environments
aws = dict(os.environ.items() + aws.items() )
glacier = dict(os.environ.items() + glacier.items() )
sdb = dict(os.environ.items() + sdb.items() )
# Helper functions
filt_s= lambda x: x.lower().replace("_","-")
filt = lambda x,y="": dict(((y+"-" if y not in filt_s(k) else "") +
filt_s(k), v) for (k, v) in x.iteritems())
"""
>>> a = {'notifications': 'True', 'monitored_vaults': 'vvt,vv1', "aws-foo":"neki"}
>>> filt(a, "aws").get('aws-foo')
'neki'
"""
a_required = lambda x: x not in filt(aws, "aws")
s_required = lambda x: x not in filt(sdb, "sdb")
required = lambda x: x not in filt(glacier)
a_default = lambda x: filt(aws, "aws").get(x)
s_default = lambda x: filt(sdb, "sdb").get(x)
default = lambda x: filt(glacier).get(x)
# Main configuration parser
parser = argparse.ArgumentParser(parents=[conf_parser],
formatter_class=CustomArgParseFormatter,
description=program_description)
subparsers = parser.add_subparsers(title='Subcommands',
help=u"For subcommand help, use: glacier-cmd <subcommand> -h")
# Amazon Web Services settings
group = parser.add_argument_group('aws')
group.add_argument('--aws-access-key',
required=a_required("aws-access-key"),
default=a_default("aws-access-key"),
help="Your aws access key "\
"{}".format(constants.HELP_MESSAGE_CONFIG))
group.add_argument('--aws-secret-key',
required=a_required("aws-secret-key"),
default=a_default("aws-secret-key"),
help="Your aws secret key "\
"{}".format(constants.HELP_MESSAGE_CONFIG))
# Glacier settings
group = parser.add_argument_group('glacier')
group.add_argument('--region',
required=required("region"),
default=default("region"),
help="Region where you want to store \
your archives "\
"{}".format(constants.HELP_MESSAGE_CONFIG))
group.add_argument('--account-id',
required=False,
default=default("account-id") if default("account-id") else '-',
help="AWS account ID of the account that owns the vault")
bookkeeping = True if default('bookkeeping') == 'True' else False
group.add_argument('--bookkeeping',
required=False,
default=bookkeeping,
action="store_true",
help="Should we keep book of all created "\
"archives. This requires a Amazon "\
"SimpleDB account and its "\
"bookkeeping domain name set")
group.add_argument('--no-bookkeeping',
required=False,
default=False,
action="store_true",
help="Explicitly disables bookkeeping, "\
"regardless of other configuration "\
"or command line options.")
group.add_argument('--bookkeeping-domain-name',
required=False,
default=default("bookkeeping-domain-name"),
help="Amazon SimpleDB domain name "
"for bookkeeping.")
group.add_argument('--logfile',
required=False,
default=os.path.expanduser('~/.glacier-cmd.log'),
help='File to write log messages to.')
group.add_argument('--loglevel',
required=False,
default=default('loglevel') if default('loglevel') else 'WARNING',
choices=["-1", "DEBUG", "0", "INFO", "1", "WARNING",
"2", "ERROR", "3", "CRITICAL"],
help="Set the lowest level of messages you want to log.")
group.add_argument('--output',
required=False,
default=default('output') if default('output') else 'print',
choices=constants.TABLE_OUTPUT_FORMAT,
help="Set how to return results: print to "\
"the screen, or as csv resp. json string. "\
"NOTE: to receive full output use csv or "\
"json. `print` removes lines "\
"longer than 138 chars")
# SimpleDB settings
group = parser.add_argument_group('sdb')
group.add_argument('--sdb-access-key',
required=False,
default=(s_default("sdb-access-key") or
a_default("aws-access-key")),
help="aws access key to be used with \
bookkeeping {}".format(constants.HELP_MESSAGE_CONFIG))
group.add_argument('--sdb-secret-key',
required=False,
default=(s_default("sdb-secret-key") or
a_default("aws-secret-key")),
help="aws secret key to be used with "\
"bookkeeping {}".format(constants.HELP_MESSAGE_CONFIG))
group.add_argument('--sdb-region',
required=False,
default=s_default("sdb-region") or default("region"),
help="Region where you want to store "\
"bookkeeping {}".format(constants.HELP_MESSAGE_CONFIG))
# glacier-cmd mkvault <vault>
parser_mkvault = subparsers.add_parser("mkvault",
help="Create a new vault.")
parser_mkvault.add_argument('vault',
help='The vault to be created.')
parser_mkvault.set_defaults(func=mkvault)
# glacier-cmd lsvault
parser_lsvault = subparsers.add_parser("lsvault",
help="List available vaults.")
parser_lsvault.set_defaults(func=lsvault)
# glacier-cmd describevault <vault>
parser_describevault = subparsers.add_parser('describevault',
help='Describe a vault.')
parser_describevault.add_argument('vault',
help='The vault to be described.')
parser_describevault.set_defaults(func=describevault)
# glacier-cmd rmvault <vault>
parser_rmvault = subparsers.add_parser('rmvault',
help='Remove a vault.')
parser_rmvault.add_argument('vault',
help='The vault to be removed.')
parser_rmvault.set_defaults(func=rmvault)
# glacier-cmd upload <vault> <filename> [--description <description>] [--name <store file name>] [--partsize <part size>]
# glacier-cmd upload <vault> --stdin [--description <description>] [--name <store file name>] [--partsize <part size>]
parser_upload = subparsers.add_parser('upload',
formatter_class=argparse.RawTextHelpFormatter,
help='Upload an archive to Amazon Glacier.')
parser_upload.add_argument('vault',
help='The vault the archive is to be stored in.')
parser_upload.add_argument('filename', nargs='*', default=None,
help='''\
The name(s) of the local file(s) to be uploaded. Wildcards
are accepted. Can not be used if --stdin is used.''')
parser_upload.add_argument('--stdin', action='store_true',
help='''\
Read data from stdin, instead of local file.
Can not be used if <filename> is given.''')
parser_upload.add_argument('--name', default=None,
help='''\
Use the given name as the filename for bookkeeping
purposes. To be used in conjunction with --stdin or
when the file being uploaded is a temporary file.''')
parser_upload.add_argument('--partsize', type=int, default=-1,
help='''\
Part size to use for upload (in MB). Must
be a power of 2 in the range:
1, 2, 4, 8, ..., 2,048, 4,096.
Values that are not a power of 2 will be
adjusted upwards to the next power of 2.
Amazon accepts up to 10,000 parts per upload.
Smaller parts result in more frequent progress
updates, and less bandwidth wasted if a part
needs to be re-transmitted. On the other hand,
smaller parts limit the size of the archive that
can be uploaded. Some examples:
partsize MaxArchiveSize
1 1*1024*1024*10000 ~= 9.7 GB
4 4*1024*1024*10000 ~= 39 GB
16 16*1024*1024*10000 ~= 156 GB
128 128*1024*1024*10000 ~= 1.2 TB
4096 4096*1024*1024*10000 ~= 39 TB
If not given, the smallest possible part size
will be used when uploading a file, and 128 MB
when uploading from stdin or from a FIFO pipe.''')
parser_upload.add_argument('--description', default=None,
help='''\
Description of the file to be uploaded. Use quotes
if your file name contains spaces. (optional).''')
parser_upload.add_argument('--uploadid', default=None,
help='''\
The uploadId of a multipart upload that is not
finished yet. If given, glacier-cmd will attempt
to resume this upload using the given file, or by
re-reading the data from stdin.''')
parser_upload.add_argument('--resume', action='store_true',
help='''\
Attempt to resume an interrupted multi-part upload.
Does not work in combination with --stdin, and
requires bookkeeping to be enabled.
(not implemented yet)''')
parser_upload.add_argument('--bacula', action='store_true',
help='''\
The (single!) file name will be parsed using Bacula's
style of providing multiple names on the command line.
E.g.: /path/to/backup/vol001|vol002|vol003''')
parser_upload.set_defaults(func=upload)
# glacier-cmd listmultiparts <vault>
parser_listmultiparts = subparsers.add_parser('listmultiparts',
help='List all active multipart uploads.')
parser_listmultiparts.add_argument('vault',
help='The vault to check the active multipart uploads for.')
parser_listmultiparts.set_defaults(func=listmultiparts)
# glacier-cmd abortmultipart <vault> <uploadId>
parser_abortmultipart = subparsers.add_parser('abortmultipart',
help='Abort a multipart upload.')
parser_abortmultipart.add_argument('vault',
help='The vault the upload is for.')
parser_abortmultipart.add_argument('uploadId',
help='The id of the upload to be aborted, try listmultiparts.')
parser_abortmultipart.set_defaults(func=abortmultipart)
# glacier-cmd inventory <vault> [--refresh]
parser_inventory = subparsers.add_parser('inventory',
help="List inventory of a vault, if available. If not "\
"available, creates inventory retrieval job if none "\
"running already.")
parser_inventory.add_argument('vault',
help='The vault to list the inventory of.')
parser_inventory.add_argument('--refresh', action='store_true',
help="Create an inventory retrieval job, even if inventory is "\
"available or with another retrieval job running.")
parser_inventory.set_defaults(func=inventory)
# glacier-cmd getarchive <vault> <archive>
parser_getarchive = subparsers.add_parser('getarchive',
help='Requests to make an archive available for download.')
parser_getarchive.add_argument('vault',
help='The vault the archive is stored in.')
parser_getarchive.add_argument('archive',
help='The archive id.')
parser_getarchive.set_defaults(func=getarchive)
# glacier-cmd download <vault> <archive> [--outfile <file name>]
parser_download = subparsers.add_parser('download',
formatter_class=argparse.RawTextHelpFormatter,
help='Download a file by archive id.')
parser_download.add_argument('vault',
help="Specify the vault in which archive is located.")
parser_download.add_argument('archive',
help='The archive to be downloaded.')
parser_download.add_argument('--outfile',
help='''\
The name of the local file to store the archive.
If omitted, stdout will be used.''')
parser_download.add_argument('--overwrite', action='store_true',
help='''
Overwrite an existing local file if one exists when
downloading an archive.''')
parser_download.add_argument('--partsize', type=int, default=-1,
help='''\
Part size to use for download (in MB). Must
be a power of 2 in the range:
1, 2, 4, 8, ..., 2,048, 4,096.
Values that are not a power of 2 will be
adjusted upwards to the next power of 2.
Amazon accepts up to 10,000 parts per download.
Smaller parts result in more frequent progress
updates, and less bandwidth wasted if a part
needs to be re-transmitted. On the other hand,
smaller parts limit the size of the archive that
can be downloaded and result in slower overall
performance. Some examples:
partsize MaxArchiveSize
1 1*1024*1024*10000 ~= 9.7 GB
4 4*1024*1024*10000 ~= 39 GB
16 16*1024*1024*10000 ~= 156 GB
128 128*1024*1024*10000 ~= 1.2 TB
4096 4096*1024*1024*10000 ~= 39 TB
If not given, the smallest possible part size
will be used depending on the size of the job
at hand.''')
parser_download.set_defaults(func=download)
# glacier-cmd rmarchive <vault> <archive>
parser_rmarchive = subparsers.add_parser('rmarchive',
help='Remove archive from Amazon Glacier.')
parser_rmarchive.add_argument('vault',
help='The vault the archive is stored in.')
parser_rmarchive.add_argument('archive',
help='The archive id of the archive to be removed.')
parser_rmarchive.set_defaults(func=rmarchive)
# glacier-cmd search [<vault>] [--filename <file name>] [--searchterm <search term>]
parser_search = subparsers.add_parser('search',
help='Search Amazon SimpleDB database for available archives \
(requires bookkeeping to be enabled).')
parser_search.add_argument('vault', nargs='?', default=None,
        help='The vault to search in. Searches all vaults if omitted.')
parser_search.add_argument('--filename', default=None,
help='Search key for searching by (part of) file names.')
parser_search.add_argument('--searchterm', default=None,
help='Search key for searching (part of) description fields.')
parser_search.set_defaults(func=search)
# glacier-cmd listjobs <vault>
parser_listjobs = subparsers.add_parser('listjobs',
help='List active jobs in a vault.')
parser_listjobs.add_argument('vault',
help='The vault to list the jobs for.')
parser_listjobs.set_defaults(func=listjobs)
# glacier-cmd describejob <vault>
parser_describejob = subparsers.add_parser('describejob',
help='Describe a job.')
parser_describejob.add_argument('vault',
help='The vault the job is listed for.')
parser_describejob.add_argument('jobid',
help='The job ID of the job to be described.')
parser_describejob.set_defaults(func=describejob)
# glacier-cmd hash <filename>
parser_describejob = subparsers.add_parser('treehash',
help="Calculate the tree-hash (Amazon style sha256-hash) "\
"of a file.")
parser_describejob.add_argument('filename', nargs='*',
help='The filename to calculate the treehash of.')
parser_describejob.set_defaults(func=treehash)
# SNS related commands are located in their own subparser
parser_sns = subparsers.add_parser('sns',
help="Subcommands related to SNS")
sns_subparsers = parser_sns.add_subparsers(title="Subcommands "\
"related to SNS")
# glacier-cmd sns syncs
sns_parser_sync = sns_subparsers.add_parser('sync',
help="Go through configuration file and either "\
"subscribe all vaults to default topic or, "\
"if sections are present, create separate "\
"topics and subscribe specified vaults to that topic.")
sns_parser_sync.set_defaults(func=snssync, sns_options=sns)
# glacier-cmd sns subscribe protocol endpoint topic [--vault]
sns_parser_subscribe = sns_subparsers.add_parser('subscribe',
help="Subscribe to topic.")
sns_parser_subscribe.add_argument("protocol",
help="Protocol used for notifications. Can be email, "\
"http, https or sms.")
sns_parser_subscribe.add_argument("endpoint",
help="Valid applicable endpoint - email address, "\
"URL or phone number.")
sns_parser_subscribe.add_argument("topic",
help="Topic for which notifications will be sent "\
"to specified protocol and endpoint.")
sns_parser_subscribe.add_argument("--vault",
help="Optional vault names, seperated by comma, "\
"for this a new topic will be created and subscribed to.")
sns_parser_subscribe.set_defaults(func=snssubscribe,
sns_options={ "options":sns, })
# glacier-cmd sns unsubscribe [--protocol <protocol>] [--endpoint <endpoint>] [--topic <topic>]
sns_parser_unsubscribe = sns_subparsers.add_parser('unsubscribe',
help="Unsubscribe from a specified topic.")
sns_parser_unsubscribe.add_argument("--protocol",
help="Protocol used for notifications. Can be email, "\
"http, https or sms.")
sns_parser_unsubscribe.add_argument("--endpoint",
help="Valid applicable endpoint - email address, "\
"URL or phone number.")
sns_parser_unsubscribe.add_argument("--topic",
help="Topic for which notifications will be sent to "\
"specified protocol and endpoint.")
sns_parser_unsubscribe.set_defaults(func=snsunsubscribe,
sns_options=sns)
# glacier-cmd sns lssub [--protocol <protocol>] [--endpoint <endpoint>] [--topic <topic>]
sns_parser_listsubs = sns_subparsers.add_parser('lssub',
help="List subscriptions. Other arguments are ANDed together.")
sns_parser_listsubs.add_argument("--protocol",
help="Show only subscriptions on a specified protocol.")
sns_parser_listsubs.add_argument("--endpoint",
help="Show only subscriptions to a specified endpoint.")
sns_parser_listsubs.add_argument("--topic",
help="Show only subscriptions for a specified topic.")
sns_parser_listsubs.set_defaults(func=snslistsubscriptions,
sns_options=sns)
# glacier-cmd sns lstopic
sns_parser_listtopics = sns_subparsers.add_parser('lstopic',
help="List all topics.")
sns_parser_listtopics.set_defaults(func=snslisttopics,
sns_options=sns)
# TODO args.logtostdout becomes false when parsing the
# remaining_argv so here we bridge this. An ugly hack but it works.
logtostdout = args.logtostdout
# Process the remaining arguments.
args = parser.parse_args(remaining_argv)
args.logtostdout = logtostdout
# Run the subcommand.
args.func(args)
if __name__ == "__main__":
sys.exit(main())
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import netutils
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
class Subnet(neutron.NeutronResource):
PROPERTIES = (
NETWORK_ID, NETWORK, SUBNETPOOL, PREFIXLEN, CIDR,
VALUE_SPECS, NAME, IP_VERSION, DNS_NAMESERVERS, GATEWAY_IP,
ENABLE_DHCP, ALLOCATION_POOLS, TENANT_ID, HOST_ROUTES,
IPV6_RA_MODE, IPV6_ADDRESS_MODE,
) = (
'network_id', 'network', 'subnetpool', 'prefixlen', 'cidr',
'value_specs', 'name', 'ip_version', 'dns_nameservers', 'gateway_ip',
'enable_dhcp', 'allocation_pools', 'tenant_id', 'host_routes',
'ipv6_ra_mode', 'ipv6_address_mode',
)
_ALLOCATION_POOL_KEYS = (
ALLOCATION_POOL_START, ALLOCATION_POOL_END,
) = (
'start', 'end',
)
_HOST_ROUTES_KEYS = (
ROUTE_DESTINATION, ROUTE_NEXTHOP,
) = (
'destination', 'nexthop',
)
_IPV6_DHCP_MODES = (
DHCPV6_STATEFUL, DHCPV6_STATELESS, SLAAC,
) = (
'dhcpv6-stateful', 'dhcpv6-stateless', 'slaac',
)
ATTRIBUTES = (
NAME_ATTR, NETWORK_ID_ATTR, TENANT_ID_ATTR, ALLOCATION_POOLS_ATTR,
GATEWAY_IP_ATTR, HOST_ROUTES_ATTR, IP_VERSION_ATTR, CIDR_ATTR,
DNS_NAMESERVERS_ATTR, ENABLE_DHCP_ATTR,
) = (
'name', 'network_id', 'tenant_id', 'allocation_pools',
'gateway_ip', 'host_routes', 'ip_version', 'cidr',
'dns_nameservers', 'enable_dhcp',
)
properties_schema = {
NETWORK_ID: properties.Schema(
properties.Schema.STRING,
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('Use property %s.') % NETWORK,
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.2'
)
),
constraints=[
constraints.CustomConstraint('neutron.network')
],
),
NETWORK: properties.Schema(
properties.Schema.STRING,
_('The ID of the attached network.'),
required=True,
constraints=[
constraints.CustomConstraint('neutron.network')
],
support_status=support.SupportStatus(version='2014.2')
),
SUBNETPOOL: properties.Schema(
properties.Schema.STRING,
_('The name or ID of the subnet pool.'),
constraints=[
constraints.CustomConstraint('neutron.subnetpool')
],
support_status=support.SupportStatus(version='6.0.0'),
),
PREFIXLEN: properties.Schema(
properties.Schema.INTEGER,
_('Prefix length for subnet allocation from subnet pool.'),
constraints=[constraints.Range(min=0)],
support_status=support.SupportStatus(version='6.0.0'),
),
CIDR: properties.Schema(
properties.Schema.STRING,
_('The CIDR.'),
constraints=[
constraints.CustomConstraint('net_cidr')
]
),
VALUE_SPECS: properties.Schema(
properties.Schema.MAP,
_('Extra parameters to include in the creation request.'),
default={},
update_allowed=True
),
NAME: properties.Schema(
properties.Schema.STRING,
_('The name of the subnet.'),
update_allowed=True
),
IP_VERSION: properties.Schema(
properties.Schema.INTEGER,
_('The IP version, which is 4 or 6.'),
default=4,
constraints=[
constraints.AllowedValues([4, 6]),
]
),
DNS_NAMESERVERS: properties.Schema(
properties.Schema.LIST,
_('A specified set of DNS name servers to be used.'),
default=[],
update_allowed=True
),
GATEWAY_IP: properties.Schema(
properties.Schema.STRING,
_('The gateway IP address. Set to any of [ null | ~ | "" ] to '
'create the subnet without a gateway. If omitted, the first IP '
'address within the subnet is assigned to the gateway.'),
update_allowed=True
),
ENABLE_DHCP: properties.Schema(
properties.Schema.BOOLEAN,
_('Set to true if DHCP is enabled and false if DHCP is disabled.'),
default=True,
update_allowed=True
),
ALLOCATION_POOLS: properties.Schema(
properties.Schema.LIST,
_('The start and end addresses for the allocation pools.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
ALLOCATION_POOL_START: properties.Schema(
properties.Schema.STRING,
required=True,
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
ALLOCATION_POOL_END: properties.Schema(
properties.Schema.STRING,
required=True,
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
},
),
update_allowed=True
),
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the tenant who owns the network. Only administrative'
' users can specify a tenant ID other than their own.')
),
HOST_ROUTES: properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
ROUTE_DESTINATION: properties.Schema(
properties.Schema.STRING,
required=True,
constraints=[
constraints.CustomConstraint('net_cidr')
]
),
ROUTE_NEXTHOP: properties.Schema(
properties.Schema.STRING,
required=True,
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
},
),
update_allowed=True
),
IPV6_RA_MODE: properties.Schema(
properties.Schema.STRING,
_('IPv6 RA (Router Advertisement) mode.'
' dhcpv6-stateful, dhcpv6-stateless, or slaac.'),
constraints=[
constraints.AllowedValues([DHCPV6_STATEFUL, DHCPV6_STATELESS,
SLAAC]),
],
support_status=support.SupportStatus(version='2015.1')
),
IPV6_ADDRESS_MODE: properties.Schema(
properties.Schema.STRING,
_('IPv6 address mode.'
' dhcpv6-stateful, dhcpv6-stateless, or slaac.'),
constraints=[
constraints.AllowedValues([DHCPV6_STATEFUL, DHCPV6_STATELESS,
SLAAC]),
],
support_status=support.SupportStatus(version='2015.1')
),
}
attributes_schema = {
NAME_ATTR: attributes.Schema(
_("Friendly name of the subnet."),
type=attributes.Schema.STRING
),
NETWORK_ID_ATTR: attributes.Schema(
_("Parent network of the subnet."),
type=attributes.Schema.STRING
),
TENANT_ID_ATTR: attributes.Schema(
_("Tenant owning the subnet."),
type=attributes.Schema.STRING
),
ALLOCATION_POOLS_ATTR: attributes.Schema(
_("Ip allocation pools and their ranges."),
type=attributes.Schema.LIST
),
GATEWAY_IP_ATTR: attributes.Schema(
_("Ip of the subnet's gateway."),
type=attributes.Schema.STRING
),
HOST_ROUTES_ATTR: attributes.Schema(
_("Additional routes for this subnet."),
type=attributes.Schema.LIST
),
IP_VERSION_ATTR: attributes.Schema(
_("Ip version for the subnet."),
type=attributes.Schema.STRING
),
CIDR_ATTR: attributes.Schema(
_("CIDR block notation for this subnet."),
type=attributes.Schema.STRING
),
DNS_NAMESERVERS_ATTR: attributes.Schema(
_("List of dns nameservers."),
type=attributes.Schema.LIST
),
ENABLE_DHCP_ATTR: attributes.Schema(
_("'true' if DHCP is enabled for this subnet; 'false' otherwise."),
type=attributes.Schema.STRING
),
}
def translation_rules(self, props):
return [
properties.TranslationRule(props,
properties.TranslationRule.REPLACE,
[self.NETWORK],
value_path=[self.NETWORK_ID])
]
@classmethod
def _null_gateway_ip(cls, props):
if cls.GATEWAY_IP not in props:
return
# Specifying null in the gateway_ip will result in
# a property containing an empty string.
# A null gateway_ip has special meaning in the API
# so this needs to be set back to None.
# See bug https://bugs.launchpad.net/heat/+bug/1226666
if props.get(cls.GATEWAY_IP) == '':
props[cls.GATEWAY_IP] = None
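        # Illustrative effect (assumed): props == {'gateway_ip': ''} becomes
        # {'gateway_ip': None} before the create request is sent to Neutron.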
def validate(self):
super(Subnet, self).validate()
subnetpool = self.properties[self.SUBNETPOOL]
prefixlen = self.properties[self.PREFIXLEN]
cidr = self.properties[self.CIDR]
if subnetpool and cidr:
raise exception.ResourcePropertyConflict(self.SUBNETPOOL,
self.CIDR)
if not subnetpool and not cidr:
raise exception.PropertyUnspecifiedError(self.SUBNETPOOL,
self.CIDR)
if prefixlen and cidr:
raise exception.ResourcePropertyConflict(self.PREFIXLEN,
self.CIDR)
ra_mode = self.properties[self.IPV6_RA_MODE]
address_mode = self.properties[self.IPV6_ADDRESS_MODE]
if (self.properties[self.IP_VERSION] == 4) and (
ra_mode or address_mode):
msg = _('ipv6_ra_mode and ipv6_address_mode are not supported '
'for ipv4.')
raise exception.StackValidationFailed(message=msg)
if ra_mode and address_mode and (ra_mode != address_mode):
msg = _('When both ipv6_ra_mode and ipv6_address_mode are set, '
'they must be equal.')
raise exception.StackValidationFailed(message=msg)
gateway_ip = self.properties.get(self.GATEWAY_IP)
if (gateway_ip and gateway_ip not in ['~', ''] and
not netutils.is_valid_ip(gateway_ip)):
msg = (_('Gateway IP address "%(gateway)s" is in '
'invalid format.'), gateway_ip)
raise exception.StackValidationFailed(message=msg)
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
self.client_plugin().resolve_network(props, self.NETWORK,
'network_id')
if self.SUBNETPOOL in props and props[self.SUBNETPOOL]:
props['subnetpool_id'] = self.client_plugin(
).find_resourceid_by_name_or_id(
'subnetpool', props.pop('subnetpool'))
self._null_gateway_ip(props)
subnet = self.client().create_subnet({'subnet': props})['subnet']
self.resource_id_set(subnet['id'])
def handle_delete(self):
try:
self.client().delete_subnet(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
def _show_resource(self):
return self.client().show_subnet(self.resource_id)['subnet']
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
props = self.prepare_update_properties(json_snippet)
if (self.ALLOCATION_POOLS in prop_diff and
self.ALLOCATION_POOLS not in props):
props[self.ALLOCATION_POOLS] = []
self.client().update_subnet(
self.resource_id, {'subnet': props})
def resource_mapping():
return {
'OS::Neutron::Subnet': Subnet,
}
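# Illustrative HOT template usage (assumed example, not part of this module):
#
#   my_subnet:
#     type: OS::Neutron::Subnet
#     properties:
#       network: { get_resource: my_net }
#       cidr: 10.0.0.0/24
#       ip_version: 4
#       dns_nameservers: [8.8.8.8]
#       allocation_pools:
#         - start: 10.0.0.10
#           end: 10.0.0.200
#
# Exactly one of 'cidr' or 'subnetpool' may be given: validate() raises
# ResourcePropertyConflict when both are set and PropertyUnspecifiedError
# when neither is.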
|
|
# Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test MongoClient's mongos load balancing using a mock."""
import sys
import threading
sys.path[0:0] = [""]
from pymongo.errors import AutoReconnect, InvalidOperation
from pymongo.server_selectors import writable_server_selector
from pymongo.topology_description import TOPOLOGY_TYPE
from test import unittest, client_context, MockClientTest
from test.pymongo_mocks import MockClient
from test.utils import connected, wait_until
@client_context.require_connection
def setUpModule():
pass
class SimpleOp(threading.Thread):
def __init__(self, client):
super(SimpleOp, self).__init__()
self.client = client
self.passed = False
def run(self):
self.client.db.command('ismaster')
self.passed = True # No exception raised.
def do_simple_op(client, nthreads):
threads = [SimpleOp(client) for _ in range(nthreads)]
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
assert t.passed
def writable_addresses(topology):
return set(server.description.address for server in
topology.select_servers(writable_server_selector))
class TestMongosLoadBalancing(MockClientTest):
def mock_client(self, **kwargs):
mock_client = MockClient(
standalones=[],
members=[],
mongoses=['a:1', 'b:2', 'c:3'],
host='a:1,b:2,c:3',
connect=False,
**kwargs)
# Latencies in seconds.
mock_client.mock_rtts['a:1'] = 0.020
mock_client.mock_rtts['b:2'] = 0.025
mock_client.mock_rtts['c:3'] = 0.045
return mock_client
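        # Note (assumed): with PyMongo's default localThresholdMS of 15 ms the
        # selectable window is [20 ms, 35 ms], so only 'a:1' and 'b:2' are
        # normally eligible; 'c:3' at 45 ms falls outside the window.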
def test_lazy_connect(self):
# While connected() ensures we can trigger connection from the main
# thread and wait for the monitors, this test triggers connection from
# several threads at once to check for data races.
nthreads = 10
client = self.mock_client()
self.assertEqual(0, len(client.nodes))
# Trigger initial connection.
do_simple_op(client, nthreads)
wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')
def test_reconnect(self):
nthreads = 10
client = connected(self.mock_client())
# connected() ensures we've contacted at least one mongos. Wait for
# all of them.
wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')
# Trigger reconnect.
client.close()
do_simple_op(client, nthreads)
wait_until(lambda: len(client.nodes) == 3,
'reconnect to all mongoses')
def test_failover(self):
nthreads = 10
client = connected(self.mock_client(localThresholdMS=0.001))
wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')
# Our chosen mongos goes down.
client.kill_host('a:1')
# Trigger failover to higher-latency nodes. AutoReconnect should be
# raised at most once in each thread.
passed = []
def f():
try:
client.db.command('ismaster')
except AutoReconnect:
# Second attempt succeeds.
client.db.command('ismaster')
passed.append(True)
threads = [threading.Thread(target=f) for _ in range(nthreads)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(nthreads, len(passed))
# Down host removed from list.
self.assertEqual(2, len(client.nodes))
def test_local_threshold(self):
client = connected(self.mock_client(localThresholdMS=30))
self.assertEqual(30, client.local_threshold_ms)
wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')
topology = client._topology
# All are within a 30-ms latency window, see self.mock_client().
self.assertEqual(set([('a', 1), ('b', 2), ('c', 3)]),
writable_addresses(topology))
# No error
client.admin.command('ismaster')
client = connected(self.mock_client(localThresholdMS=0))
self.assertEqual(0, client.local_threshold_ms)
# No error
client.db.command('ismaster')
# Our chosen mongos goes down.
client.kill_host('%s:%s' % next(iter(client.nodes)))
try:
client.db.command('ismaster')
except:
pass
# We eventually connect to a new mongos.
def connect_to_new_mongos():
try:
return client.db.command('ismaster')
except AutoReconnect:
pass
wait_until(connect_to_new_mongos, 'connect to a new mongos')
def test_load_balancing(self):
# Although the server selection JSON tests already prove that
# select_servers works for sharded topologies, here we do an end-to-end
# test of discovering servers' round trip times and configuring
# localThresholdMS.
client = connected(self.mock_client())
wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')
# Prohibited for topology type Sharded.
with self.assertRaises(InvalidOperation):
client.address
topology = client._topology
self.assertEqual(TOPOLOGY_TYPE.Sharded,
topology.description.topology_type)
# a and b are within the 15-ms latency window, see self.mock_client().
self.assertEqual(set([('a', 1), ('b', 2)]),
writable_addresses(topology))
client.mock_rtts['a:1'] = 0.045
# Discover only b is within latency window.
wait_until(lambda: set([('b', 2)]) == writable_addresses(topology),
'discover server "a" is too far')
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
#
# test_multibytecodec_support.py
# Common Unittest Routines for CJK codecs
#
import codecs
import os
import re
import sys
import unittest
from httplib import HTTPException
from test import test_support
from StringIO import StringIO
class TestBase:
encoding = '' # codec name
codec = None # codec tuple (with 4 elements)
tstring = '' # string to test StreamReader
codectests = None # must set. codec test tuple
roundtriptest = 1 # set if roundtrip is possible with unicode
has_iso10646 = 0 # set if this encoding contains whole iso10646 map
xmlcharnametest = None # string to test xmlcharrefreplace
unmappedunicode = u'\udeee' # a unicode codepoint that is not mapped.
def setUp(self):
if self.codec is None:
self.codec = codecs.lookup(self.encoding)
self.encode = self.codec.encode
self.decode = self.codec.decode
self.reader = self.codec.streamreader
self.writer = self.codec.streamwriter
self.incrementalencoder = self.codec.incrementalencoder
self.incrementaldecoder = self.codec.incrementaldecoder
def test_chunkcoding(self):
for native, utf8 in zip(*[StringIO(f).readlines()
for f in self.tstring]):
u = self.decode(native)[0]
self.assertEqual(u, utf8.decode('utf-8'))
if self.roundtriptest:
self.assertEqual(native, self.encode(u)[0])
def test_errorhandle(self):
for source, scheme, expected in self.codectests:
if type(source) == type(''):
func = self.decode
else:
func = self.encode
if expected:
result = func(source, scheme)[0]
self.assertEqual(result, expected)
else:
self.assertRaises(UnicodeError, func, source, scheme)
def test_xmlcharrefreplace(self):
if self.has_iso10646:
return
s = u"\u0b13\u0b23\u0b60 nd eggs"
self.assertEqual(
self.encode(s, "xmlcharrefreplace")[0],
"ଓଣୠ nd eggs"
)
def test_customreplace_encode(self):
if self.has_iso10646:
return
from htmlentitydefs import codepoint2name
def xmlcharnamereplace(exc):
if not isinstance(exc, UnicodeEncodeError):
raise TypeError("don't know how to handle %r" % exc)
l = []
for c in exc.object[exc.start:exc.end]:
if ord(c) in codepoint2name:
l.append(u"&%s;" % codepoint2name[ord(c)])
else:
l.append(u"&#%d;" % ord(c))
return (u"".join(l), exc.end)
codecs.register_error("test.xmlcharnamereplace", xmlcharnamereplace)
if self.xmlcharnametest:
sin, sout = self.xmlcharnametest
else:
sin = u"\xab\u211c\xbb = \u2329\u1234\u232a"
sout = "«ℜ» = ⟨ሴ⟩"
self.assertEqual(self.encode(sin,
"test.xmlcharnamereplace")[0], sout)
def test_callback_wrong_objects(self):
def myreplace(exc):
return (ret, exc.end)
codecs.register_error("test.cjktest", myreplace)
for ret in ([1, 2, 3], [], None, object(), 'string', ''):
self.assertRaises(TypeError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_long_index(self):
def myreplace(exc):
return (u'x', long(exc.end))
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
'test.cjktest'), ('abcdxefgh', 9))
def myreplace(exc):
return (u'x', sys.maxint + 1)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises((IndexError, OverflowError), self.encode,
self.unmappedunicode, 'test.cjktest')
def test_callback_None_index(self):
def myreplace(exc):
return (u'x', None)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(TypeError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_backward_index(self):
def myreplace(exc):
if myreplace.limit > 0:
myreplace.limit -= 1
return (u'REPLACED', 0)
else:
return (u'TERMINAL', exc.end)
myreplace.limit = 3
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
'test.cjktest'),
('abcdREPLACEDabcdREPLACEDabcdREPLACEDabcdTERMINALefgh', 9))
def test_callback_forward_index(self):
def myreplace(exc):
return (u'REPLACED', exc.end + 2)
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
'test.cjktest'), ('abcdREPLACEDgh', 9))
def test_callback_index_outofbound(self):
def myreplace(exc):
return (u'TERM', 100)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(IndexError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_incrementalencoder(self):
UTF8Reader = codecs.getreader('utf-8')
for sizehint in [None] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = UTF8Reader(StringIO(self.tstring[1]))
ostream = StringIO()
encoder = self.incrementalencoder()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
e = encoder.encode(data)
ostream.write(e)
self.assertEqual(ostream.getvalue(), self.tstring[0])
def test_incrementaldecoder(self):
UTF8Writer = codecs.getwriter('utf-8')
for sizehint in [None, -1] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = StringIO(self.tstring[0])
ostream = UTF8Writer(StringIO())
decoder = self.incrementaldecoder()
while 1:
data = istream.read(sizehint)
if not data:
break
else:
u = decoder.decode(data)
ostream.write(u)
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_incrementalencoder_error_callback(self):
inv = self.unmappedunicode
e = self.incrementalencoder()
self.assertRaises(UnicodeEncodeError, e.encode, inv, True)
e.errors = 'ignore'
self.assertEqual(e.encode(inv, True), '')
e.reset()
def tempreplace(exc):
return (u'called', exc.end)
codecs.register_error('test.incremental_error_callback', tempreplace)
e.errors = 'test.incremental_error_callback'
self.assertEqual(e.encode(inv, True), 'called')
# again
e.errors = 'ignore'
self.assertEqual(e.encode(inv, True), '')
def test_streamreader(self):
UTF8Writer = codecs.getwriter('utf-8')
for name in ["read", "readline", "readlines"]:
for sizehint in [None, -1] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = self.reader(StringIO(self.tstring[0]))
ostream = UTF8Writer(StringIO())
func = getattr(istream, name)
while 1:
data = func(sizehint)
if not data:
break
if name == "readlines":
ostream.writelines(data)
else:
ostream.write(data)
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_streamwriter(self):
readfuncs = ('read', 'readline', 'readlines')
UTF8Reader = codecs.getreader('utf-8')
for name in readfuncs:
for sizehint in [None] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = UTF8Reader(StringIO(self.tstring[1]))
ostream = self.writer(StringIO())
func = getattr(istream, name)
while 1:
if sizehint is not None:
data = func(sizehint)
else:
data = func()
if not data:
break
if name == "readlines":
ostream.writelines(data)
else:
ostream.write(data)
self.assertEqual(ostream.getvalue(), self.tstring[0])
class TestBase_Mapping(unittest.TestCase):
pass_enctest = []
pass_dectest = []
supmaps = []
def __init__(self, *args, **kw):
unittest.TestCase.__init__(self, *args, **kw)
try:
self.open_mapping_file().close() # test it to report the error early
except (IOError, HTTPException):
self.skipTest("Could not retrieve "+self.mapfileurl)
def open_mapping_file(self):
return test_support.open_urlresource(self.mapfileurl)
def test_mapping_file(self):
if self.mapfileurl.endswith('.xml'):
self._test_mapping_file_ucm()
else:
self._test_mapping_file_plain()
def _test_mapping_file_plain(self):
_unichr = lambda c: eval("u'\\U%08x'" % int(c, 16))
unichrs = lambda s: u''.join(_unichr(c) for c in s.split('+'))
urt_wa = {}
with self.open_mapping_file() as f:
for line in f:
if not line:
break
data = line.split('#')[0].strip().split()
if len(data) != 2:
continue
csetval = eval(data[0])
if csetval <= 0x7F:
csetch = chr(csetval & 0xff)
elif csetval >= 0x1000000:
csetch = chr(csetval >> 24) + chr((csetval >> 16) & 0xff) + \
chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
elif csetval >= 0x10000:
csetch = chr(csetval >> 16) + \
chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
elif csetval >= 0x100:
csetch = chr(csetval >> 8) + chr(csetval & 0xff)
else:
continue
unich = unichrs(data[1])
if unich == u'\ufffd' or unich in urt_wa:
continue
urt_wa[unich] = csetch
self._testpoint(csetch, unich)
def _test_mapping_file_ucm(self):
with self.open_mapping_file() as f:
ucmdata = f.read()
uc = re.findall('<a u="([A-F0-9]{4})" b="([0-9A-F ]+)"/>', ucmdata)
for uni, coded in uc:
unich = unichr(int(uni, 16))
codech = ''.join(chr(int(c, 16)) for c in coded.split())
self._testpoint(codech, unich)
def test_mapping_supplemental(self):
for mapping in self.supmaps:
self._testpoint(*mapping)
def _testpoint(self, csetch, unich):
if (csetch, unich) not in self.pass_enctest:
try:
self.assertEqual(unich.encode(self.encoding), csetch)
except UnicodeError, exc:
self.fail('Encoding failed while testing %s -> %s: %s' % (
repr(unich), repr(csetch), exc.reason))
if (csetch, unich) not in self.pass_dectest:
try:
self.assertEqual(csetch.decode(self.encoding), unich)
except UnicodeError, exc:
self.fail('Decoding failed while testing %s -> %s: %s' % (
repr(csetch), repr(unich), exc.reason))
def load_teststring(name):
dir = test_support.findfile('cjkencodings')
with open(os.path.join(dir, name + '.txt'), 'rb') as f:
encoded = f.read()
with open(os.path.join(dir, name + '-utf8.txt'), 'rb') as f:
utf8 = f.read()
return encoded, utf8
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import operator
import apache_beam as beam
from apache_beam import io
from apache_beam import pvalue
from apache_beam.options import pipeline_options
from apache_beam.options import value_provider
from apache_beam.transforms import util
import bigquery_mod as bq_mod
import common as c
class RuntimeOptions(pipeline_options.PipelineOptions):
"""Specifies runtime options for the pipeline.
Class defining the arguments that can be passed to the pipeline to
customize the execution.
"""
@classmethod
def _add_argparse_args(cls, parser):
parser.add_value_provider_argument(f'--{c._OPTION_INPUT_BQ_QUERY}')
parser.add_value_provider_argument(f'--{c._OPTION_INPUT_BQ_PROJECT}')
parser.add_value_provider_argument(f'--{c._OPTION_TEMP_GCS_LOCATION}')
parser.add_value_provider_argument(f'--{c._OPTION_OUTPUT_FOLDER}')
parser.add_value_provider_argument(f'--{c._OPTION_OUTPUT_BQ_PROJECT}')
parser.add_value_provider_argument(f'--{c._OPTION_OUTPUT_BQ_DATASET}')
parser.add_value_provider_argument(
f'--{c._OPTION_CUSTOMER_ID_COLUMN_NAME}')
parser.add_value_provider_argument(
f'--{c._OPTION_TRANSACTION_DATE_COLUMN_NAME}')
parser.add_value_provider_argument(
f'--{c._OPTION_SALES_COLUMN_NAME}')
parser.add_value_provider_argument(
f'--{c._OPTION_EXTRA_DIMENSION_COLUMN_NAME}')
parser.add_value_provider_argument(f'--{c._OPTION_DATE_PARSING_PATTERN}')
parser.add_value_provider_argument(
f'--{c._OPTION_MODEL_TIME_GRANULARITY}',
default=c.TimeGranularityParams.GRANULARITY_WEEKLY)
parser.add_value_provider_argument(
f'--{c._OPTION_FREQUENCY_MODEL_TYPE}', default=c._MODEL_TYPE_MBGNBD)
parser.add_value_provider_argument(
f'--{c._OPTION_CALIBRATION_START_DATE}')
parser.add_value_provider_argument(f'--{c._OPTION_CALIBRATION_END_DATE}')
parser.add_value_provider_argument(f'--{c._OPTION_COHORT_START_DATE}')
parser.add_value_provider_argument(f'--{c._OPTION_COHORT_END_DATE}')
parser.add_value_provider_argument(f'--{c._OPTION_HOLDOUT_END_DATE}')
parser.add_value_provider_argument(
f'--{c._OPTION_PREDICTION_PERIOD}', default=52, type=int)
parser.add_value_provider_argument(
f'--{c._OPTION_OUTPUT_SEGMENTS}', default=5, type=int)
parser.add_value_provider_argument(
f'--{c._OPTION_TRANSACTION_FREQUENCY_THRESHOLD}', default=15,
type=int)
parser.add_value_provider_argument(
f'--{c._OPTION_PENALIZER_COEF}', default=0.0, type=float)
parser.add_value_provider_argument(
f'--{c._OPTION_ROUND_NUMBERS}', default="False")
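# Note: each option above is registered as a ValueProvider, so its concrete
# value is only resolved at pipeline execution time. A minimal sketch of the
# resolution pattern (variable names here are illustrative; run() below does
# the same thing via getattr() with the c._OPTION_* constants):
#
#   opts = pipeline_options.PipelineOptions(pipeline_args).view_as(RuntimeOptions)
#   query_vp = getattr(opts, c._OPTION_INPUT_BQ_QUERY)  # a ValueProvider
#   query = query_vp.get()  # concrete value, only available at run time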
def run(argv=None):
"""Main function.
Main function containing the Apache Beam pipeline describing how to process
the input CSV file to generate the LTV predictions.
"""
parser = argparse.ArgumentParser()
_, pipeline_args = parser.parse_known_args(argv)
options = pipeline_options.PipelineOptions(pipeline_args)
runtime_options = options.view_as(RuntimeOptions)
with beam.Pipeline(options=options) as pipeline:
options = (
pipeline
| 'Create single element Stream containing options dict' >>
beam.Create([options.get_all_options()])
| beam.Map(lambda x: {
k: v.get() if isinstance(v, value_provider.ValueProvider)
else v
for (k, v) in x.items()
})
| beam.Map(c.set_extra_options)
)
full_elog = (
pipeline
| bq_mod.ReadFromBigQuery(
project=getattr(runtime_options, c._OPTION_INPUT_BQ_PROJECT),
query=getattr(runtime_options, c._OPTION_INPUT_BQ_QUERY),
gcs_location=getattr(runtime_options, c._OPTION_TEMP_GCS_LOCATION),
use_standard_sql=True
)
| beam.FlatMap(
c.bq_row_to_list,
pvalue.AsSingleton(options)) # (customer_id, date_str, date,
# sales, extra_dimension?)
)
full_elog_merged = (
full_elog
| beam.Filter(lambda x: x[3] > 0) # sales > 0
| beam.Map(lambda x: ((x[0], x[1]), x)) # key: (customer_id, date)
| 'Group full elog by customer and date' >> beam.GroupByKey()
| beam.Map(c.merge_full_elog_by_customer_and_date) # (customer_id,
# date_str, date,
# sales)
)
min_max_dates = (
full_elog_merged
| beam.Map(lambda x: x[2]) # date
| beam.CombineGlobally(c.MinMaxDatesFn())
| beam.Map(c.min_max_dates_dict)
)
limits_dates = (
min_max_dates
| beam.FlatMap(c.limit_dates_boundaries, pvalue.AsSingleton(options))
)
cohort = (
full_elog_merged
| beam.FlatMap(c.filter_customers_in_cohort,
pvalue.AsSingleton(limits_dates))
| 'Distinct Customer IDs in Cohort' >> util.Distinct()
)
cohort_count = (
cohort
| 'Count cohort entries' >> beam.combiners.Count.Globally()
)
cohort_set = (
cohort
| beam.Map(lambda x: (x, 1))
)
all_customer_ids = (
full_elog_merged
| beam.Map(lambda x: x[0]) # key: customer_id
| 'Distinct all Customer IDs' >> util.Distinct()
)
all_customer_ids_count = (
all_customer_ids
| 'Count all customers' >> beam.combiners.Count.Globally()
)
num_customers = (
pipeline
| 'Create single elem Stream I' >> beam.Create([1])
| beam.FlatMap(c.count_customers,
pvalue.AsSingleton(cohort_count),
pvalue.AsSingleton(all_customer_ids_count),
pvalue.AsSingleton(options))
)
cal_hol_elog = (
full_elog_merged
| beam.FlatMap(c.filter_cohort_records_in_cal_hol,
pvalue.AsDict(cohort_set),
pvalue.AsSingleton(limits_dates))
)
cal_hol_elog_count = (
cal_hol_elog
| 'Count cal hol elog entries' >> beam.combiners.Count.Globally()
)
calibration = (
cal_hol_elog
| beam.FlatMap(c.filter_records_in_calibration,
pvalue.AsSingleton(limits_dates))
)
num_txns_total = (
full_elog_merged
| beam.FlatMap(c.filter_records_in_cal_hol,
pvalue.AsSingleton(limits_dates))
| 'Count num txns total' >> beam.combiners.Count.Globally()
)
num_txns = (
pipeline
| 'Create single elem Stream II' >> beam.Create([1])
| beam.FlatMap(c.count_txns,
pvalue.AsSingleton(cal_hol_elog_count),
pvalue.AsSingleton(num_txns_total),
pvalue.AsSingleton(options))
)
calcbs = (
calibration
| beam.Map(lambda x: (x[0], x))
| 'Group calibration elog by customer id' >> beam.GroupByKey()
| beam.FlatMap(
c.create_cal_cbs,
pvalue.AsSingleton(options),
pvalue.AsSingleton(limits_dates)
) # (customer_id, number_of_transactions, average_order_value,
# frequency, recency, total_time_observed)
)
first_transaction_dates_by_customer = (
cal_hol_elog
| beam.Map(lambda x: (x[0], x)) # customer_id
| 'Group cal hol elog by customer id' >> beam.GroupByKey()
| beam.Map(lambda x: (x[0], min(map(operator.itemgetter(2), x[1])))
) # item 2 -> date
)
cal_hol_elog_repeat = (
cal_hol_elog
| beam.FlatMap(c.filter_first_transaction_date_records,
pvalue.AsDict(first_transaction_dates_by_customer))
| beam.FlatMap(
c.calculate_time_unit_numbers, # (customer_id, date,
# time_unit_number)
pvalue.AsSingleton(options),
pvalue.AsSingleton(limits_dates))
| beam.Map(lambda x: (x[2], 1)) # key: time_unit_number
| 'Group cal hol elog repeat by time unit number' >>
beam.GroupByKey()
| beam.Map(lambda x: (x[0], sum(x[1]))
) # (time_unit_number, occurrences)
)
repeat_tx = (
pipeline
| 'Create single elem Stream III' >> beam.Create([1])
| beam.FlatMap(c.calculate_cumulative_repeat_transactions,
pvalue.AsIter(cal_hol_elog_repeat)
) # (time_unit_number, repeat_transactions,
# repeat_transactions_cumulative)
)
model_validation = (
pipeline
| 'Create single elem Stream IV' >> beam.Create([1])
| beam.FlatMap(c.calculate_model_fit_validation,
pvalue.AsSingleton(options),
pvalue.AsSingleton(limits_dates),
pvalue.AsIter(calcbs),
pvalue.AsIter(repeat_tx),
pvalue.AsSingleton(num_customers),
pvalue.AsSingleton(num_txns))
)
_ = (
model_validation
| beam.Map(c.raise_error_if_invalid_mape)
)
_ = (
model_validation
| beam.Map(lambda x: x[0])
| 'Write to validation_params table' >>
io.WriteToBigQuery(
table=c.TableValueProvider(
getattr(runtime_options, c._OPTION_OUTPUT_BQ_PROJECT),
getattr(runtime_options, c._OPTION_OUTPUT_BQ_DATASET),
'validation_params'
),
custom_gcs_temp_location=getattr(runtime_options, c._OPTION_TEMP_GCS_LOCATION),
validate=False,
schema={
'fields': [
{'name': 'calibration_start_date', 'type': 'STRING'},
{'name': 'calibration_end_date', 'type': 'STRING'},
{'name': 'cohort_start_date', 'type': 'STRING'},
{'name': 'cohort_end_date', 'type': 'STRING'},
{'name': 'holdout_end_date', 'type': 'STRING'},
{'name': 'model_time_granularity', 'type': 'STRING'},
{'name': 'model', 'type': 'RECORD',
'fields': [
{'name': 'frequency_model', 'type': 'STRING'},
{'name': 'num_customers_cohort', 'type': 'INTEGER'},
{'name': 'perc_customers_cohort', 'type': 'FLOAT'},
{'name': 'num_transactions_validation', 'type': 'INTEGER'},
{'name': 'perc_transactions_validation', 'type': 'FLOAT'},
{'name': 'validation_mape', 'type': 'STRING'},
]}
]
},
write_disposition=io.BigQueryDisposition.WRITE_TRUNCATE,
create_disposition=io.BigQueryDisposition.CREATE_IF_NEEDED)
)
fullcbs_without_extra_dimension = (
full_elog_merged
| beam.Map(lambda x: (x[0], x)) # key: customer_id
| 'Group full merged elog by customer id' >> beam.GroupByKey()
| beam.FlatMap(
c.create_fullcbs,
pvalue.AsSingleton(options),
pvalue.AsSingleton(min_max_dates)
) # (customer_id, number_of_transactions, historical_aov,
# frequency, recency, total_time_observed)
)
full_elog_if_extra_dimension = (
full_elog
| 'Discard records if no extra dimension' >> beam.FlatMap(
c.discard_if_no_extra_dimension, pvalue.AsSingleton(options))
)
extra_dimensions_stats = (
full_elog_if_extra_dimension
| beam.Map(lambda x: ((x[0], x[4]), x)
) # key: (customer_id, extra_dimension)
| 'Group full elog by customer id and extra dimension' >>
beam.GroupByKey()
| beam.Map(
c.create_extra_dimensions_stats
) # (customer_id, extra_dimension, dimension_count, tot_sales,
# max_dimension_date)
)
top_dimension_per_customer = (
extra_dimensions_stats
| beam.Map(lambda x: (x[0], x)) # customer_id
| 'Group extra dimension stats by customer id' >> beam.GroupByKey()
| beam.Map(
c.extract_top_extra_dimension
) # (customer_id, extra_dimension, dimension_count, tot_sales,
# max_dimension_date)
)
customer_dimension_map = (
top_dimension_per_customer
| beam.Map(
lambda x: (x[0], x[1])) # (customer_id, extra_dimension)
)
prediction = (
pipeline
| 'Create single elem Stream V' >> beam.Create([1])
| beam.FlatMap(
c.calculate_prediction,
pvalue.AsSingleton(options),
pvalue.AsIter(fullcbs_without_extra_dimension),
pvalue.AsSingleton(num_customers),
pvalue.AsSingleton(num_txns)
) # [customer_id, p_alive, predicted_purchases, future_aov,
# historical_aov, expected_value, frequency, recency,
# total_time_observed], prediction_params
)
prediction_by_customer_no_segments_no_extra_dimension = (
prediction
| beam.FlatMap(lambda x: x[0]) # Extract predictions by customer
)
prediction_by_customer_no_segments = (
prediction_by_customer_no_segments_no_extra_dimension
| beam.FlatMap(
c.add_top_extra_dimension_to_fullcbs,
pvalue.AsSingleton(options),
pvalue.AsDict(customer_dimension_map)
) # [customer_id, p_alive, predicted_purchases, future_aov
# historical_aov, expected_value, frequency, recency,
# total_time_observed, extra_dimension?]
)
_ = (
prediction
| beam.Map(lambda x: x[1]) # Extract prediction params
| 'Write to prediction_params table' >>
io.WriteToBigQuery(
table=c.TableValueProvider(
getattr(runtime_options, c._OPTION_OUTPUT_BQ_PROJECT),
getattr(runtime_options, c._OPTION_OUTPUT_BQ_DATASET),
'prediction_params'
),
custom_gcs_temp_location=getattr(runtime_options, c._OPTION_TEMP_GCS_LOCATION),
validate=False,
schema={
'fields': [
{'name': 'prediction_period', 'type': 'INTEGER'},
{'name': 'prediction_period_unit', 'type': 'STRING'},
{'name': 'model_time_granularity', 'type': 'STRING'},
{'name': 'customers_modeled', 'type': 'INTEGER'},
{'name': 'transactions_observed', 'type': 'INTEGER'},
{'name': 'frequency_model', 'type': 'STRING'},
{'name': 'bgnbd_model_params', 'type': 'RECORD',
'fields': [
{'name': 'a', 'type': 'FLOAT'},
{'name': 'b', 'type': 'FLOAT'},
{'name': 'r', 'type': 'FLOAT'},
{'name': 'alpha', 'type': 'FLOAT'}
]},
{'name': 'bgbb_model_params', 'type': 'RECORD',
'fields': [
{'name': 'alpha', 'type': 'FLOAT'},
{'name': 'beta', 'type': 'FLOAT'},
{'name': 'gamma', 'type': 'FLOAT'},
{'name': 'delta', 'type': 'FLOAT'}
]},
{'name': 'paretonbd_model_params',
'type': 'RECORD',
'fields': [
{'name': 'r', 'type': 'FLOAT'},
{'name': 's', 'type': 'FLOAT'},
{'name': 'alpha', 'type': 'FLOAT'},
{'name': 'beta', 'type': 'FLOAT'}
]},
{'name': 'gamma_gamma_params',
'type': 'RECORD',
'fields': [
{'name': 'p', 'type': 'FLOAT'},
{'name': 'q', 'type': 'FLOAT'},
{'name': 'v', 'type': 'FLOAT'}
]}
]
},
write_disposition=io.BigQueryDisposition.WRITE_TRUNCATE,
create_disposition=io.BigQueryDisposition.CREATE_IF_NEEDED)
)
num_rows = (
full_elog_merged
| 'Count num rows in full elog merged' >>
beam.combiners.Count.Globally()
)
segment_predictions_exact = (
pipeline
| 'Create single elem Stream VII' >> beam.Create([1])
| beam.FlatMap(lambda _, rows_count: [
rows_count <= c._SEGMENT_PREDICTION_THRESHOLD],
pvalue.AsSingleton(num_rows))
)
sharded_cust_predictions_no_segments_exact, \
sharded_cust_predictions_no_segments_hash = (
prediction_by_customer_no_segments
| beam.FlatMap(
c.prediction_sharded,
pvalue.AsSingleton(options),
pvalue.AsSingleton(segment_predictions_exact)
) # [customer_id, p_alive, predicted_purchases, future_aov,
# historical_aov, expected_value, frequency, recency,
# total_time_observed, extra_dimension?]
| beam.Partition(lambda x, _: 0 if x[1] else 1, 2)
)
# BEGIN of "exact" branch
prediction_by_customer_exact = (
pipeline
| 'Create single elem Stream VIII' >> beam.Create([1])
| beam.FlatMap(c.split_in_ntiles_exact,
pvalue.AsSingleton(options),
pvalue.AsIter(
sharded_cust_predictions_no_segments_exact)
) # [customer_id, p_alive, predicted_purchases,
# future_aov, historical_aov, expected_value,
# frequency, recency, total_time_observed,
# segment, extra_dimension?]
)
# END of "exact" branch
# BEGIN of "hash" branch
customer_count_by_expected_value = (
sharded_cust_predictions_no_segments_hash
| beam.Map(lambda x: (x[0][5], 1)) # (expected_value, 1)
| 'Group customer predictions by expected value' >>
beam.GroupByKey()
| beam.Map(
lambda x: (x[0], sum(x[1]))) # expected_value, customers_count
)
hash_segment_limits = (
pipeline
| 'Create single elem Stream IX' >> beam.Create([1])
| beam.FlatMap(c.expected_values_segment_limits,
pvalue.AsSingleton(options),
pvalue.AsIter(customer_count_by_expected_value),
pvalue.AsSingleton(all_customer_ids_count))
)
prediction_by_customer_hash = (
sharded_cust_predictions_no_segments_hash
| beam.Map(lambda x: x[0])
| beam.FlatMap(c.split_in_ntiles_hash,
pvalue.AsSingleton(hash_segment_limits)
) # [customer_id, p_alive, predicted_purchases,
# future_aov, historical_aov, expected_value,
# frequency, recency, total_time_observed,
# segment, extra_dimension?]
)
# END of "hash" branch
prediction_by_customer = (
        # only one of these two streams will contain values
(prediction_by_customer_exact, prediction_by_customer_hash)
| beam.Flatten()
| beam.Map(c.clean_nan_and_inf)
)
_ = (
prediction_by_customer
| beam.FlatMap(lambda x, opts: [x + ['']]
if not opts[c._OPTION_EXTRA_DIMENSION_EXISTS] else [x],
pvalue.AsSingleton(options))
| 'prediction_by_customer to Dict' >> beam.Map(c.list_to_dict, [
'customer_id', 'p_alive', 'predicted_purchases',
'future_aov', 'historical_aov',
'expected_value', 'frequency', 'recency',
'total_time_observed', 'segment',
'extra_dimension'])
| 'Write to prediction_by_customer table' >>
io.WriteToBigQuery(
table=c.TableValueProvider(
getattr(runtime_options, c._OPTION_OUTPUT_BQ_PROJECT),
getattr(runtime_options, c._OPTION_OUTPUT_BQ_DATASET),
'prediction_by_customer'
),
custom_gcs_temp_location=getattr(runtime_options, c._OPTION_TEMP_GCS_LOCATION),
validate=False,
schema='customer_id:STRING, p_alive:FLOAT64'
', predicted_purchases:FLOAT64'
', future_aov:FLOAT64, historical_aov:FLOAT64'
', expected_value:FLOAT64, frequency:INT64'
', recency:FLOAT64'
', total_time_observed:FLOAT64, segment:INT64'
', extra_dimension:STRING',
write_disposition=io.BigQueryDisposition.WRITE_TRUNCATE,
create_disposition=io.BigQueryDisposition.CREATE_IF_NEEDED)
)
prediction_summary_temp = (
prediction_by_customer
| beam.Map(lambda x: (x[9], x)) # key: segment
| 'Group customer predictions by segment' >> beam.GroupByKey()
| beam.FlatMap(c.generate_prediction_summary,
pvalue.AsSingleton(options)
) # (segment, average_retention_probability,
# average_predicted_customer_value,
# average_predicted_order_value,
# average_predicted_purchases, total_customer_value,
# number_of_customers)
)
tot_equity = (
prediction_summary_temp
| beam.Map(lambda x: x[5]) # total_customer_value
| beam.CombineGlobally(sum)
)
prediction_summary = (
prediction_summary_temp
| beam.FlatMap(
c.calculate_perc_of_total_customer_value,
pvalue.AsSingleton(tot_equity),
pvalue.AsSingleton(options)
) # (segment, average_retention_probability,
# average_predicted_customer_value,
# average_predicted_order_value,
# average_predicted_purchases,
# total_customer_value, number_of_customers,
# perc_of_total_customer_value)
)
_ = (
prediction_summary
| 'prediction_summary to Dict' >> beam.Map(c.list_to_dict, [
'segment', 'average_retention_probability',
'average_predicted_customer_value',
'average_predicted_order_value', 'average_predicted_purchases',
'total_customer_value', 'number_of_customers',
'perc_of_total_customer_value'])
| 'Write to prediction_summary table' >> io.WriteToBigQuery(
table=c.TableValueProvider(
getattr(runtime_options, c._OPTION_OUTPUT_BQ_PROJECT),
getattr(runtime_options, c._OPTION_OUTPUT_BQ_DATASET),
'prediction_summary'
),
custom_gcs_temp_location=getattr(runtime_options, c._OPTION_TEMP_GCS_LOCATION),
validate=False,
schema='segment:INT64 ,average_retention_probability:FLOAT64'
', average_predicted_customer_value:FLOAT64'
', average_predicted_order_value:FLOAT64'
', average_predicted_purchases:FLOAT64'
', total_customer_value:FLOAT64'
', number_of_customers:FLOAT64'
', perc_of_total_customer_value:FLOAT64',
write_disposition=io.BigQueryDisposition.WRITE_TRUNCATE,
create_disposition=io.BigQueryDisposition.CREATE_IF_NEEDED)
)
prediction_summary_extra_dimension = (
prediction_by_customer
        | 'Discard prediction if there is no extra dimension' >>
beam.FlatMap(c.discard_if_no_extra_dimension,
pvalue.AsSingleton(options))
| beam.Map(lambda x: (x[10], x)) # extra dimension
| 'Group customer predictions by extra dimension' >>
beam.GroupByKey()
| beam.FlatMap(c.generate_prediction_summary_extra_dimension,
pvalue.AsSingleton(tot_equity),
pvalue.AsSingleton(options))
)
_ = (
prediction_summary_extra_dimension
| 'prediction_summary_extra_dimension to Dict' >> beam.Map(c.list_to_dict, [
'extra_dimension', 'average_retention_probability',
'average_predicted_customer_value',
'average_predicted_order_value',
'average_predicted_purchases', 'total_customer_value',
'number_of_customers', 'perc_of_total_customer_value'])
| 'Write to prediction_summary_extra_dimension table' >> io.WriteToBigQuery(
table=c.TableValueProvider(
getattr(runtime_options, c._OPTION_OUTPUT_BQ_PROJECT),
getattr(runtime_options, c._OPTION_OUTPUT_BQ_DATASET),
'prediction_summary_extra_dimension'
),
custom_gcs_temp_location=getattr(runtime_options, c._OPTION_TEMP_GCS_LOCATION),
validate=False,
schema='extra_dimension:STRING'
', average_retention_probability:FLOAT64'
', average_predicted_customer_value:FLOAT64'
', average_predicted_order_value:FLOAT64'
', average_predicted_purchases:FLOAT64'
', total_customer_value:FLOAT64'
', number_of_customers:INT64'
', perc_of_total_customer_value:FLOAT64',
write_disposition=io.BigQueryDisposition.WRITE_TRUNCATE,
create_disposition=io.BigQueryDisposition.CREATE_IF_NEEDED)
)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
|
|
import xlwt
import requests
import ast
import datetime
from openpyxl import Workbook
now = datetime.datetime.now()
"""
This takes a list of PLOS article DOIs and produces an Excel spreadsheet with a monthly breakdown of the ALM data for every article.
The list of articles can be a .txt file with the DOIs separated by line breaks, or a CSV file where the DOIs are in separate cells along the same column.
Requires:
- The Excel file creator package xlwt, available from https://pypi.python.org/pypi/xlwt
The report function below is the entry point; it successfully produced breakdowns in July 2015.
"""
class ALM:
def __init__(self, dictionaryWithSourceNamesAsKeys, listOfSourceNames, doi,title,dateParts):
self.dic = dictionaryWithSourceNamesAsKeys
self.sources = listOfSourceNames
self.doi = str(doi)
self.title = str(title)
self.dateParts = dateParts
def getALM(doi,url="http://alm.plos.org/api/events?work_id=http://doi.org/", urlForName = "http://alm.plos.org/api/works/doi:"):
#now add the rest of the url...
BASE_HEADERS = {'Accept': 'application/json',"version":"6"}
parameters= {'api_key':'PTAGXPDWSBH_CJRNJE54'}
urlForName = urlForName + str(doi)
url += str(doi)
if url:
resp = requests.get(url,
params = parameters,
headers=BASE_HEADERS)
if resp.status_code != 200:# check for html errors
return doi + " returns " + str(resp.status_code) + " error"
resp = ast.literal_eval(resp.text) ## this built in function maps the text to dictionarys/lists
#we now want to arrange this into a more searchable database, we can go down a level
events = resp["events"] # we only want the events since the rest of the data is only metadata on the request
# events is a list of dictionaries containing all the data for a source
    # the source names are kept under the "source_id" key in each event's dictionary; we want this to be searchable by name since the order is not consistent between articles
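    # For illustration only, the structures built below end up with this shape
    # (the source names here are examples taken from the debug notes further
    # down, not a fixed list):
    #   dicByName = {"counter": {...event dict...}, "pmceuropedata": {...}, ...}
    #   sources = ["counter", "pmceuropedata", ...]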
dicByName = {}
sources = [] # having a list of the sources so we can iterate easily on it
#check if events is empty
if len(events) == 0:
return doi + " returns empty"
for a in events:
sources.append(a["source_id"])
dicByName[a["source_id"]] = a
#do something similar to get the name
resp = requests.get(urlForName,
params = parameters,
headers=BASE_HEADERS)
if resp.status_code != 200:# check for html errors
return '"' + doi + '"' + " returns " + str(resp.status_code) + " error"
resp = ast.literal_eval(resp.text)
name = resp["work"]["title"]
pubDate = resp["work"]["issued"]["date-parts"][0]
#create an instance of the alm class
asAlm = ALM(dicByName,sources,doi,name,pubDate)
return asAlm
else:
return "invalid doi or url"
def report(articleFile, output, skipErrorALMs = False, dailyNumbers = False):
"""
    Takes a file (articleFile) with a list of DOIs to run. Saves an Excel spreadsheet in the output folder with month-by-month breakdowns of the ALMs for each
    article from all sources available via the PLOS ALM API v6.0.
    It will also optionally produce a daily breakdown where daily data is available (dailyNumbers=True).
    An article which returns an error will stop the function; however, this is bypassed when skipErrorALMs is True.
"""
outputPath = output + "\output.xls"
#articleFile = open("C:\Users\Hugh\Desktop\New OpenDocument Spreadsheet.csv")
#articleList = articleFile.readlines()
try:
articleFile = open(articleFile)
articleList = articleFile.readlines()
except:
articleList = [articleFile]
if articleFile == "":
raise ValueError
    # clean up the article list if the entries have been separated by commas
tempList = []
for a in articleList:
a = a.rstrip(",\n")
tempList.append(a)
articleList = tempList
almMasterList = [] #we want a master list to keep all the alms
#collect all the alms!
for a in tempList:
tempALM = getALM(a)
if type(tempALM) == str:
if skipErrorALMs == False:
return tempALM
else:
almMasterList.append(tempALM)
print len(almMasterList)
#print almMasterList[0].title
#print almMasterList[0].dic["pmceuropedata"]["timestamp"]
#print str((int(almMasterList[0].dic["pmceuropedata"]["timestamp"][0:4]),int(almMasterList[0].dic["pmceuropedata"]["timestamp"][5:7])))
#find the oldest to make the spreadsheet from by counter
    oldestLength = 0
    oldest = False
    for b in almMasterList:
        #print b.sources
        if len(b.dic["counter"]["by_month"]) > oldestLength:
            oldestLength = len(b.dic["counter"]["by_month"])
oldest = b
if oldest == False:
return("No ALM found to build report")
#Now we have the oldest we want to make the xls
book = xlwt.Workbook(encoding="utf-8")
#set up all the sheets with article titles and dois named by source
sheetDict = {}
dateColumnIndexDic = {} # in the form {"sourceName": [(month,year)...]...}
for sor in oldest.sources:
sheet=book.add_sheet(sor)
sheet.write(0, 0, "DOI")
sheet.write(0, 1,"Title")
sheetDict[sor] = sheet
#while we're here lets write all the dates on every page and keep a list of the dates for each source
months = oldest.dic[sor]["by_month"]
listOfMonths = []#in the form [(month,year)...]
columnForMonth = 2
dateIndex = 0
month = 0
year = 0
for date in months:
if date == months[0]:
sheet.write(0,columnForMonth, (str(months[dateIndex]["month"]) + "-" +str(months[dateIndex]["year"])))
listOfMonths.append((months[dateIndex]["month"],months[dateIndex]["year"]))
month = months[dateIndex]["month"]
year = months[dateIndex]["year"]
else:
if(month!=12):
month +=1
else:
month = 1
year +=1
sheet.write(0,columnForMonth, str(month)+"-"+str(year))
listOfMonths.append((month,year))
columnForMonth += 1
dateIndex +=1
try:
while listOfMonths[-1][1] < int(oldest.dic[sor]["timestamp"][0:4]) or listOfMonths[-1][0] < int(almMasterList[0].dic[sor]["timestamp"][5:7]):#keep adding months if we're not up to date
#print listOfMonths[-1][1] < int(oldest.dic[sor]["timestamp"][0:4])
if(month!=12):
month +=1
else:
month = 1
year +=1
sheet.write(0,columnForMonth, str(month)+"-"+str(year))
listOfMonths.append((month,year))
columnForMonth += 1
dateIndex +=1
except:
                pass
# print sor
dateColumnIndexDic[sor] = listOfMonths
#now lets run through every manuscript and write their data
workingRow = 1
for met in almMasterList:
for s in met.sources:
try:
workingSheet = sheetDict[s]
except: #if the source wasn't in the oldest we can just make it
workingSheet = book.add_sheet(s)
workingSheet.write(0, 0, "DOI")
workingSheet.write(0, 1,"Title")
workingSheet.write(workingRow,0, met.doi)
workingSheet.write(workingRow,1, met.title)
#columnInitializer = 2 # reset the columns
columnIndex = False
#dateList.index(metric.sources['counter'].by_month[0][0]) + columnForMonth
for data in met.dic[s]["by_month"]:
try:
columnIndex = dateColumnIndexDic[s].index((data["month"],data["year"])) +2
workingSheet.write(workingRow, columnIndex, data["total"])
except:
# need to add missing dates
try:#what if it's empty?
if len(dateColumnIndexDic.keys()) == 0:
columnIndex = 2
workingSheet.write(workingRow, columnIndex, data["total"])
workingSheet.write(0,columnIndex, str(data["month"])+"-"+str(data["year"]))
except:
try:#what if it's missing a more recent date?
columnIndex = dateColumnIndexDic[s].index((data["month"]-1,data["year"])) +3
workingSheet.write(workingRow, columnIndex, data["total"])
workingSheet.write(0,columnIndex, str(data["month"])+"-"+str(data["year"]))
except:
return "Unable to write ALM at: " + str(met.doi) + "_" + str(s) + "_" + str(data["month"]-1)+"-"+str(data["year"])
workingRow+=1#move to the next row for a new article
try:
book.save(outputPath) # the path to save the output
except:
return "Can't access output file"
if dailyNumbers == True:
dailyReport(almMasterList, output, oldest)
def dailyReport (almList, outputPath, oldest):
outputPath += "\dailyOutput.xls"
#set up sheet
dailyBook = Workbook()
sourceDatesDic = {} # keep a dictionary to find the column indexes for sources
removalDic = {}
#get the days for counter and do it for all sources
startYear = oldest.dateParts[0]
startMonth = oldest.dateParts[1]
startDay = 1
#check if any of the sources are missing from the oldest sources, if so add them for this purpose
tempSources = oldest.sources
for t in almList:
for k in t.sources:
if k not in tempSources:
print k
tempSources.append(k)
for sor in tempSources:
year = startYear
month = startMonth
day = startDay
sheet=dailyBook.create_sheet()
sheet.title = sor
values = ["DOI","Title", "Pub Date","Year","Month","Day"]
cells = ["A4","B4", "C4","C1","c2","c3"]
removalDic[sor] = 0
for c in cells:
workingCell = sheet.cell(c)
workingCell.value = values[cells.index(c)]
workingColumn = 4
datesDic = {} # keep a dictionary of the date indexes for this source with the key [year, month, day]
while year <= now.year:
while month <= 12:
while day <= 31:
toWrite = [year,month,day]
rows = [1,2,3]
for d in rows:
cell = sheet.cell(row=d,column = workingColumn)
cell.value = toWrite[rows.index(d)]
datesDic[str([year,month,day])] = workingColumn
workingColumn +=1
if (month == 2 and day == 28) or (day == 30 and (month == 11 or month ==4 or month ==6 or month ==9)):
#print str([year,month,day])
break
day += 1
month+=1
day = 1
month = 1
year+=1
sourceDatesDic[sor] = datesDic
workingRow = 5
for met in almList:
for s in met.sources:
workingSheet = dailyBook.get_sheet_by_name(s)
columns = [1,2,3]
content = [met.doi, met.title, str(met.dateParts)]
for w in columns:
cell = workingSheet.cell(row=workingRow,column=w)
cell.value = content[columns.index(w)]
if len(met.dic[s]["by_day"]) != 0:
for data in met.dic[s]["by_day"]:
try:
columnIndex = sourceDatesDic[s][str([data["year"],data["month"],data["day"]])]
cell = workingSheet.cell(row=workingRow, column = columnIndex)
if s == "counter":
cell.value = data["pdf"]+data["html"]
else:
cell.value= data["total"]
except:
print str([data["year"],data["month"],data["day"]])
else:
removalDic[s] += 1
workingRow+=1#move to the next row for a new article
try:
print outputPath
#remove sources with no daily data
for q in tempSources:
if removalDic[q] == len(almList):
dailyBook.remove_sheet(dailyBook.get_sheet_by_name(q))
dailyBook.remove_sheet(dailyBook.get_sheet_by_name("Sheet"))#remove the default sheet
dailyBook.save(outputPath)
except:
return "Can't access daily output file"
|
|
'''Thread-safe version of Tkinter.
Copyright (c) 2009, Allen B. Taylor
This module is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser Public License for more details.
You should have received a copy of the GNU Lesser Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Usage:
import mtTkinter as Tkinter
# Use "Tkinter." as usual.
or
from mtTkinter import *
# Use Tkinter module definitions as usual.
This module modifies the original Tkinter module in memory, making all
functionality thread-safe. It does this by wrapping the Tk class' tk
instance with an object that diverts calls through an event queue when
the call is issued from a thread other than the thread in which the Tk
instance was created. The events are processed in the creation thread
via an 'after' event.
The modified Tk class accepts two additional keyword parameters on its
__init__ method:
mtDebug:
0 = No debug output (default)
1 = Minimal debug output
...
9 = Full debug output
mtCheckPeriod:
Amount of time in milliseconds (default 100) between checks for
out-of-thread events when things are otherwise idle. Decreasing
this value can improve GUI responsiveness, but at the expense of
consuming more CPU cycles.
Note that, because it modifies the original Tkinter module (in memory),
other modules that use Tkinter (e.g., Pmw) reap the benefits automagically
as long as mtTkinter is imported at some point before extra threads are
created.
Author: Allen B. Taylor, a.b.taylor@gmail.com
'''
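# A minimal usage sketch (it assumes only what the docstring above describes;
# the widget, thread, and timing values are illustrative):
#
#   import threading
#   from mtTkinter import *
#
#   root = Tk(mtDebug=0, mtCheckPeriod=50)
#   label = Label(root, text="waiting...")
#   label.pack()
#
#   def worker():
#       # This runs in a non-GUI thread; mtTkinter marshals the call through
#       # its event queue so it executes in the thread that created Tk().
#       label.config(text="updated from a worker thread")
#
#   threading.Thread(target=worker).start()
#   root.mainloop()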
from Tkinter import *
import threading
import Queue
class _Tk(object):
"""
Wrapper for underlying attribute tk of class Tk.
"""
def __init__(self, tk, mtDebug = 0, mtCheckPeriod = 10):
self._tk = tk
# Create the incoming event queue.
self._eventQueue = Queue.Queue(1)
# Identify the thread from which this object is being created so we can
# tell later whether an event is coming from another thread.
self._creationThread = threading.currentThread()
# Store remaining values.
self._debug = mtDebug
self._checkPeriod = mtCheckPeriod
def __getattr__(self, name):
# Divert attribute accesses to a wrapper around the underlying tk
# object.
return _TkAttr(self, getattr(self._tk, name))
class _TkAttr(object):
"""
Thread-safe callable attribute wrapper.
"""
def __init__(self, tk, attr):
self._tk = tk
self._attr = attr
def __call__(self, *args, **kwargs):
"""
Thread-safe method invocation.
Diverts out-of-thread calls through the event queue.
Forwards all other method calls to the underlying tk object directly.
"""
# Check if we're in the creation thread.
if threading.currentThread() == self._tk._creationThread:
# We're in the creation thread; just call the event directly.
if self._tk._debug >= 8 or \
self._tk._debug >= 3 and self._attr.__name__ == 'call' and \
len(args) >= 1 and args[0] == 'after':
print 'Calling event directly:', \
self._attr.__name__, args, kwargs
return self._attr(*args, **kwargs)
else:
# We're in a different thread than the creation thread; enqueue
# the event, and then wait for the response.
responseQueue = Queue.Queue(1)
if self._tk._debug >= 1:
print 'Marshalling event:', self._attr.__name__, args, kwargs
self._tk._eventQueue.put((self._attr, args, kwargs, responseQueue))
isException, response = responseQueue.get()
# Handle the response, whether it's a normal return value or
# an exception.
if isException:
exType, exValue, exTb = response
raise exType, exValue, exTb
else:
return response
# Define a hook for class Tk's __init__ method.
def _Tk__init__(self, *args, **kwargs):
# We support some new keyword arguments that the original __init__ method
# doesn't expect, so separate those out before doing anything else.
new_kwnames = ('mtCheckPeriod', 'mtDebug')
new_kwargs = {}
for name, value in kwargs.items():
if name in new_kwnames:
new_kwargs[name] = value
del kwargs[name]
# Call the original __init__ method, creating the internal tk member.
self.__original__init__mtTkinter(*args, **kwargs)
# Replace the internal tk member with a wrapper that handles calls from
# other threads.
self.tk = _Tk(self.tk, **new_kwargs)
# Set up the first event to check for out-of-thread events.
self.after_idle(_CheckEvents, self)
# Replace Tk's original __init__ with the hook.
Tk.__original__init__mtTkinter = Tk.__init__
Tk.__init__ = _Tk__init__
def _CheckEvents(tk):
"Event checker event."
used = False
try:
# Process all enqueued events, then exit.
while True:
try:
# Get an event request from the queue.
method, args, kwargs, responseQueue = \
tk.tk._eventQueue.get_nowait()
except:
# No more events to process.
break
else:
# Call the event with the given arguments, and then return
# the result back to the caller via the response queue.
used = True
if tk.tk._debug >= 2:
print 'Calling event from main thread:', \
method.__name__, args, kwargs
try:
responseQueue.put((False, method(*args, **kwargs)))
except SystemExit, ex:
raise SystemExit, ex
except Exception, ex:
# Calling the event caused an exception; return the
# exception back to the caller so that it can be raised
# in the caller's thread.
from sys import exc_info
exType, exValue, exTb = exc_info()
responseQueue.put((True, (exType, exValue, exTb)))
finally:
# Schedule to check again. If we just processed an event, check
# immediately; if we didn't, check later.
if used:
tk.after_idle(_CheckEvents, tk)
else:
tk.after(tk.tk._checkPeriod, _CheckEvents, tk)
# Test thread entry point.
def _testThread(root):
text = "This is Tcl/Tk version %s" % TclVersion
if TclVersion >= 8.1:
try:
text = text + unicode("\nThis should be a cedilla: \347",
"iso-8859-1")
except NameError:
pass # no unicode support
try:
if root.globalgetvar('tcl_platform(threaded)'):
text = text + "\nTcl is built with thread support"
else:
raise RuntimeError
except:
text = text + "\nTcl is NOT built with thread support"
text = text + "\nmtTkinter works with or without Tcl thread support"
label = Label(root, text=text)
label.pack()
button = Button(root, text="Click me!",
command=lambda root=root: root.button.configure(
text="[%s]" % root.button['text']))
button.pack()
root.button = button
quit = Button(root, text="QUIT", command=root.destroy)
quit.pack()
# The following three commands are needed so the window pops
# up on top on Windows...
root.iconify()
root.update()
root.deiconify()
# Simulate button presses...
button.invoke()
root.after(1000, _pressOk, root, button)
# Test button continuous press event.
def _pressOk(root, button):
button.invoke()
try:
root.after(1000, _pressOk, root, button)
except:
pass # Likely we're exiting
# Test. Mostly borrowed from the Tkinter module, but the important bits moved
# into a separate thread.
if __name__ == '__main__':
import threading
root = Tk(mtDebug = 1)
thread = threading.Thread(target = _testThread, args=(root,))
thread.start()
root.mainloop()
thread.join()
|
|
# Copyright 2017 Brandon T. Gorman
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# BUILT USING PYTHON 3.6.0
import ctypes as ct
import pandas as pd
import numpy as np
import random, csv, sys, os
import math
import classes_water as ENC
def main(water_df):
os_username = os.getlogin()
# --------------
# READ CSV FILES
# --------------
csv_curve = pd.read_csv('./data_water/network-water/2000curve.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_junction = pd.read_csv('./data_water/network-water/2100junction.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_reservoir = pd.read_csv('./data_water/network-water/2101reservoir.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_tank = pd.read_csv('./data_water/network-water/2102tank.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_pipe = pd.read_csv('./data_water/network-water/2200pipe.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_pump = pd.read_csv('./data_water/network-water/2201pump.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_valve = pd.read_csv('./data_water/network-water/2202valve.csv', sep=',', header=1, index_col=None, dtype=np.float64)
# -----------------
# CREATE COMPONENTS
# -----------------
object_curve = ENC.Curve(csv_curve)
object_junction = ENC.Junction(csv_junction)
object_reservoir = ENC.Reservoir(csv_reservoir)
object_tank = ENC.Tank(csv_tank)
object_pipe = ENC.Pipe(csv_pipe)
object_pump = ENC.Pump(csv_pump)
object_valve = ENC.Valve(csv_valve)
# -----------------------
# ADD COMPONENTS TO LISTS
# -----------------------
w_object_list = [object_junction, object_reservoir, object_tank, # Water NODES
object_pipe, object_pump, object_valve, # Water LINKS
object_curve] # Water SYSTEM OPS
interconn_dict = {}
# ---------
# RUN EPANET and OPENDSS
# ---------
def run_EPANET():
filedir = 'data_water/en-inputs/en-input.inp'
with open(filedir, 'w', newline='\n') as csvfile:
writer = csv.writer(csvfile, delimiter=' ')
templist = ['[TITLE]']
writer.writerow(templist)
writer.writerow('')
for water_object in w_object_list:
water_object.createAllEN(writer, interconn_dict)
templist = ['[ENERGY]']
writer.writerow(templist)
templist = ['Global', 'Efficiency', 75]
writer.writerow(templist)
templist = ['Global', 'Price', 0]
writer.writerow(templist)
templist = ['Demand', 'Charge', 0]
writer.writerow(templist)
writer.writerow('')
templist = ['[REACTIONS]']
writer.writerow(templist)
templist = ['Order', 'Bulk', 1]
writer.writerow(templist)
templist = ['Order', 'Tank', 1]
writer.writerow(templist)
templist = ['Order', 'Wall', 1]
writer.writerow(templist)
templist = ['Global', 'Bulk', 0]
writer.writerow(templist)
templist = ['Global', 'Wall', 0]
writer.writerow(templist)
templist = ['Limiting', 'Potential', 0]
writer.writerow(templist)
templist = ['Roughness', 'Correlation', 0]
writer.writerow(templist)
writer.writerow('')
templist = ['[TIMES]']
writer.writerow(templist)
templist = ['Duration', '00:00']
writer.writerow(templist)
templist = ['Hydraulic', 'Timestep', '0:01:00']
writer.writerow(templist)
templist = ['Quality', 'Timestep', '0:06']
writer.writerow(templist)
templist = ['Pattern', 'Timestep', '1:00']
writer.writerow(templist)
templist = ['Pattern', 'Start', '0:00']
writer.writerow(templist)
templist = ['Report', 'Timestep', '1:00']
writer.writerow(templist)
templist = ['Report', 'Start', '0:00']
writer.writerow(templist)
templist = ['Start', 'ClockTime', 12, 'am']
writer.writerow(templist)
templist = ['Statistic', 'NONE']
writer.writerow(templist)
writer.writerow('')
templist = ['[REPORT]']
writer.writerow(templist)
templist = ['Status', 'No']
writer.writerow(templist)
templist = ['Summary', 'No']
writer.writerow(templist)
templist = ['Page', 0]
writer.writerow(templist)
writer.writerow('')
templist = ['[OPTIONS]']
writer.writerow(templist)
templist = ['Units', 'GPM'] #GPM is US Customary units
writer.writerow(templist)
templist = ['Headloss', 'C-M']
writer.writerow(templist)
templist = ['Specific', 'Gravity', 1]
writer.writerow(templist)
templist = ['Viscosity', 1]
writer.writerow(templist)
templist = ['Trials', 40]
writer.writerow(templist)
templist = ['Accuracy', 0.001]
writer.writerow(templist)
templist = ['CHECKFREQ', 2]
writer.writerow(templist)
templist = ['MAXCHECK', 10]
writer.writerow(templist)
templist = ['DAMPLIMIT', 0]
writer.writerow(templist)
templist = ['Unbalanced', 'Continue', 10]
writer.writerow(templist)
templist = ['Pattern', 1]
writer.writerow(templist)
templist = ['Demand', 'Multiplier', 1.0]
writer.writerow(templist)
templist = ['Emitter', 'Exponent', 0.5]
writer.writerow(templist)
templist = ['Quality', 'None', 'mg/L']
writer.writerow(templist)
templist = ['Diffusivity', 1]
writer.writerow(templist)
templist = ['Tolerance', 0.01]
writer.writerow(templist)
writer.writerow('')
templist=['[END]']
writer.writerow(templist)
epalib = ct.cdll.LoadLibrary('data_water/epanet2mingw64.dll')
# Byte objects
en_input_file = ct.c_char_p(filedir.encode('utf-8'))
en_report_file = ct.c_char_p(str('data_water/en-outputs/out.rpt').encode('utf-8'))
en_byte_file = ct.c_char_p(''.encode('utf-8'))
# Send strings as char* to the epalib function
errorcode = epalib.ENopen(en_input_file, en_report_file, en_byte_file)
if errorcode != 0:
print(1, 'ERRORCODE is', errorcode)
errorcode = epalib.ENopenH()
if errorcode != 0:
print(2, 'ERRORCODE is', errorcode)
init_flag = ct.c_int(0)
errorcode = epalib.ENinitH(init_flag)
if errorcode != 0:
print(3, 'ERRORCODE is', errorcode)
time = ct.pointer(ct.c_long(1))
timestep = ct.pointer(ct.c_long(1))
while True:
errorcode = epalib.ENrunH(time)
if errorcode != 0:
pass
# print(4, 'ERRORCODE is', errorcode)
errorcode = epalib.ENnextH(timestep)
if errorcode != 0:
print(5, 'ERRORCODE is', errorcode)
if timestep.contents.value == 0:
break
for water_object in w_object_list:
water_object.readAllENoutputs(epalib)
errorcode = epalib.ENcloseH()
if errorcode != 0:
print(6, 'ERRORCODE is', errorcode)
errorcode = epalib.ENclose()
if errorcode != 0:
print(7, 'ERRORCODE is', errorcode)
input_list_continuous = []
input_list_categorical = []
input_tensor_continuous = np.empty([0,0], dtype=np.float64).flatten()
input_tensor_categorical = np.empty([0,0], dtype=np.float64).flatten()
for object in w_object_list:
list_continuous, list_categorical, tensor_continuous, tensor_categorical = object.convertToInputTensor()
input_list_continuous = input_list_continuous + list_continuous
input_list_categorical = input_list_categorical + list_categorical
input_tensor_continuous = np.concatenate((input_tensor_continuous, tensor_continuous), axis=0)
input_tensor_categorical = np.concatenate((input_tensor_categorical, tensor_categorical), axis=0)
output_list = []
output_tensor = np.empty([0,0], dtype=np.float64).flatten()
for object in w_object_list:
o_list, o_tensor = object.convertToOutputTensor()
output_list = output_list + o_list
output_tensor = np.concatenate((output_tensor, o_tensor), axis=0)
return input_list_continuous, input_list_categorical, output_list, input_tensor_continuous, input_tensor_categorical, output_tensor
# SIM STEP 1: SET LOAD AND DEMAND CURVES
# ------------------------------
# SIM STEP 2: SET LOAD INTERCONNECTIONS
# ----------------------------------
# SIM STEP 3: SET GENERATOR DISPATCH
# ----------------------------------
# SIM STEP 4: SET JUNCTION INTERCONNECTIONS
# -----------------------------------------
# SIM STEP 5:
# Set water tank levels assuming that water tank levels start at 0
# Set water valve flow control
# ----------------------------
# SIM STEP 6: RUN POWER-WATER SIMULATION
# --------------------------------------
base_curve_matrix = np.array(object_curve.matrix, copy=True)
base_junction_matrix = np.array(object_junction.matrix, copy=True)
base_reservoir_matrix = np.array(object_reservoir.matrix, copy=True)
base_tank_matrix = np.array(object_tank.matrix, copy=True)
base_pipe_matrix = np.array(object_pipe.matrix, copy=True)
base_pump_matrix = np.array(object_pump.matrix, copy=True)
base_valve_matrix = np.array(object_valve.matrix, copy=True)
# Begin failure analysis loop
for pipe_fail_id in [pid for pid in object_pipe.matrix[:, ENC.Pipe.ID] if 0.0 <= pid < 1000.0]:
# Reset objects
object_curve.matrix = np.array(base_curve_matrix, copy=True)
object_junction.matrix = np.array(base_junction_matrix, copy=True)
object_reservoir.matrix = np.array(base_reservoir_matrix, copy=True)
object_tank.matrix = np.array(base_tank_matrix, copy=True)
object_pipe.matrix = np.array(base_pipe_matrix, copy=True)
object_pump.matrix = np.array(base_pump_matrix, copy=True)
object_valve.matrix = np.array(base_valve_matrix, copy=True)
for pipe in object_pipe.matrix:
if pipe[ENC.Pipe.ID] == pipe_fail_id:
pipe[ENC.Pipe.OPERATIONAL_STATUS] = 0.0
print('Failing Pipe ID {}'.format(pipe_fail_id))
artificial_reservoir_id_shift = 1000.0
max_groundwater_flow = 12399.0 # GPM
groundwater_id_shift = 2000.0
# Scale reservoir heads using water_df
for reservoir in object_reservoir.matrix:
if reservoir[ENC.Reservoir.ID] == 21.0:
reservoir[ENC.Reservoir.TOTAL_HEAD] = max(reservoir[ENC.Reservoir.TOTAL_HEAD], 864.92*water_df + 817.08)
elif reservoir[ENC.Reservoir.ID] == 22.0:
reservoir[ENC.Reservoir.TOTAL_HEAD] = max(reservoir[ENC.Reservoir.TOTAL_HEAD], 951.11*water_df + 898.89)
elif reservoir[ENC.Reservoir.ID] == 23.0:
reservoir[ENC.Reservoir.TOTAL_HEAD] = max(reservoir[ENC.Reservoir.TOTAL_HEAD], 668.35*water_df + 631.65)
groundwater_list = []
map_to_groundwater_reservoir = {}
map_to_groundwater_pipe = {}
# Track real reservoirs
for reservoir in object_reservoir.matrix:
if reservoir[ENC.Reservoir.ID] >= 3000.0:
groundwater_list.append(reservoir[ENC.Reservoir.ID])
map_to_groundwater_reservoir[reservoir[ENC.Reservoir.ID]] = reservoir
for pipe in object_pipe.matrix:
if reservoir[ENC.Reservoir.ID] == pipe[ENC.Pipe.ID]:
map_to_groundwater_pipe[reservoir[ENC.Reservoir.ID]] = pipe
# WARNING THIS IS HARDCODED
elif reservoir[ENC.Reservoir.ID] == 23.0:
groundwater_list.append(reservoir[ENC.Reservoir.ID])
map_to_groundwater_reservoir[reservoir[ENC.Reservoir.ID]] = reservoir
for pipe in object_pipe.matrix:
# WARNING THIS IS HARDCODED
if pipe[ENC.Pipe.ID] == 36.0:
map_to_groundwater_pipe[reservoir[ENC.Reservoir.ID]] = pipe
# Loop real reservoirs, turn off the ones with water inflow
while len(groundwater_list) > 0:
# initialize relevant demand junctions
demand_list = []
map_to_junction = {}
map_to_junction_groundwater = {}
map_to_reservoir = {}
map_to_pipe = {}
# Track demand junctions
for junction in object_junction.matrix:
if junction[ENC.Junction.BASE_DEMAND_AVERAGE] > 0.0:
demand_list.append(junction[ENC.Junction.ID])
for junction in object_junction.matrix:
if junction[ENC.Junction.ID] in demand_list:
map_to_junction[junction[ENC.Junction.ID]] = junction
found_junction_groundwater = 0
for junction_groundwater in object_junction.matrix:
if junction[ENC.Junction.ID] + groundwater_id_shift == junction_groundwater[ENC.Junction.ID]:
map_to_junction_groundwater[junction[ENC.Junction.ID]] = junction_groundwater
found_junction_groundwater = 1
if found_junction_groundwater == 0:
map_to_junction_groundwater[junction[ENC.Junction.ID]] = junction
# Track artificial reservoirs
for reservoir in object_reservoir.matrix:
if reservoir[ENC.Reservoir.ID]-artificial_reservoir_id_shift in demand_list:
map_to_reservoir[reservoir[ENC.Reservoir.ID]-artificial_reservoir_id_shift] = reservoir
            # Track pipes for artificial reservoirs
for pipe in object_pipe.matrix:
if pipe[ENC.Pipe.ID]-artificial_reservoir_id_shift in demand_list:
map_to_pipe[pipe[ENC.Pipe.ID]-artificial_reservoir_id_shift] = pipe
# Reset demand junction demands to 0
for junction_id in demand_list:
map_to_junction[junction_id][ENC.Junction.BASE_DEMAND] = 0.0
# Begin EPANET pressure-driven analysis for artificial reservoirs
while len(demand_list) > 0:
                # Close artificial reservoir pipes
for junction_id in demand_list:
map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
# Set valve pressure loss to 0
for valve in object_valve.matrix:
valve[ENC.Valve.MODEL] = 2.0
valve[ENC.Valve.SETTING] = 0.0
run_EPANET()
# Open demand junctions with positive pressure ratio
# Can take multiple iterations
pos_pres_bool = True
while pos_pres_bool:
pos_pres_bool = False
max_pres_id = demand_list[0]
for junction_id in demand_list:
if max(map_to_junction[junction_id][ENC.Junction.PRESSURE], map_to_junction_groundwater[junction_id][ENC.Junction.PRESSURE]) > map_to_junction[max_pres_id][ENC.Junction.MIN_PRESSURE] and map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] == 0.0:
max_pres_id = junction_id
# this uses the MINIMUM ALLOWABLE PRESSURE
if max(map_to_junction[max_pres_id][ENC.Junction.PRESSURE], map_to_junction_groundwater[max_pres_id][ENC.Junction.PRESSURE]) > (map_to_junction[max_pres_id][ENC.Junction.MIN_PRESSURE] - 0.01) and map_to_pipe[max_pres_id][ENC.Pipe.OPERATIONAL_STATUS] == 0.0:
map_to_pipe[max_pres_id][ENC.Pipe.OPERATIONAL_STATUS] = 1.0
pos_pres_bool = True
run_EPANET()
# Set flow control valves to maximum amount of groundwater flow
for valve in object_valve.matrix:
valve[ENC.Valve.MODEL] = 3.0
valve[ENC.Valve.SETTING] = max_groundwater_flow
run_EPANET()
                # Close artificial reservoirs with inflows
# Can take multiple iterations
neg_dem_bool = True
while neg_dem_bool:
neg_dem_bool = False
for junction_id in demand_list:
if map_to_reservoir[junction_id][ENC.Reservoir.DEMAND] < 0.0:
map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
neg_dem_bool = True
run_EPANET()
run_EPANET()
# Set base_demand to maximum if possible
pda_count = 0
demand_list_copy = demand_list.copy()
for junction_id in demand_list_copy:
if map_to_reservoir[junction_id][ENC.Reservoir.DEMAND] >= water_df * map_to_junction[junction_id][ENC.Junction.BASE_DEMAND_AVERAGE]:
map_to_junction[junction_id][ENC.Junction.BASE_DEMAND] = water_df * map_to_junction[junction_id][ENC.Junction.BASE_DEMAND_AVERAGE]
map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
demand_list.remove(junction_id)
pda_count += 1
# Set base_demand to greater than 0 and less than maximum if there are no maximums
if pda_count == 0:
for junction_id in demand_list_copy:
if map_to_reservoir[junction_id][ENC.Reservoir.DEMAND] >= -0.01:
map_to_junction[junction_id][ENC.Junction.BASE_DEMAND] = map_to_reservoir[junction_id][ENC.Reservoir.DEMAND]
map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
demand_list.remove(junction_id)
# End inner loop
run_EPANET()
# Close real reservoirs with inflows if possible
pda_count = 0
groundwater_list_copy = groundwater_list.copy()
for groundwater_id in groundwater_list_copy:
if map_to_groundwater_reservoir[groundwater_id][ENC.Reservoir.DEMAND] > 0.0:
map_to_groundwater_pipe[groundwater_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
groundwater_list.remove(groundwater_id)
pda_count += 1
        # If no real reservoir has an inflow, stop tracking all of them
if pda_count == 0:
for groundwater_id in groundwater_list_copy:
groundwater_list.remove(groundwater_id)
# End middle loop
input_list_continuous1, input_list_categorical1, output_list1, input_tensor_continuous1, input_tensor_categorical1, output_tensor1 = run_EPANET()
# RESULTS STEP 0: Print
# ---------------------
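    # Deficit per junction = max(0, water_df * average base demand - delivered demand);
    # the system deficit is the sum of the per-junction deficits below.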
system_deficit = 0.0
j_1_deficit = 0.0
j_2_deficit = 0.0
j_3_deficit = 0.0
j_5_deficit = 0.0
j_6_deficit = 0.0
j_7_deficit = 0.0
j_8_deficit = 0.0
j_9_deficit = 0.0
j_10_deficit = 0.0
j_13_deficit = 0.0
j_14_deficit = 0.0
j_15_deficit = 0.0
j_16_deficit = 0.0
j_18_deficit = 0.0
j_19_deficit = 0.0
j_28_deficit = 0.0
j_33_deficit = 0.0 # j33 only has water consumption in the interconnected power-water model
for row in object_junction.matrix:
if row[ENC.Junction.BASE_DEMAND_AVERAGE] > 0.0:
pass
if row[ENC.Junction.ID] == 1.0:
j_1_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 2.0:
j_2_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 3.0:
j_3_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 5.0:
j_5_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 6.0:
j_6_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 7.0:
j_7_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 8.0:
j_8_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 9.0:
j_9_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 10.0:
j_10_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 13.0:
j_13_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 14.0:
j_14_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 15.0:
j_15_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 16.0:
j_16_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 18.0:
j_18_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 19.0:
j_19_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
elif row[ENC.Junction.ID] == 28.0:
j_28_deficit = max(0.0, water_df * row[ENC.Junction.BASE_DEMAND_AVERAGE] - row[ENC.Junction.DEMAND])
system_deficit += j_1_deficit
system_deficit += j_2_deficit
system_deficit += j_3_deficit
system_deficit += j_5_deficit
system_deficit += j_6_deficit
system_deficit += j_7_deficit
system_deficit += j_8_deficit
system_deficit += j_9_deficit
system_deficit += j_10_deficit
system_deficit += j_13_deficit
system_deficit += j_14_deficit
system_deficit += j_15_deficit
system_deficit += j_16_deficit
system_deficit += j_18_deficit
system_deficit += j_19_deficit
system_deficit += j_28_deficit
# print('syst {:.2f}'.format(system_deficit))
# print('j_1_ {:.2f}'.format(j_1_deficit))
# print('j_2_ {:.2f}'.format(j_2_deficit))
# print('j_3_ {:.2f}'.format(j_3_deficit))
# print('j_5_ {:.2f}'.format(j_5_deficit))
# print('j_6_ {:.2f}'.format(j_6_deficit))
# print('j_7_ {:.2f}'.format(j_7_deficit))
# print('j_8_ {:.2f}'.format(j_8_deficit))
# print('j_9_ {:.2f}'.format(j_9_deficit))
# print('j_10 {:.2f}'.format(j_10_deficit))
# print('j_13 {:.2f}'.format(j_13_deficit))
# print('j_14 {:.2f}'.format(j_14_deficit))
# print('j_15 {:.2f}'.format(j_15_deficit))
# print('j_16 {:.2f}'.format(j_16_deficit))
# print('j_18 {:.2f}'.format(j_18_deficit))
# print('j_19 {:.2f}'.format(j_19_deficit))
# print('j_28 {:.2f}'.format(j_28_deficit))
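    # NOTE: the CSV is opened in append mode, so each scenario adds one row of
    # (water_df, system deficit, per-junction deficits); a header row, if any,
    # is assumed to be written elsewhere.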
with open('model_outputs/analysis_water_failure/water_failure_analysis_pipe_{}.csv'.format(int(pipe_fail_id)), 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow([water_df, system_deficit, j_1_deficit, j_2_deficit, j_3_deficit, j_5_deficit, j_6_deficit, j_7_deficit, j_8_deficit, j_9_deficit, j_10_deficit, j_13_deficit, j_14_deficit, j_15_deficit, j_16_deficit, j_18_deficit, j_19_deficit, j_28_deficit])
# End outer loop
# RESULTS STEP 1: FORMAT INPUT/OUTPUT TENSORS
# -------------------------------------------
# input_list_continuous = input_list_continuous1
# input_list_categorical = input_list_categorical1
# output_list = output_list1
# input_tensor_continuous = input_tensor_continuous1
# input_tensor_categorical = input_tensor_categorical1
# output_tensor = output_tensor1
# RESULTS STEP 2: WRITE INPUT/OUTPUT TENSORS TO FILE
# --------------------------------------------------
# with open('tensor_outputs/input_list_continuous_columns.csv', 'w') as f:
# writer = csv.writer(f, delimiter=',')
# writer.writerow(input_list_continuous)
# with open('tensor_outputs/input_list_categorical_columns.csv', 'w') as f:
# writer = csv.writer(f, delimiter=',')
# writer.writerow(input_list_categorical)
# with open('tensor_outputs/output_list_columns.csv', 'w') as f:
# writer = csv.writer(f, delimiter=',')
# writer.writerow(output_list)
# with open('tensor_outputs/input_tensor_continuous.csv', 'ab') as f:
# np.savetxt(f, input_tensor_continuous[None, :], fmt='%0.6f', delimiter=' ', newline='\n')
# with open('tensor_outputs/input_tensor_categorical.csv', 'ab') as f:
# np.savetxt(f, input_tensor_categorical[None, :], fmt='%0.6f', delimiter=' ', newline='\n')
# with open('tensor_outputs/output_tensor.csv', 'ab') as f:
# np.savetxt(f, output_tensor[None, :], fmt='%0.6f', delimiter=' ', newline='\n')
# END
# ---
if __name__ == '__main__':
water_df = float(sys.argv[1])
main(water_df)
|
|
"""
Test the neural network package.
Yujia Li, 09/2014
"""
import os
os.environ['GNUMPY_CPU_PRECISION'] = '64'
import gnumpy as gnp
import numpy as np
import pynn.layer as ly
import pynn.loss as ls
import pynn.nn as nn
import pynn.rnn as rnn
import time
_GRAD_CHECK_EPS = 1e-6
_BN_GRAD_CHECK_EPS = 1e-5
_FDIFF_EPS = 1e-8
_DIFF_SCALE_OFFSET = 1
_TEMP_FILE_NAME = '_temp_.pdata'
def vec_str(v):
s = '[ '
for i in range(len(v)):
s += '%11.8f ' % v[i]
s += ']'
return s
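# Compare two vectors (typically a finite-difference gradient and a backprop gradient): print both
# vectors and their difference, then return True if the maximum absolute (or relative, when
# use_rel_err is set) error is below eps.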
def test_vec_pair(v1, msg1, v2, msg2, eps=_GRAD_CHECK_EPS, use_rel_err=False):
print msg1 + ' : ' + vec_str(v1)
print msg2 + ' : ' + vec_str(v2)
n_space = len(msg2) - len('diff')
print ' ' * n_space + 'diff' + ' : ' + vec_str(v1 - v2)
if use_rel_err:
scale = np.maximum(np.abs(v1), np.abs(v2))
err = np.abs(v1 - v2) / (scale + _DIFF_SCALE_OFFSET)
n_space = len(msg2) - len('rel err')
print ' ' * n_space + 'rel err' + ' : ' + vec_str(err)
print 'max err : %.8f' % err.max()
success = err.max() < eps
else:
err = np.abs(v1 - v2).max()
print 'err : %.8f' % err
success = err < eps
print '** SUCCESS **' if success else '** FAIL **'
return success
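# Numerically estimate the gradient of a scalar function f at x with central differences:
# grad[i] ~ (f(x + eps*e_i) - f(x - eps*e_i)) / (2*eps), using eps = _FDIFF_EPS.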
def finite_difference_gradient(f, x):
grad = x * 0
for i in range(len(x)):
x_0 = x[i]
x[i] = x_0 + _FDIFF_EPS
f_plus = f(x)
x[i] = x_0 - _FDIFF_EPS
f_minus = f(x)
grad[i] = (f_plus - f_minus) / (2 * _FDIFF_EPS)
x[i] = x_0
return grad
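# Build a closure f(w) for gradient checking: load parameter vector w into the net, forward-prop x,
# return the loss, then restore the original parameters. When add_noise is set, the RNG is re-seeded
# on every call so the same dropout mask is used for each finite-difference probe.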
def fdiff_grad_generator(net, x, t, add_noise=False, seed=None, **kwargs):
if t is not None:
net.load_target(t)
def f(w):
if add_noise and seed is not None:
gnp.seed_rand(seed)
w_0 = net.get_param_vec()
net.set_param_from_vec(w)
net.forward_prop(x, add_noise=add_noise, compute_loss=True, is_test=False, **kwargs)
loss = net.get_loss()
net.set_param_from_vec(w_0)
return loss
return f
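# Round-trip a network through save_model_to_file / load_model_from_file and verify that both the
# string representation and the parameter vector are preserved.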
def test_net_io(f_create, f_create_void):
net1 = f_create()
print 'Testing %s I/O' % net1.__class__.__name__
net1.save_model_to_file(_TEMP_FILE_NAME)
net2 = f_create_void()
net2.load_model_from_file(_TEMP_FILE_NAME)
os.remove(_TEMP_FILE_NAME)
print 'Net #1: \n' + str(net1)
print 'Net #2: \n' + str(net2)
test_passed = (str(net1) == str(net2))
test_passed = test_passed and test_vec_pair(net1.get_param_vec(), 'Net #1',
net2.get_param_vec(), 'Net #2')
print ''
return test_passed
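# Gradient-check a single nonlinearity: compare backward_prop against a finite-difference gradient
# of sum(forward_prop(x)).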
def test_nonlin(nonlin):
print 'Testing nonlinearity <%s>' % nonlin.get_name()
sx, sy = 3, 4
def f(w):
return nonlin.forward_prop(gnp.garray(w.reshape(sx, sy))).sum()
x = gnp.randn(sx, sy)
y = nonlin.forward_prop(x)
fdiff_grad = finite_difference_gradient(f, x.asarray().ravel())
backprop_grad = nonlin.backward_prop(x, y).asarray().ravel()
test_passed = test_vec_pair(fdiff_grad, 'Finite Difference Gradient',
backprop_grad, ' Backpropagation Gradient')
print ''
return test_passed
def test_nonlin_invert(nonlin):
print 'Testing inverting nonlinearity <%s>' % nonlin.get_name()
sx, sy = 3, 4
x = gnp.rand(sx, sy)
y = nonlin.forward_prop(x)
xx = nonlin.invert_output(y)
test_passed = test_vec_pair(x.asarray().ravel(), '%15s' % 'Input',
xx.asarray().ravel(), '%15s' % 'Inferred Input')
print ''
return test_passed
def test_all_nonlin():
print ''
print '=========================='
print 'Testing all nonlinearities'
print '=========================='
print ''
n_tests = 0
n_success = 0
for nonlin in ly.NONLIN_LIST:
if test_nonlin(nonlin):
n_success += 1
if test_nonlin_invert(nonlin):
n_success += 1
n_tests += 2
print '=============='
print 'Test finished: %d/%d success, %d failed' % (n_success, n_tests, n_tests - n_success)
print ''
return n_success, n_tests
def test_loss(loss, weight=1):
print 'Testing loss <%s>, weight=%g' % (loss.get_name(), weight)
loss.set_weight(weight)
sx, sy = 3, 4
x = gnp.randn(sx, sy)
t = gnp.randn(sx, sy)
if loss.target_should_be_one_hot():
new_t = np.zeros(t.shape)
new_t[np.arange(t.shape[0]), t.argmax(axis=1)] = 1
t = gnp.garray(new_t)
elif loss.target_should_be_normalized():
t = t - t.min(axis=1)[:,gnp.newaxis] + 1
t /= t.sum(axis=1)[:,gnp.newaxis]
elif loss.target_should_be_hinge():
new_t = -np.ones(t.shape)
new_t[np.arange(t.shape[0]), t.argmax(axis=1)] = 1
t = gnp.garray(new_t)
loss.load_target(t)
def f(w):
return loss.compute_loss_and_grad(gnp.garray(w.reshape(sx, sy)))[0]
fdiff_grad = finite_difference_gradient(f, x.asarray().ravel())
backprop_grad = loss.compute_loss_and_grad(x, compute_grad=True)[1].asarray().ravel()
test_passed = test_vec_pair(fdiff_grad, 'Finite Difference Gradient',
backprop_grad, ' Backpropagation Gradient')
print ''
return test_passed
def test_all_loss():
print ''
print '=================='
print 'Testing all losses'
print '=================='
print ''
n_tests = len(ls.LOSS_LIST) * 2
n_success = 0
for loss in ls.LOSS_LIST:
if test_loss(loss, weight=1):
n_success += 1
if test_loss(loss, weight=0.5):
n_success += 1
print '=============='
print 'Test finished: %d/%d success, %d failed' % (n_success, n_tests, n_tests - n_success)
print ''
return n_success, n_tests
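# Gradient-check one Layer under the requested configuration (dropout noise, with/without a loss,
# sparsity penalty, batch normalization) by comparing backprop gradients with finite differences.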
def test_layer(add_noise=False, no_loss=False, loss_after_nonlin=False,
sparsity_weight=0, use_batch_normalization=False):
print 'Testing layer ' + ('with noise' if add_noise else 'without noise') \
+ ', ' + ('without loss' if no_loss else 'with loss') \
+ ', ' + ('without sparsity' if sparsity_weight == 0 else 'with sparsity') \
+ ', ' + ('without batch normalization' if not use_batch_normalization else 'with batch normalization')
in_dim = 4
out_dim = 3
n_cases = 3
sparsity = 0.1
x = gnp.randn(n_cases, in_dim)
t = gnp.randn(n_cases, out_dim)
if no_loss:
loss = None
else:
loss = ls.get_loss_from_type_name(ls.LOSS_NAME_SQUARED)
loss.load_target(t)
loss.set_weight(2.5)
seed = 8
dropout_rate = 0.5 if add_noise else 0
nonlin_type = ly.NONLIN_NAME_SIGMOID if sparsity_weight > 0 \
else ly.NONLIN_NAME_TANH
layer = ly.Layer(in_dim, out_dim, nonlin_type=nonlin_type,
dropout=dropout_rate, sparsity=sparsity, sparsity_weight=sparsity_weight,
loss=loss, loss_after_nonlin=loss_after_nonlin, use_batch_normalization=use_batch_normalization)
if sparsity_weight > 0:
# disable smoothing over minibatches
layer._sparsity_smoothing = 1.0
w_0 = layer.params.get_param_vec()
if add_noise:
gnp.seed_rand(seed)
layer.params.clear_gradient()
layer.forward_prop(x, compute_loss=True, is_test=False)
layer.backward_prop()
backprop_grad = layer.params.get_grad_vec()
def f(w):
if add_noise:
# this makes sure the same units are dropped out every time this
# function is called
gnp.seed_rand(seed)
layer.params.set_param_from_vec(w)
layer.forward_prop(x, compute_loss=True, is_test=False)
if layer.sparsity_weight == 0:
return layer.loss_value
else:
return layer.loss_value + layer._sparsity_objective
fdiff_grad = finite_difference_gradient(f, w_0)
test_passed = test_vec_pair(fdiff_grad, 'Finite Difference Gradient',
backprop_grad, ' Backpropagation Gradient',
eps=_GRAD_CHECK_EPS if not use_batch_normalization else _BN_GRAD_CHECK_EPS,
use_rel_err=use_batch_normalization)
print ''
gnp.seed_rand(int(time.time()))
return test_passed
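# Gradient-check a standalone BatchNormalizationLayer (its gamma and beta parameters) against a
# squared loss on its output.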
def test_batch_normalization_layer():
print 'Testing Batch Normalization layer'
in_dim = 3
n_cases = 5
x = gnp.randn(n_cases, in_dim) * 2 + 3
t = gnp.randn(n_cases, in_dim) * 2
loss = ls.get_loss_from_type_name(ls.LOSS_NAME_SQUARED)
loss.load_target(t)
bn_layer = ly.BatchNormalizationLayer(in_dim)
bn_layer.params.gamma = gnp.rand(in_dim)
bn_layer.params.beta = gnp.rand(in_dim)
w_0 = bn_layer.params.get_param_vec()
y = bn_layer.forward_prop(x, is_test=False)
_, loss_grad = loss.compute_not_weighted_loss_and_grad(y, True)
bn_layer.backward_prop(loss_grad)
backprop_grad = bn_layer.params.get_grad_vec()
def f(w):
bn_layer.params.set_param_from_vec(w)
y = bn_layer.forward_prop(x, is_test=False)
return loss.compute_not_weighted_loss_and_grad(y)[0]
fdiff_grad = finite_difference_gradient(f, w_0)
test_passed = test_vec_pair(fdiff_grad, 'Finite Difference Gradient',
backprop_grad, ' Backpropagation Gradient', eps=_BN_GRAD_CHECK_EPS,
use_rel_err=True)
print ''
return test_passed
def test_all_layer():
print ''
print '==============='
print 'Testing a layer'
print '==============='
print ''
n_success = 0
if test_layer(add_noise=False):
n_success += 1
if test_layer(add_noise=True):
n_success += 1
if test_layer(add_noise=False, no_loss=True):
n_success += 1
if test_layer(add_noise=False, loss_after_nonlin=True):
n_success += 1
if test_layer(add_noise=True, loss_after_nonlin=True):
n_success += 1
if test_layer(no_loss=True, sparsity_weight=1.0):
n_success += 1
if test_layer(sparsity_weight=1.0):
n_success += 1
if test_layer(add_noise=True, sparsity_weight=1.0):
n_success += 1
if test_batch_normalization_layer():
n_success += 1
if test_layer(add_noise=True, use_batch_normalization=True):
n_success += 1
if test_layer(add_noise=False, use_batch_normalization=True):
n_success += 1
if test_layer(add_noise=True, use_batch_normalization=True, loss_after_nonlin=True):
n_success += 1
if test_layer(add_noise=False, use_batch_normalization=True, loss_after_nonlin=False):
n_success += 1
if test_layer(add_noise=True, use_batch_normalization=True, loss_after_nonlin=True, sparsity_weight=1.0):
n_success += 1
n_tests = 14
print '=============='
print 'Test finished: %d/%d success, %d failed' % (n_success, n_tests, n_tests - n_success)
print ''
return n_success, n_tests
def create_neuralnet(dropout_rate, loss_after_nonlin=False, use_batch_normalization=False):
in_dim = 3
out_dim = 2
h1_dim = 2
h2_dim = 2
h3_dim = 2
net = nn.NeuralNet(in_dim, out_dim)
net.add_layer(h1_dim, nonlin_type=ly.NONLIN_NAME_TANH, dropout=0)
net.add_layer(h2_dim, nonlin_type=ly.NONLIN_NAME_SIGMOID, dropout=dropout_rate,
use_batch_normalization=use_batch_normalization)
#net.add_layer(h3_dim, nonlin_type=ly.NONLIN_NAME_RELU, dropout=dropout_rate)
#net.add_layer(10, nonlin_type=ly.NONLIN_NAME_RELU, dropout=dropout_rate)
#net.add_layer(10, nonlin_type=ly.NONLIN_NAME_RELU, dropout=dropout_rate)
net.add_layer(0, nonlin_type=ly.NONLIN_NAME_LINEAR, dropout=dropout_rate,
use_batch_normalization=use_batch_normalization)
net.set_loss(ls.LOSS_NAME_SQUARED, loss_weight=1.1, loss_after_nonlin=loss_after_nonlin)
return net
def test_neuralnet(add_noise=False, loss_after_nonlin=False, use_batch_normalization=False):
print 'Testing NeuralNet, ' + ('with noise' if add_noise else 'without noise') \
+ ', ' + ('with BN' if use_batch_normalization else 'without BN')
n_cases = 5
seed = 8
dropout_rate = 0.5 if add_noise else 0
net = create_neuralnet(dropout_rate, loss_after_nonlin=loss_after_nonlin, use_batch_normalization=use_batch_normalization)
print net
x = gnp.randn(n_cases, net.in_dim)
t = gnp.randn(n_cases, net.out_dim)
if net.loss.target_should_be_one_hot():
new_t = np.zeros(t.shape)
new_t[np.arange(t.shape[0]), t.argmax(axis=1)] = 1
t = gnp.garray(new_t)
elif net.loss.target_should_be_normalized():
t = t - t.min(axis=1)[:,gnp.newaxis] + 1
t /= t.sum(axis=1)[:,gnp.newaxis]
net.load_target(t)
if add_noise:
gnp.seed_rand(seed)
net.forward_prop(x, add_noise=add_noise, compute_loss=True, is_test=False)
net.clear_gradient()
net.backward_prop()
backprop_grad = net.get_grad_vec()
f = fdiff_grad_generator(net, x, None, add_noise=add_noise, seed=seed)
fdiff_grad = finite_difference_gradient(f, net.get_param_vec())
eps = _BN_GRAD_CHECK_EPS if use_batch_normalization else _GRAD_CHECK_EPS
test_passed = test_vec_pair(fdiff_grad, 'Finite Difference Gradient',
backprop_grad, ' Backpropagation Gradient', eps=eps, use_rel_err=use_batch_normalization)
print ''
gnp.seed_rand(int(time.time()))
return test_passed
def test_neuralnet_io(loss_after_nonlin=False, use_batch_normalization=False):
def f_create():
return create_neuralnet(0.5, loss_after_nonlin=loss_after_nonlin, use_batch_normalization=use_batch_normalization)
def f_create_void():
return nn.NeuralNet(0,0)
return test_net_io(f_create, f_create_void)
def test_all_neuralnet():
print ''
print '================='
print 'Testing NeuralNet'
print '================='
print ''
n_success = 0
if test_neuralnet(add_noise=False):
n_success += 1
if test_neuralnet(add_noise=True):
n_success += 1
if test_neuralnet(add_noise=False, loss_after_nonlin=True):
n_success += 1
if test_neuralnet(add_noise=True, loss_after_nonlin=True):
n_success += 1
if test_neuralnet(add_noise=False, loss_after_nonlin=True, use_batch_normalization=True):
n_success += 1
if test_neuralnet(add_noise=True, loss_after_nonlin=True, use_batch_normalization=True):
n_success += 1
if test_neuralnet_io():
n_success += 1
if test_neuralnet_io(loss_after_nonlin=True):
n_success += 1
if test_neuralnet_io(loss_after_nonlin=True, use_batch_normalization=True):
n_success += 1
n_tests = 9
print '=============='
print 'Test finished: %d/%d success, %d failed' % (n_success, n_tests, n_tests - n_success)
print ''
return n_success, n_tests
def create_stacked_net(in_dim, out_dim, dropout_rate):
net1 = nn.NeuralNet(3,out_dim[0])
net1.add_layer(2, nonlin_type=ly.NONLIN_NAME_TANH, dropout=0)
net1.add_layer(0, nonlin_type=ly.NONLIN_NAME_SIGMOID, dropout=dropout_rate)
net1.set_loss(ls.LOSS_NAME_SQUARED, loss_weight=0.5)
net2 = nn.NeuralNet(out_dim[0], out_dim[1])
net2.add_layer(3, nonlin_type=ly.NONLIN_NAME_RELU, dropout=dropout_rate)
net2.add_layer(0, nonlin_type=ly.NONLIN_NAME_TANH, dropout=0)
net3 = nn.NeuralNet(out_dim[1], out_dim[2])
net3.add_layer(1, nonlin_type=ly.NONLIN_NAME_SIGMOID, dropout=dropout_rate)
net3.add_layer(0, nonlin_type=ly.NONLIN_NAME_LINEAR, dropout=0)
net3.set_loss(ls.LOSS_NAME_SQUARED, loss_weight=1)
return nn.StackedNeuralNet(net1, net2, net3)
def test_stacked_net_gradient(add_noise=False):
print 'Testing StackedNeuralNet'
in_dim = 3
out_dim = [5, 2, 2]
n_cases = 5
seed = 8
dropout_rate = 0.5 if add_noise else 0
stacked_net = create_stacked_net(in_dim, out_dim, dropout_rate)
print stacked_net
x = gnp.randn(n_cases, in_dim)
t1 = gnp.randn(n_cases, out_dim[0])
t3 = gnp.randn(n_cases, out_dim[2])
stacked_net.load_target(t1, None, t3)
if add_noise:
gnp.seed_rand(seed)
stacked_net.clear_gradient()
stacked_net.forward_prop(x, add_noise=add_noise, compute_loss=True, is_test=False)
stacked_net.backward_prop()
backprop_grad = stacked_net.get_grad_vec()
f = fdiff_grad_generator(stacked_net, x, None, add_noise=add_noise, seed=seed)
fdiff_grad = finite_difference_gradient(f, stacked_net.get_param_vec())
test_passed = test_vec_pair(fdiff_grad, 'Finite Difference Gradient',
backprop_grad, ' Backpropagation Gradient')
print ''
gnp.seed_rand(int(time.time()))
return test_passed
def test_stacked_net_io():
def f_create():
return create_stacked_net(3, [5,2,2], 0.5)
def f_create_void():
return nn.StackedNeuralNet()
return test_net_io(f_create, f_create_void)
def test_all_stacked_net():
print ''
print '========================'
print 'Testing StackedNeuralNet'
print '========================'
print ''
n_success = 0
if test_stacked_net_gradient(add_noise=False):
n_success += 1
if test_stacked_net_gradient(add_noise=True):
n_success += 1
if test_stacked_net_io():
n_success += 1
n_tests = 3
print '=============='
print 'Test finished: %d/%d success, %d failed' % (n_success, n_tests, n_tests - n_success)
print ''
return n_success, n_tests
def create_y_net(in_dim, out_dim, dropout_rate):
net01 = nn.NeuralNet(3,2)
net01.add_layer(0, nonlin_type=ly.NONLIN_NAME_SIGMOID, dropout=dropout_rate)
net02 = nn.NeuralNet(2, out_dim[0])
net02.add_layer(0, nonlin_type=ly.NONLIN_NAME_TANH, dropout=dropout_rate)
net02.set_loss(ls.LOSS_NAME_SQUARED, loss_weight=0.5)
net1 = nn.StackedNeuralNet(net01, net02)
net2 = nn.NeuralNet(out_dim[0], out_dim[1])
net2.add_layer(0, nonlin_type=ly.NONLIN_NAME_TANH, dropout=0)
net2.set_loss(ls.LOSS_NAME_SQUARED, loss_weight=0)
net3 = nn.NeuralNet(out_dim[0], out_dim[2])
net3.add_layer(1, nonlin_type=ly.NONLIN_NAME_SIGMOID, dropout=dropout_rate)
net3.add_layer(0, nonlin_type=ly.NONLIN_NAME_LINEAR, dropout=0)
net3.set_loss(ls.LOSS_NAME_SQUARED, loss_weight=1.5)
ynet = nn.YNeuralNet(net1, net2, net3)
return ynet
def test_y_net_gradient(add_noise=False):
print 'Testing YNeuralNet ' + ('with noise' if add_noise else 'without noise')
in_dim = 3
out_dim = [2, 2, 2]
n_cases = 5
seed = 8
dropout_rate = 0.5 if add_noise else 0
ynet = create_y_net(in_dim, out_dim, dropout_rate)
print ynet
x = gnp.randn(n_cases, in_dim)
t1 = gnp.randn(n_cases, out_dim[0])
t2 = gnp.randn(n_cases, out_dim[1])
t3 = gnp.randn(n_cases, out_dim[2])
ynet.load_target([None, t1], t2, t3)
if add_noise:
gnp.seed_rand(seed)
ynet.clear_gradient()
ynet.forward_prop(x, add_noise=add_noise, compute_loss=True, is_test=False)
ynet.backward_prop()
backprop_grad = ynet.get_grad_vec()
f = fdiff_grad_generator(ynet, x, None, add_noise=add_noise, seed=seed)
fdiff_grad = finite_difference_gradient(f, ynet.get_param_vec())
test_passed = test_vec_pair(fdiff_grad, 'Finite Difference Gradient',
backprop_grad, ' Backpropagation Gradient')
print ''
gnp.seed_rand(int(time.time()))
return test_passed
def test_y_net_io():
def f_create():
in_dim = 3
out_dim = [2, 2, 2]
n_cases = 5
seed = 8
dropout_rate = 0.5
net1 = nn.NeuralNet(3,2)
net1.add_layer(2, nonlin_type=ly.NONLIN_NAME_SIGMOID, dropout=dropout_rate)
net1.add_layer(0, nonlin_type=ly.NONLIN_NAME_TANH, dropout=dropout_rate)
net1.set_loss(ls.LOSS_NAME_SQUARED)
net2 = nn.NeuralNet(out_dim[0], out_dim[1])
net2.add_layer(0, nonlin_type=ly.NONLIN_NAME_TANH, dropout=0)
net2.set_loss(ls.LOSS_NAME_SQUARED)
net3 = nn.NeuralNet(out_dim[0], out_dim[2])
net3.add_layer(1, nonlin_type=ly.NONLIN_NAME_SIGMOID, dropout=dropout_rate)
net3.add_layer(0, nonlin_type=ly.NONLIN_NAME_LINEAR, dropout=0)
net3.set_loss(ls.LOSS_NAME_SQUARED)
return nn.YNeuralNet(net1, net2, net3)
def f_create_void():
return nn.YNeuralNet()
return test_net_io(f_create, f_create_void)
def test_all_y_net():
print ''
print '=================='
print 'Testing YNeuralNet'
print '=================='
print ''
n_success = 0
if test_y_net_gradient(add_noise=False):
n_success += 1
if test_y_net_gradient(add_noise=True):
n_success += 1
if test_y_net_io():
n_success += 1
n_tests = 3
print '=============='
print 'Test finished: %d/%d success, %d failed' % (n_success, n_tests, n_tests - n_success)
print ''
return n_success, n_tests
def create_autoencoder(dropout_rate=0):
in_dim = 3
h_dim = 2
net1 = nn.NeuralNet(in_dim, h_dim)
net1.add_layer(2, nonlin_type=ly.NONLIN_NAME_SIGMOID, dropout=0)
net1.add_layer(0, nonlin_type=ly.NONLIN_NAME_SIGMOID, dropout=dropout_rate)
net2 = nn.NeuralNet(h_dim, in_dim)
net2.add_layer(2, nonlin_type=ly.NONLIN_NAME_TANH, dropout=0)
net2.add_layer(1, nonlin_type=ly.NONLIN_NAME_TANH, dropout=dropout_rate)
net2.add_layer(0, nonlin_type=ly.NONLIN_NAME_LINEAR, dropout=dropout_rate)
net2.set_loss(ls.LOSS_NAME_SQUARED, loss_weight=1.5)
autoencoder = nn.AutoEncoder(net1, net2)
return autoencoder
def test_autoencoder(add_noise=False):
print 'Testing AutoEncoder ' + ('with noise' if add_noise else 'without noise')
n_cases = 5
seed = 8
dropout_rate = 0.5 if add_noise else 0
autoencoder = create_autoencoder(dropout_rate)
print autoencoder
x = gnp.randn(n_cases, autoencoder.in_dim)
if add_noise:
gnp.seed_rand(seed)
autoencoder.clear_gradient()
autoencoder.forward_prop(x, add_noise=add_noise, compute_loss=True, is_test=False)
autoencoder.backward_prop()
backprop_grad = autoencoder.get_grad_vec()
f = fdiff_grad_generator(autoencoder, x, None, add_noise=add_noise, seed=seed)
fdiff_grad = finite_difference_gradient(f, autoencoder.get_param_vec())
test_passed = test_vec_pair(fdiff_grad, 'Finite Difference Gradient',
backprop_grad, ' Backpropagation Gradient')
print ''
gnp.seed_rand(int(time.time()))
return test_passed
def test_autoencoder_io():
def f_create():
return create_autoencoder(0.5)
def f_create_void():
return nn.AutoEncoder()
return test_net_io(f_create, f_create_void)
def test_all_autoencoder():
print ''
print '==================='
print 'Testing AutoEncoder'
print '==================='
print ''
n_success = 0
if test_autoencoder(add_noise=False):
n_success += 1
if test_autoencoder(add_noise=True):
n_success += 1
if test_autoencoder_io():
n_success += 1
n_tests = 3
print '=============='
print 'Test finished: %d/%d success, %d failed' % (n_success, n_tests, n_tests - n_success)
print ''
return n_success, n_tests
def test_rnn_io(has_input=True):
def f_create():
return rnn.RNN(in_dim=(3 if has_input else None),out_dim=3)
def f_create_void():
return rnn.RNN()
return test_net_io(f_create, f_create_void)
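# Gradient-check the plain RNN: forward-prop x through the RNN, feed the hidden states to a small
# NeuralNet with a squared loss, then compare the RNN's backpropagated gradient with finite
# differences.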
def test_rnn():
print 'Testing RNN'
n_cases = 5
in_dim = 3
out_dim = 2
label_dim = 2
x = gnp.randn(n_cases, in_dim)
t = gnp.randn(n_cases, label_dim)
net = nn.NeuralNet(out_dim, label_dim)
net.add_layer(0, nonlin_type=ly.NONLIN_NAME_LINEAR)
net.set_loss(ls.LOSS_NAME_SQUARED)
net.load_target(t)
rnn_net = rnn.RNN(in_dim, out_dim)
print rnn_net
print net
rnn_net.clear_gradient()
net.clear_gradient()
h = rnn_net.forward_prop(x)
net.forward_prop(h, add_noise=False, compute_loss=True, is_test=False)
dh = net.backward_prop()
rnn_net.backward_prop(dh)
backprop_grad = rnn_net.get_grad_vec()
def f(w):
rnn_net.clear_gradient()
rnn_net.set_param_from_vec(w)
h = rnn_net.forward_prop(x)
net.forward_prop(h, add_noise=False, compute_loss=True, is_test=False)
return net.get_loss()
fdiff_grad = finite_difference_gradient(f, rnn_net.get_param_vec())
test_passed = test_vec_pair(fdiff_grad, 'Finite Difference Gradient',
backprop_grad, ' Backpropagation Gradient')
print ''
return test_passed
def create_default_rnn_hybrid(add_noise=False, has_input=True):
in_dim = 3 if has_input else None
hid_dim = 2
out_dim = 2
net_hid_dim = 2
net = nn.NeuralNet(hid_dim, out_dim)
net.add_layer(net_hid_dim, nonlin_type=ly.NONLIN_NAME_TANH, dropout=(0 if not add_noise else 0.5))
net.add_layer(0, nonlin_type=ly.NONLIN_NAME_LINEAR)
net.set_loss(ls.LOSS_NAME_SQUARED)
rnn_net = rnn.RNN(in_dim, hid_dim)
if not has_input:
rnn_net.b = gnp.randn(hid_dim)
return rnn.RnnHybridNetwork(rnn_net, net)
def test_rnn_hybrid(add_noise=False, has_input=True):
print 'Testing RNN hybrid, ' + ('with' if add_noise else 'without') + ' noise, ' \
+ ('with' if has_input else 'without') + ' input'
n_cases = 5
net = create_default_rnn_hybrid(add_noise=add_noise, has_input=has_input)
print net
x = gnp.randn(n_cases, net.in_dim) if has_input else None
t = gnp.randn(n_cases, net.out_dim)
net.load_target(t)
seed = 8
gnp.seed_rand(seed)
net.clear_gradient()
"""
if not has_input:
import ipdb
ipdb.set_trace()
"""
net.forward_prop(X=x, T=n_cases, add_noise=add_noise, compute_loss=True, is_test=False)
net.backward_prop()
backprop_grad = net.get_grad_vec()
f = fdiff_grad_generator(net, x, None, add_noise=add_noise, seed=seed, T=n_cases)
fdiff_grad = finite_difference_gradient(f, net.get_param_vec())
test_passed = test_vec_pair(fdiff_grad, 'Finite Difference Gradient',
backprop_grad, ' Backpropagation Gradient')
print ''
gnp.seed_rand(int(time.time()))
return test_passed
def test_rnn_hybrid_io(add_noise=False):
def f_create():
return create_default_rnn_hybrid(add_noise=add_noise)
def f_create_void():
return rnn.RnnHybridNetwork()
return test_net_io(f_create, f_create_void)
def create_default_rnn_ae(add_noise=False):
in_dim = 3
hid_dim = 2
net_hid_dim = 2
net = nn.NeuralNet(hid_dim, in_dim)
net.add_layer(net_hid_dim, ly.NONLIN_NAME_TANH, dropout=(0.5 if add_noise else 0))
net.add_layer(0, ly.NONLIN_NAME_LINEAR)
net.set_loss(ls.LOSS_NAME_SQUARED)
dec = rnn.RnnHybridNetwork(rnn.RNN(out_dim=hid_dim), net)
enc = rnn.RNN(in_dim=in_dim, out_dim=hid_dim)
return rnn.RnnAutoEncoder(encoder=enc, decoder=dec)
def test_rnn_ae(add_noise=False):
print 'Testing RnnAutoEncoder, ' + ('with' if add_noise else 'without') + ' noise'
n_cases = 5
net = create_default_rnn_ae(add_noise=add_noise)
print net
x = gnp.randn(n_cases, net.in_dim)
seed = 8
gnp.seed_rand(seed)
net.clear_gradient()
net.forward_prop(x, add_noise=add_noise, compute_loss=True, is_test=False)
net.backward_prop()
backprop_grad = net.get_grad_vec()
f = fdiff_grad_generator(net, x, None, add_noise=add_noise, seed=seed)
fdiff_grad = finite_difference_gradient(f, net.get_param_vec())
test_passed = test_vec_pair(fdiff_grad, 'Finite Difference Gradient',
backprop_grad, ' Backpropagation Gradient')
print ''
gnp.seed_rand(int(time.time()))
return test_passed
def test_rnn_ae_io(add_noise=False):
def f_create():
return create_default_rnn_ae(add_noise=add_noise)
def f_create_void():
return rnn.RnnAutoEncoder()
return test_net_io(f_create, f_create_void)
def create_default_rnn_on_nn(add_noise=False):
in_dim = 3
net_hid_dim = 2
rnn_in_dim = 2
hid_dim = 2
out_dim = 3
net = nn.NeuralNet(in_dim, hid_dim)
net.add_layer(net_hid_dim, nonlin_type=ly.NONLIN_NAME_TANH, dropout=(0.5 if add_noise else 0))
net.add_layer(0, nonlin_type=ly.NONLIN_NAME_SIGMOID)
rnn_net = rnn.RnnOnNeuralNet(net, rnn.RNN(in_dim=rnn_in_dim, out_dim=hid_dim, nonlin_type=ly.NONLIN_NAME_TANH))
predict_net = nn.NeuralNet(hid_dim, out_dim)
predict_net.add_layer(0, nonlin_type=ly.NONLIN_NAME_LINEAR)
predict_net.set_loss(ls.LOSS_NAME_SQUARED)
rnn_predict_net = rnn.RnnHybridNetwork(rnn_net, predict_net)
return rnn_predict_net
def test_rnn_on_nn(add_noise=False):
print 'Testing RnnOnNeuralNet, ' + ('with' if add_noise else 'without') + ' noise'
n_cases = 5
net = create_default_rnn_on_nn(add_noise=add_noise)
print net
x = gnp.randn(n_cases, net.in_dim)
t = gnp.randn(n_cases, net.out_dim)
seed = 8
gnp.seed_rand(seed)
net.load_target(t)
net.clear_gradient()
net.forward_prop(x, add_noise=add_noise, compute_loss=True, is_test=False)
net.backward_prop()
backprop_grad = net.get_grad_vec()
f = fdiff_grad_generator(net, x, None, add_noise=add_noise, seed=seed)
fdiff_grad = finite_difference_gradient(f, net.get_param_vec())
test_passed = test_vec_pair(fdiff_grad, 'Finite Difference Gradient',
backprop_grad, ' Backpropagation Gradient')
print ''
gnp.seed_rand(int(time.time()))
return test_passed
def test_rnn_on_nn_io(add_noise=False):
def f_create():
return create_default_rnn_on_nn(add_noise=add_noise).rnn
def f_create_void():
return rnn.RnnOnNeuralNet()
return test_net_io(f_create, f_create_void)
def test_all_rnn():
print ''
    print '==========='
    print 'Testing RNN'
    print '==========='
print ''
n_success = 1 if test_rnn() else 0
n_success += 1 if test_rnn_io(has_input=True) else 0
n_success += 1 if test_rnn_io(has_input=False) else 0
n_success += 1 if test_rnn_hybrid(add_noise=False, has_input=True) else 0
n_success += 1 if test_rnn_hybrid(add_noise=True, has_input=True) else 0
n_success += 1 if test_rnn_hybrid(add_noise=False, has_input=False) else 0
n_success += 1 if test_rnn_hybrid(add_noise=True, has_input=False) else 0
n_success += 1 if test_rnn_hybrid_io(add_noise=False) else 0
n_success += 1 if test_rnn_hybrid_io(add_noise=True) else 0
n_success += 1 if test_rnn_ae(add_noise=False) else 0
n_success += 1 if test_rnn_ae(add_noise=True) else 0
n_success += 1 if test_rnn_ae_io(add_noise=False) else 0
n_success += 1 if test_rnn_ae_io(add_noise=True) else 0
n_success += 1 if test_rnn_on_nn(add_noise=False) else 0
n_success += 1 if test_rnn_on_nn(add_noise=True) else 0
n_success += 1 if test_rnn_on_nn_io(add_noise=False) else 0
n_success += 1 if test_rnn_on_nn_io(add_noise=True) else 0
n_tests = 17
print '=============='
print 'Test finished: %d/%d success, %d failed' % (n_success, n_tests, n_tests - n_success)
print ''
return n_success, n_tests
def run_all_tests():
gnp.seed_rand(int(time.time()))
n_success = 0
n_tests = 0
test_list = [test_all_nonlin, test_all_loss, test_all_layer,
test_all_neuralnet, test_all_stacked_net, test_all_y_net,
test_all_autoencoder, test_all_rnn]
for batch_test in test_list:
success_in_batch, tests_in_batch = batch_test()
n_success += success_in_batch
n_tests += tests_in_batch
print ''
print '==================='
print 'All tests finished: %d/%d success, %d failed' % (n_success, n_tests, n_tests - n_success)
print ''
if __name__ == '__main__':
run_all_tests()
|
|
#!/usr/bin/env python
"""
@package coverage_model.test.test_complex_coverage
@file coverage_model/test/test_R2_complex_coverage.py
@author Christopher Mueller
@brief Unit & Integration tests for AggregateCoverage
"""
from ooi.logging import log
import os
import numpy as np
import random
from coverage_model import *
from nose.plugins.attrib import attr
import mock
import unittest
from copy import deepcopy
from coverage_model.hdf_utils import HDFLockingFile
from coverage_test_base import CoverageIntTestBase, get_props
from coverage_model.coverages.aggregate_coverage import AggregateCoverage
import time
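# Helper: build and persist a SimplexCoverage with a 1-d temporal domain and a 0-d spatial domain.
# `params` may be a ParameterDictionary, or a list whose items are ParameterContext objects,
# (name, dtype) tuples, or plain names (stored as float32). Each parameter is filled from
# `data_dict` when provided, otherwise with range(nt). Returns the coverage's persistence directory.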
def _make_cov(root_dir, params, nt=10, data_dict=None, make_temporal=True):
# Construct temporal and spatial Coordinate Reference System objects
tcrs = CRS([AxisTypeEnum.TIME])
scrs = CRS([AxisTypeEnum.LON, AxisTypeEnum.LAT])
# Construct temporal and spatial Domain objects
tdom = GridDomain(GridShape('temporal', [0]), tcrs, MutabilityEnum.EXTENSIBLE) # 1d (timeline)
sdom = GridDomain(GridShape('spatial', [0]), scrs, MutabilityEnum.IMMUTABLE) # 0d spatial topology (station/trajectory)
if isinstance(params, ParameterDictionary):
pdict = params
else:
# Instantiate a ParameterDictionary
pdict = ParameterDictionary()
if make_temporal:
# Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
t_ctxt = ParameterContext('time', param_type=QuantityType(value_encoding=np.dtype('float32')))
t_ctxt.uom = 'seconds since 01-01-1970'
pdict.add_context(t_ctxt, is_temporal=True)
for p in params:
if isinstance(p, ParameterContext):
pdict.add_context(p)
            elif isinstance(p, tuple):
pdict.add_context(ParameterContext(p[0], param_type=QuantityType(value_encoding=np.dtype(p[1]))))
else:
pdict.add_context(ParameterContext(p, param_type=QuantityType(value_encoding=np.dtype('float32'))))
scov = SimplexCoverage(root_dir, create_guid(), 'sample coverage_model', parameter_dictionary=pdict, temporal_domain=tdom, spatial_domain=sdom)
p_dict = {}
for p in scov.list_parameters():
if data_dict is not None and p in data_dict:
if data_dict[p] is None:
continue
dat = data_dict[p]
else:
dat = range(nt)
try:
p_dict[p] = np.array(dat)
except Exception as ex:
import sys
            raise Exception('Error setting values for {0}: {1}'.format(p, dat)), None, sys.exc_traceback
scov.set_parameter_values(make_parameter_data_dict(p_dict))
scov.close()
return os.path.realpath(scov.persistence_dir)
# class CoverageEnvironment(CoverageModelIntTestCase, CoverageIntTestBase):
# @attr('UTIL', group='cov')
# def test_cov_params(self):
# contexts = create_all_params()
# del contexts['time']
# del contexts['density']
# contexts = contexts.values()
# cova_pth = _make_cov(self.working_dir, contexts, nt=2)
# cov = SimplexCoverage.load(cova_pth, mode='r+')
# cov._range_value['time'][:] = [1, 2]
# cov._range_value['temperature'][:] = [205378, 289972]
# cov._range_value['conductivity'][:] = [410913, 417588]
# cov._range_value['pressure'][:] = [3939, 13616]
# cov._range_value['p_range'][:] = 1000.
# cov._range_value['lat'][:] = 40.
# cov._range_value['lon'][:] = -70.
#
#
# # Make a new function
# owner = 'ion_functions.data.ctd_functions'
# dens_func = 'ctd_density'
# dens_arglist = ['SP', 't', 'p', 'lat', 'lon']
# dens_pmap = {'SP':'pracsal', 't':'seawater_temperature', 'p':'seawater_pressure', 'lat':'lat', 'lon':'lon'}
# dens_expr = PythonFunction('density', owner, dens_func, dens_arglist, None, dens_pmap)
# dens_ctxt = ParameterContext('density', param_type=ParameterFunctionType(dens_expr), variability=VariabilityEnum.TEMPORAL)
# dens_ctxt.uom = 'kg m-3'
#
# cov.append_parameter(dens_ctxt)
#
# # Make sure it worked
# np.testing.assert_array_equal(cov._range_value['density'][:],
# np.array([ 1024.98205566, 1019.4932251 ], dtype=np.float32))
#
# @attr('UTIL', group='cov')
# def test_something(self):
#
# # Create a large dataset spanning a year
# # Each coverage represents a week
#
#
# cova_pth = _make_cov(self.working_dir, ['value_set'], nt=1000, data_dict={'time': np.arange(1000,2000),'value_set':np.arange(1000)})
# cov = AbstractCoverage.load(cova_pth)
#
# results = self.simple_search(cov, 1212, 1390)
# np.testing.assert_array_equal(results['time'], np.arange(1212, 1391))
# np.testing.assert_array_equal(results['value_set'], np.arange(212, 391))
#
# from pyon.util.breakpoint import breakpoint
# breakpoint(locals(), globals())
#
# def simple_search(self, coverage, start, stop):
# from coverage_model.search.search_parameter import ParamValueRange, ParamValue, SearchCriteria
# from coverage_model.search.coverage_search import CoverageSearch
# from coverage_model.search.search_constants import IndexedParameters
# pdir, guid = os.path.split(coverage.persistence_dir)
# time_param = ParamValueRange(IndexedParameters.Time, (start, stop))
# criteria = SearchCriteria(time_param)
# search = CoverageSearch(criteria, order_by=['time'])
# results = search.select()
# cov = results.get_view_coverage(guid, pdir)
# retval = cov.get_observations()
# return retval
#
# @attr('UTIL', group='cov')
# def test_aggregates(self):
#
# array_stuff = ParameterContext('array_stuff', param_type=ArrayType(inner_encoding='float32'))
# x, y = np.mgrid[0:10, 0:10]
#
# cova_pth = _make_cov(self.working_dir, ['value_set', array_stuff], data_dict={'time': np.arange(10),'value_set' : np.ones(10), 'array_stuff' : x})
# covb_pth = _make_cov(self.working_dir, ['value_set', array_stuff], data_dict={'time': np.arange(20,30), 'value_set': np.ones(10) * 2, 'array_stuff' : y})
# covc_pth = _make_cov(self.working_dir, ['value_set', array_stuff], data_dict={'time': np.arange(15,25), 'value_set' : np.ones(10) * 3, 'array_stuff' : x})
#
# cov = SimplexCoverage.load(cova_pth, mode='r+')
#
# cov_pths = [cova_pth, covb_pth]
#
#
# ccov = AggregateCoverage(self.working_dir, create_guid(), 'complex coverage',
# reference_coverage_locs=[covb_pth],
# parameter_dictionary=ParameterDictionary(),
# complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
#
# ccov.append_reference_coverage(cova_pth)
# ccov.append_reference_coverage(covc_pth)
#
# # TODO: correct this once ViewCoverage is worked out
# # View coverage construction doesn't work for DB-based metadata. View Coverage will be modified in the future
# # vcov = ViewCoverage(self.working_dir, create_guid(), 'view coverage', reference_coverage_location = ccov.persistence_dir)
# @attr('INT',group='cov')
# class TestAggregateCoverageInt(CoverageModelIntTestCase, CoverageIntTestBase):
#
# # Make a deep copy of the base TESTING_PROPERTIES dict and then modify for this class
# TESTING_PROPERTIES = deepcopy(CoverageIntTestBase.TESTING_PROPERTIES)
# TESTING_PROPERTIES['test_props_decorator'] = {'test_props': 10}
# TESTING_PROPERTIES['test_get_time_data_metrics'] = {'time_data_size': 0.01907348}
#
# @get_props()
# def test_props_decorator(self):
# props = self.test_props_decorator.props
# self.assertIsInstance(props, dict)
# expected = {'time_steps': 30, 'test_props': 10, 'brick_size': 1000}
# self.assertEqual(props, expected)
#
# def setUp(self):
# pass
#
# def tearDown(self):
# pass
#
# @classmethod
# def get_cov(cls, only_time=False, save_coverage=False, in_memory=False, inline_data_writes=True, brick_size=None, make_empty=False, nt=30, auto_flush_values=True):
# # Many tests assume nt is the 'total' number of timesteps, must divide between the 3 coverages
# sz1 = sz2 = sz3 = int(nt) / 3
# sz3 += nt - sum([sz1, sz2, sz3])
#
# first_times = np.arange(0, sz1, dtype='float32')
# first_data = np.arange(0, sz1, dtype='float32')
#
# second_times = np.arange(sz1, sz1+sz2, dtype='float32')
# second_data = np.arange(sz1, sz1+sz2, dtype='float32')
#
# third_times = np.arange(sz1+sz2, nt, dtype='float32')
# third_data = np.arange(sz1+sz2, nt, dtype='float32')
#
# cova_pth = _make_cov(cls.working_dir, ['data_all', 'data_a'], nt=sz1,
# data_dict={'time': first_times, 'data_all': first_data, 'data_a': first_data})
# covb_pth = _make_cov(cls.working_dir, ['data_all', 'data_b'], nt=sz2,
# data_dict={'time': second_times, 'data_all': second_data, 'data_b': second_data})
# covc_pth = _make_cov(cls.working_dir, ['data_all', 'data_c'], nt=sz3,
# data_dict={'time': third_times, 'data_all': third_data, 'data_c': third_data})
#
# comp_cov = AggregateCoverage(cls.working_dir, create_guid(), 'sample temporal aggregation coverage',
# reference_coverage_locs=[cova_pth, covb_pth, covc_pth],
# complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
#
# return comp_cov, 'TestAggregateCoverageInt'
#
# ######################
# # Overridden base tests
# ######################
#
# def _insert_set_get(self, scov=None, timesteps=None, data=None, _slice=None, param='all'):
# # Cannot set values against a AggregateCoverage - just return True
# return True
#
# def test_append_parameter(self):
# nt = 60
# ccov, cov_name = self.get_cov(inline_data_writes=True, nt=nt)
#
# parameter_name = 'a*b'
# func = NumexprFunction('a*b', 'a*b', ['a', 'b'], {'a': 'data_a', 'b': 'data_b'})
# pc_in = ParameterContext(parameter_name, param_type=ParameterFunctionType(function=func, value_encoding=np.dtype('float32')))
#
# ccov.append_parameter(pc_in)
#
# sample_values = ccov.get_parameter_values('data_a') * ccov.get_parameter_values('data_b')
#
# self.assertTrue(np.array_equal(sample_values, ccov.get_parameter_values(parameter_name)))
#
# self.assertEqual(len(ccov.get_parameter_values(parameter_name).get_data()[parameter_name]), nt + 100)
#
# nvals = np.arange(nt, nt + 100, dtype='f')
# p_vals = {
# 'time': np.arange(10000, 10000+nt),
# 'data_a': nvals,
# 'data_b': nvals
# }
# ccov.set_parameter_values(make_parameter_data_dict(p_vals))
#
# sample_values = ccov.get_parameter_values('data_a') * ccov.get_parameter_values('data_b')
#
# self.assertTrue(np.array_equal(sample_values, ccov.get_parameter_values(parameter_name).get_data()[parameter_name]))
#
# with self.assertRaises(ValueError):
# ccov.append_parameter(pc_in)
#
# @unittest.skip('Functionality verified in \'test_temporal_aggregation\'')
# def test_refresh(self):
# pass
#
# @unittest.skip('Does not apply to AggregateCoverage')
# def test_create_multi_bricks(self):
# pass
#
# @unittest.skip('Does not apply to AggregateCoverage')
# def test_coverage_pickle_and_in_memory(self):
# pass
#
# @unittest.skip('Does not apply to AggregateCoverage')
# def test_coverage_mode_expand_domain(self):
# pass
#
# @unittest.skip('Does not apply to AggregateCoverage')
# def test_coverage_mode_set_value(self):
# pass
#
# @unittest.skip('Does not apply to AggregateCoverage')
# def test_pickle_problems_in_memory(self):
# pass
#
# @unittest.skip('Does not apply to AggregateCoverage')
# def test_set_allparams_five_bricks(self):
# pass
#
# @unittest.skip('Does not apply to AggregateCoverage')
# def test_set_allparams_one_brick(self):
# pass
#
# @unittest.skip('Does not apply to AggregateCoverage')
# def test_set_time_five_bricks(self):
# pass
#
# @unittest.skip('Does not apply to AggregateCoverage')
# def test_set_time_five_bricks_strided(self):
# pass
#
# @unittest.skip('Does not apply to AggregateCoverage')
# def test_set_time_one_brick(self):
# pass
#
# @unittest.skip('Does not apply to AggregateCoverage')
# def test_set_time_one_brick_strided(self):
# pass
#
# ######################
# # Additional tests specific to Complex Coverage
# ######################
#
# def test_file_mode(self):
# # Construct temporal and spatial Coordinate Reference System objects
# tcrs = CRS([AxisTypeEnum.TIME])
# scrs = CRS([AxisTypeEnum.LON, AxisTypeEnum.LAT])
#
# # Construct temporal and spatial Domain objects
# tdom = GridDomain(GridShape('temporal', [0]), tcrs, MutabilityEnum.EXTENSIBLE) # 1d (timeline)
# sdom = GridDomain(GridShape('spatial', [0]), scrs, MutabilityEnum.IMMUTABLE) # 0d spatial topology (station/trajectory)
#
# rcov_locs = [_make_cov('test_data', ['first_param']),
# _make_cov('test_data', ['second_param']),
# _make_cov('test_data', ['third_param', 'fourth_param']),
# ]
#
# # Instantiate a ParameterDictionary
# pdict = ParameterDictionary()
#
# # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
# func = NumexprFunction('a*b', 'a*b', ['a', 'b'], {'a': 'first_param', 'b': 'second_param'})
# val_ctxt = ParameterContext('a*b', param_type=ParameterFunctionType(function=func, value_encoding=np.dtype('float32')))
# pdict.add_context(val_ctxt)
#
# # Instantiate the SimplexCoverage providing the ParameterDictionary, spatial Domain and temporal Domain
# ccov = AggregateCoverage('test_data', create_guid(), 'sample complex coverage', parameter_dictionary=pdict,
# mode='w', reference_coverage_locs=rcov_locs)
#
# ccov_pth = ccov.persistence_dir
# ccov_masterfile_pth = ccov._persistence_layer.master_manager.file_path
#
# storage_type = ccov._persistence_layer.master_manager.storage_type()
# # Close the CC
# ccov.close()
# del(ccov)
#
# # Open AggregateCoverage in write mode
# w_ccov = AbstractCoverage.load(ccov_pth)
#
# # Loop over opening and reading data out of CC 10 times
# rpt = 20
# while rpt > 0:
# read_ccov = AbstractCoverage.load(ccov_pth, mode='r')
# self.assertIsInstance(read_ccov, AbstractCoverage)
# time_value = read_ccov.get_parameter_values('time')
# self.assertEqual(time_value, 1.0)
# read_ccov.close()
# del(read_ccov)
# rpt = rpt - 1
#
# w_ccov.close()
# del(w_ccov)
#
# if storage_type == 'hdf':
# # Only for file-based metadata
# # Open AggregateCoverage's master file using locking
# # with HDFLockingFile(ccov_masterfile_pth, 'r+') as f:
#
# # Test ability to read from AggregateCoverage in readonly mode
# locked_ccov = AbstractCoverage.load(ccov_pth, mode='r')
# self.assertIsInstance(locked_ccov, AbstractCoverage)
# time_value = locked_ccov.get_parameter_values('time', [1])
# self.assertEqual(time_value, 1.0)
#
# # Test inability to open AggregateCoverage for writing
# with self.assertRaises(IOError):
# AbstractCoverage.load(ccov_pth)
#
# with self.assertRaises(IOError):
# AbstractCoverage.load(ccov_pth, mode='w')
#
# with self.assertRaises(IOError):
# AbstractCoverage.load(ccov_pth, mode='a')
#
# with self.assertRaises(IOError):
# AbstractCoverage.load(ccov_pth, mode='r+')
#
# locked_ccov.close()
# del(locked_ccov)
#
#     @unittest.skip("Parametric deprecated")
# def test_parametric_strict(self):
# num_times = 10
#
# first_data = np.arange(num_times, dtype='float32') * 0.2
# second_data = np.random.random_sample(num_times) * (50 - 10) + 10
# apple_data = np.arange(num_times, dtype='float32')
# orange_data = np.arange(num_times, dtype='float32') * 2
#
# cova_pth = _make_cov(self.working_dir, ['first_param'], data_dict={'first_param': first_data})
# covb_pth = _make_cov(self.working_dir, ['second_param'], data_dict={'second_param': second_data})
# covc_pth = _make_cov(self.working_dir, ['apples', 'oranges'], data_dict={'apples': apple_data, 'oranges': orange_data})
#
# # Instantiate a ParameterDictionary
# pdict = ParameterDictionary()
#
# # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
# ab_func = NumexprFunction('aXb', 'a*b', ['a', 'b'], {'a': 'first_param', 'b': 'second_param'})
# ab_ctxt = ParameterContext('aXb', param_type=ParameterFunctionType(function=ab_func, value_encoding=np.dtype('float32')))
# pdict.add_context(ab_ctxt)
#
# aplorng_func = NumexprFunction('apples_to_oranges', 'a*cos(sin(b))+c', ['a', 'b', 'c'], {'a': 'apples', 'b': 'oranges', 'c': 'first_param'})
# aplorng_ctxt = ParameterContext('apples_to_oranges', param_type=ParameterFunctionType(function=aplorng_func, value_encoding=np.dtype('float32')))
# pdict.add_context(aplorng_ctxt)
#
# # Instantiate the AggregateCoverage
# ccov = AggregateCoverage(self.working_dir, create_guid(), 'sample complex coverage',
# reference_coverage_locs=[cova_pth, covb_pth, covc_pth],
# parameter_dictionary=pdict,
# complex_type=ComplexCoverageType.PARAMETRIC_STRICT)
#
# self.assertEqual(ccov.list_parameters(),
# ['aXb', 'apples', 'apples_to_oranges', 'first_param', 'oranges', 'second_param', 'time'])
#
# self.assertEqual(ccov.temporal_parameter_name, 'time')
#
# self.assertTrue(np.array_equal(ccov.get_parameter_values('first_param'), first_data))
# self.assertTrue(np.allclose(ccov.get_parameter_values('second_param'), second_data))
# self.assertTrue(np.array_equal(ccov.get_parameter_values('apples'), apple_data))
# self.assertTrue(np.array_equal(ccov.get_parameter_values('oranges'), orange_data))
#
# aXb_want = first_data * second_data
# self.assertTrue(np.allclose(ccov.get_parameter_values('aXb'), aXb_want))
# aplorng_want = apple_data * np.cos(np.sin(orange_data)) + first_data
# self.assertTrue(np.allclose(ccov.get_parameter_values('apples_to_oranges'), aplorng_want))
#
#     @unittest.skip("Parametric deprecated")
# def test_parametric_strict_warnings(self):
# num_times = 10
#
# first_data = np.arange(num_times, dtype='float32') * 0.2
# second_data = np.random.random_sample(num_times) * (50 - 10) + 10
# apple_data = np.arange(num_times, dtype='float32')
# orange_data = np.arange(num_times, dtype='float32') * 2
#
# cova_pth = _make_cov(self.working_dir, ['first_param'], data_dict={'first_param': first_data})
# covb_pth = _make_cov(self.working_dir, ['second_param'], data_dict={'second_param': second_data, 'time': np.arange(123, 133, dtype='int64')})
# covc_pth = _make_cov(self.working_dir, ['apples', 'oranges'], data_dict={'apples': apple_data, 'oranges': orange_data})
#
# # Instantiate a ParameterDictionary
# pdict = ParameterDictionary()
#
# # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
# ab_func = NumexprFunction('aXb', 'a*b', ['a', 'b'], {'a': 'first_param', 'b': 'second_param'})
# ab_ctxt = ParameterContext('aXb', param_type=ParameterFunctionType(function=ab_func, value_encoding=np.dtype('float32')))
# pdict.add_context(ab_ctxt)
#
# aplorng_func = NumexprFunction('apples_to_oranges', 'a*cos(sin(b))+c', ['a', 'b', 'c'], {'a': 'apples', 'b': 'oranges', 'c': 'first_param'})
# aplorng_ctxt = ParameterContext('apples_to_oranges', param_type=ParameterFunctionType(function=aplorng_func, value_encoding=np.dtype('float32')))
# pdict.add_context(aplorng_ctxt)
#
# with mock.patch('coverage_model.coverage.log') as log_mock:
# ccov = AggregateCoverage(self.working_dir, create_guid(), 'sample complex coverage',
# reference_coverage_locs=[cova_pth, covb_pth, covc_pth],
# parameter_dictionary=pdict,
# complex_type=ComplexCoverageType.PARAMETRIC_STRICT)
#
# self.assertEquals(log_mock.warn.call_args_list[0],
# mock.call('Coverage timestamps do not match; cannot include: %s', covb_pth))
# self.assertEquals(log_mock.info.call_args_list[0],
# mock.call("Parameter '%s' from coverage '%s' already present, skipping...", 'time', covc_pth))
#
# with mock.patch('coverage_model.coverage.log') as log_mock:
# ccov = AggregateCoverage(self.working_dir, create_guid(), 'sample complex coverage',
# reference_coverage_locs=[cova_pth, cova_pth],
# parameter_dictionary=pdict,
# complex_type=ComplexCoverageType.PARAMETRIC_STRICT)
#
# self.assertEquals(log_mock.info.call_args_list[0],
# mock.call("Coverage '%s' already present; ignoring", cova_pth))
#
# with mock.patch('coverage_model.coverage.log') as log_mock:
# covb_pth = _make_cov(self.working_dir, ['second_param'], data_dict={'second_param': second_data}, make_temporal=False)
# ccov = AggregateCoverage(self.working_dir, create_guid(), 'sample complex coverage',
# reference_coverage_locs=[cova_pth, covb_pth],
# parameter_dictionary=pdict,
# complex_type=ComplexCoverageType.PARAMETRIC_STRICT)
#
# self.assertEquals(log_mock.warn.call_args_list[0],
# mock.call("Coverage '%s' does not have a temporal_parameter; ignoring", covb_pth))
#
# with mock.patch('coverage_model.coverage.log') as log_mock:
# pdict.add_context(ParameterContext('discard_me'))
# ccov = AggregateCoverage(self.working_dir, create_guid(), 'sample complex coverage',
# reference_coverage_locs=[cova_pth, cova_pth],
# parameter_dictionary=pdict,
# complex_type=ComplexCoverageType.PARAMETRIC_STRICT)
# self.assertEqual(log_mock.warn.call_args_list[0],
# mock.call("Parameters stored in a AggregateCoverage must be ParameterFunctionType parameters: discarding '%s'", 'discard_me'))
#
# def test_temporal_aggregation(self):
# size = 100000
# first_times = np.arange(0, size, dtype='float32')
# first_data = np.arange(size, size*2, dtype='float32')
#
# second_times = np.arange(size, size*2, dtype='float32')
# second_data = np.arange(size*4, size*5, dtype='float32')
#
# third_times = np.arange(size*2, size*3, dtype='float32')
# third_data = np.arange(size*7, size*8, dtype='float32')
#
# cova_pth = _make_cov(self.working_dir, ['data_all', 'data_a'], nt=size,
# data_dict={'time': first_times, 'data_all': first_data, 'data_a': first_data})
# covb_pth = _make_cov(self.working_dir, ['data_all', 'data_b'], nt=size,
# data_dict={'time': second_times, 'data_all': second_data, 'data_b': second_data})
# covc_pth = _make_cov(self.working_dir, ['data_all', 'data_c'], nt=size,
# data_dict={'time': third_times, 'data_all': third_data, 'data_c': third_data})
#
# comp_cov = AggregateCoverage(self.working_dir, create_guid(), 'sample temporal aggregation coverage',
# reference_coverage_locs=[cova_pth, covb_pth, covc_pth],
# complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
#
# tvals = comp_cov.get_time_values()
# self.assertTrue(np.array_equal(tvals, np.arange(3*size, dtype='float32')))
# self.assertEqual(tvals.dtype, np.dtype('float32')) # np.array_equal does NOT check dtype!!
#
# all_data = np.empty(0, dtype='float32')
# all_data = np.append(all_data, first_data)
# all_data = np.append(all_data, second_data)
# all_data = np.append(all_data, third_data)
# np.testing.assert_array_equal(comp_cov.get_parameter_values('data_all').get_data()['data_all'], all_data)
# np.testing.assert_array_equal(comp_cov.get_parameter_values('data_all', time_segment=(0, size-1)).get_data()['data_all'], first_data)
#
# fill_arr = np.empty(size, dtype='float32')
# fill_arr[:] = -9999.0
# np.testing.assert_array_equal(comp_cov.get_parameter_values('data_a').get_data()['data_a'], first_data)
#
# np.testing.assert_array_equal(comp_cov.get_parameter_values('data_b').get_data()['data_b'], second_data)
#
# np.testing.assert_array_equal(comp_cov.get_parameter_values('data_c').get_data()['data_c'], third_data)
#
# # Check that the head_coverage_path is correct
# self.assertEqual(comp_cov.head_coverage_path, covc_pth)
#
# # Add some data to the last coverage (covc) and make sure it comes in
# cov_c = AbstractCoverage.load(covc_pth, mode='a')
# addnl_c_data = np.arange(size*8, size*9, dtype='float32')
# p_dict = {
# 'time': np.arange(size*3, size*4),
# 'data_all': addnl_c_data,
# 'data_c': addnl_c_data
# }
# cov_c.set_parameter_values(make_parameter_data_dict(p_dict))
# cov_c.close()
#
# # # Refresh the complex coverage
# # comp_cov.refresh()
#
# all_data = np.append(all_data, addnl_c_data)
# np.testing.assert_array_equal(comp_cov.get_parameter_values('data_all').get_data()['data_all'], all_data)
#
# third_data = np.append(third_data, addnl_c_data)
# np.testing.assert_array_equal(comp_cov.get_parameter_values('data_c').get_data()['data_c'], third_data)
#
# # Check that the head_coverage_path is still correct
# self.assertEqual(comp_cov.head_coverage_path, covc_pth)
#
# def test_temporal_aggregation_warnings(self):
# size = 100000
# first_times = np.arange(0, size, dtype='float32')
# first_data = np.arange(size, size*2, dtype='float32')
#
# second_times = np.arange(size, size*2, dtype='float32')
# second_data = np.arange(size*4, size*5, dtype='float32')
#
# third_times = np.arange(size*2, size*3, dtype='float32')
# third_data = np.arange(size*7, size*8, dtype='float32')
#
# cova_pth = _make_cov(self.working_dir, ['data_all', 'data_a'], nt=size,
# data_dict={'time': first_times, 'data_all': first_data, 'data_a': first_data})
# covb_pth = _make_cov(self.working_dir, ['data_all', 'data_b'], nt=size,
# data_dict={'time': first_times, 'data_all': second_data, 'data_b': second_data})
# covc_pth = _make_cov(self.working_dir, ['data_all', 'data_c'], nt=size,
# data_dict={'time': third_times, 'data_all': third_data, 'data_c': third_data})
#
# with mock.patch('coverage_model.coverage.log') as log_mock:
# comp_cov = AggregateCoverage(self.working_dir, create_guid(), 'sample temporal aggregation coverage',
# reference_coverage_locs=[cova_pth, covb_pth, covc_pth],
# complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
#
# self.assertEquals(log_mock.warn.call_args_list[0],
# mock.call("Coverage with time bounds '%s' already present; ignoring", (first_times.min(), first_times.max(), 0)))
#
# self.assertEquals(log_mock.info.call_args_list[4],
# mock.call("Parameter '%s' from coverage '%s' already present, skipping...", 'data_all', covc_pth))
#
# self.assertEquals(log_mock.info.call_args_list[5],
# mock.call("Parameter '%s' from coverage '%s' already present, skipping...", 'time', covc_pth))
#
# def _setup_allparams(self, size=10, num_covs=2, sequential_covs=True):
# # Setup types
# types = []
# types.append(('qtype', QuantityType()))
# types.append(('atype_n', ArrayType()))
# types.append(('atype_s', ArrayType()))
# letts='abcdefghijklmnopqrstuvwxyz'
# while len(letts) < size:
# letts += letts
# types.append(('rtype', RecordType()))
# types.append(('btype', BooleanType()))
# types.append(('ctype_n', ConstantType(QuantityType(value_encoding=np.dtype('int32')))))
# types.append(('ctype_s', ConstantType(QuantityType(value_encoding=np.dtype('S21')))))
# types.append(('crtype', ConstantRangeType(QuantityType(value_encoding=np.dtype('int16')))))
# types.append(('pftype', ParameterFunctionType(NumexprFunction('v*10', 'v*10', ['v'], {'v': 'ctype_n'}))))
# cat = {99:'empty',0:'turkey',1:'duck',2:'chicken'}
# catkeys = cat.keys()
# types.append(('cattype', CategoryType(categories=cat)))
# types.append(('sctype', SparseConstantType(fill_value=-998, value_encoding='int32')))
#
# # Make coverages
# covs = []
# cov_data = []
# for i in xrange(num_covs):
# ii = i + 1
# # Make parameters
# pdict = ParameterDictionary()
# tpc = ParameterContext('time', param_type=QuantityType(value_encoding='float32'))
# tpc.axis = AxisTypeEnum.TIME
# pdict.add_context(tpc)
# for t in types:
# pdict.add_context(ParameterContext(t[0], param_type=t[1], variability=VariabilityEnum.TEMPORAL))
#
# # Make the data
# data_dict = {}
# if sequential_covs:
# tmax = ii * size
# tmin = i * size
# tdata = np.random.random_sample(size) * (tmax - tmin) + tmin
# tdata.sort()
# else:
# tdata = np.random.random_sample(size) * (200 - 0) + 0
# tdata.sort()
# data_dict['time'] = tdata
# data_dict['atype_n'] = [[ii for a in xrange(random.choice(range(1,size)))] for r in xrange(size)]
# data_dict['atype_s'] = [np.random.bytes(np.random.randint(1,20)) for r in xrange(size)]
# data_dict['qtype'] = np.random.random_sample(size) * (50 - 10) + 10
# data_dict['rtype'] = [{letts[r]: letts[r:]} for r in xrange(size)]
# data_dict['btype'] = [random.choice([True, False]) for r in xrange(size)]
# data_dict['ctype_n'] = [ii*20] * size
# data_dict['ctype_s'] = ['const_str_{0}'.format(i)] * size
# crarr = np.empty(size, dtype=object)
# crarr[:] = [(ii*10, ii*20)]
# data_dict['crtype'] = crarr
# # data_dict['pftype'] # Calculated on demand, nothing assigned!!
# data_dict['cattype'] = [random.choice(catkeys) for r in xrange(size)]
# data_dict['sctype'] = [ii*30] * size
#
# # Create the coverage
# covs.append(_make_cov(self.working_dir, pdict, nt=size, data_dict=data_dict))
#
# # Now add values for pftype, for later comparison
# data_dict['pftype'] = [x*10 for x in data_dict['ctype_n']]
# # And update the values for cattype
# data_dict['cattype'] = [cat[k] for k in data_dict['cattype']]
#
# # Add the data_dict to the cov_data list
# cov_data.append(data_dict)
#
# return covs, cov_data
#
# def test_temporal_aggregation_all_param_types(self):
# size = 10
#
# covs, cov_data = self._setup_allparams(size=size)
#
# comp_cov = AggregateCoverage(self.working_dir, create_guid(), 'sample temporal aggregation coverage',
# reference_coverage_locs=covs,
# complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
#
# for p in comp_cov.list_parameters():
# for i in xrange(len(covs)):
# ddict = cov_data[i]
# if p in ['qtype', 'time']:
# self.assertTrue(np.allclose(comp_cov.get_parameter_values(p, slice(i*size, (i+1)*size)), ddict[p]))
# elif p == 'ctype_s':
# self.assertTrue(np.atleast_1d(comp_cov.get_parameter_values(p, slice(i*size, (i+1)*size)) == ddict[p]).all())
# else:
# self.assertTrue(np.array_equal(comp_cov.get_parameter_values(p, slice(i*size, (i+1)*size)), ddict[p]))
#
# def test_temporal_interleaved(self):
# num_times = 200
# tpc = num_times / 2
#
# first_times = np.random.random_sample(tpc) * (20 - 0) + 0
# # first_times = np.array([0,1,2,5,6,10,11,13,14,16], dtype='float32')
# first_times.sort()
# first_data = np.arange(tpc, dtype='float32') * 0.2
# first_full = np.random.random_sample(tpc) * (80 - 60) + 60
#
# second_times = np.random.random_sample(tpc) * (20 - 0) + 0
# # second_times = np.array([3,4,7,8,9,12,15,17,18,19], dtype='float32')
# second_times.sort()
# second_data = np.random.random_sample(tpc) * (50 - 10) + 10
# second_full = np.random.random_sample(tpc) * (80 - 60) + 60
#
# log.debug('\nCov A info:\n%s\n%s\n%s\n---------', first_times, first_data, first_full)
# log.debug('\nCov B info:\n%s\n%s\n%s\n---------', second_times, second_data, second_full)
#
# cova_pth = _make_cov(self.working_dir,
# ['first_param', 'full_param'], nt=tpc,
# data_dict={'time': first_times, 'first_param': first_data, 'full_param': first_full})
# covb_pth = _make_cov(self.working_dir,
# ['second_param', 'full_param'], nt=tpc,
# data_dict={'time': second_times, 'second_param': second_data, 'full_param': second_full})
#
# # Instantiate the AggregateCoverage
# ccov = AggregateCoverage(self.working_dir, create_guid(), 'sample complex coverage',
# reference_coverage_locs=[cova_pth, covb_pth],
# complex_type=ComplexCoverageType.TEMPORAL_INTERLEAVED)
#
# self.assertEqual(ccov.list_parameters(), ['first_param', 'full_param', 'second_param', 'time'])
#
# self.assertEqual(ccov.temporal_parameter_name, 'time')
#
# time_interleave = np.append(first_times, second_times)
# sort_i = np.argsort(time_interleave)
# self.assertTrue(np.allclose(ccov.get_time_values(), time_interleave[sort_i]))
#
# full_interleave = np.append(first_full, second_full)
# self.assertTrue(np.allclose(ccov.get_parameter_values('full_param'), full_interleave[sort_i]))
#
# first_interleave = np.empty((num_times,))
# first_interleave.fill(ccov.get_parameter_context('first_param').fill_value)
# first_interleave[:tpc] = first_data
# self.assertTrue(np.allclose(ccov.get_parameter_values('first_param'), first_interleave[sort_i]))
#
# second_interleave = np.empty((num_times,))
# second_interleave.fill(ccov.get_parameter_context('second_param').fill_value)
# second_interleave[tpc:] = second_data
# self.assertTrue(np.allclose(ccov.get_parameter_values('second_param'), second_interleave[sort_i]))
#
# def test_temporal_interleaved_all_param_types(self):
# size = 10
# covs, cov_data = self._setup_allparams(size=size, num_covs=5, sequential_covs=False)
#
# comp_cov = AggregateCoverage(self.working_dir, create_guid(), 'sample temporal aggregation coverage',
# reference_coverage_locs=covs,
# complex_type=ComplexCoverageType.TEMPORAL_INTERLEAVED)
#
# time_interleave = np.empty(0)
# for c in cov_data:
# time_interleave = np.append(time_interleave, c['time'])
# sort_i = np.argsort(time_interleave)
#
# self.assertTrue(np.allclose(comp_cov.get_time_values(), time_interleave[sort_i]))
#
# def test_temporal_broadcast(self):
# first_num_times = 14
# first_time_data= np.arange(first_num_times, dtype='int64')
# first_data = utils.get_random_sample(first_num_times, 10, 20)
# first_common_data = utils.get_random_sample(first_num_times, 20, 40)
#
# second_num_times = 4
# second_time_data = np.array([0, 4, 11, 12], dtype='int64')
# second_data = utils.get_random_sample(second_num_times, 40, 50)
# second_common_data = utils.get_random_sample(second_num_times, 20, 40)
#
# third_num_times = 5
# third_time_data = np.array([3, 6, 8, 15, 19], dtype='int64')
# third_data = utils.get_random_sample(third_num_times, 70, 80)
# third_common_data = utils.get_random_sample(third_num_times, 20, 40)
#
#
# cova_pth = _make_cov(self.working_dir, ['A', 'Common'], data_dict={'A': first_data, 'Common': first_common_data, 'time': first_time_data}, nt=first_num_times)
# covb_pth = _make_cov(self.working_dir, ['B', 'Common'], data_dict={'B': second_data, 'Common': second_common_data, 'time': second_time_data}, nt=second_num_times)
# covc_pth = _make_cov(self.working_dir, ['C', 'Common'], data_dict={'C': third_data, 'Common': third_common_data, 'time': third_time_data}, nt=third_num_times)
#
# cov = AggregateCoverage(self.working_dir, create_guid(), 'sample temporal broadcast coverage',
# reference_coverage_locs=[cova_pth, covb_pth, covc_pth],
# complex_type=ComplexCoverageType.TEMPORAL_BROADCAST)
#
# a_data = first_data
# b_data = np.empty(first_num_times, dtype='float32')
# b_data[:4] = second_data[0]
# b_data[4:11] = second_data[1]
# b_data[11:12] = second_data[2]
# b_data[12:] = second_data[3]
#
# c_data = np.empty(first_num_times, dtype='float32')
# c_data[0:3] = cov.get_parameter_context('B').param_type.fill_value
# c_data[3:6] = third_data[0]
# c_data[6:8] = third_data[1]
# c_data[8:] = third_data[2]
#
# self.assertTrue(np.allclose(cov.get_parameter_values('A'), a_data))
# self.assertTrue(np.allclose(cov.get_parameter_values('B'), b_data))
# self.assertTrue(np.allclose(cov.get_parameter_values('C'), c_data))
#
# bnds = cov.get_data_bounds()
# self.assertEqual(bnds['A'], (a_data.min(), a_data.max()))
# self.assertEqual(bnds['B'], (b_data.min(), b_data.max()))
# self.assertEqual(bnds['C'], (c_data[3:].min(), c_data.max()))
#
# def test_append_reference_coverage(self):
# size = 100000
# first_times = np.arange(0, size, dtype='float32')
# first_data = np.arange(size, size*2, dtype='float32')
#
# second_times = np.arange(size, size*2, dtype='float32')
# second_data = np.arange(size*4, size*5, dtype='float32')
#
# third_times = np.arange(size*2, size*3, dtype='float32')
# third_data = np.arange(size*7, size*8, dtype='float32')
#
# cova_pth = _make_cov(self.working_dir, ['data_all', 'data_a'], nt=size,
# data_dict={'time': first_times, 'data_all': first_data, 'data_a': first_data})
# covb_pth = _make_cov(self.working_dir, ['data_all', 'data_b'], nt=size,
# data_dict={'time': second_times, 'data_all': second_data, 'data_b': second_data})
# covc_pth = _make_cov(self.working_dir, ['data_all', 'data_c'], nt=size,
# data_dict={'time': third_times, 'data_all': third_data, 'data_c': third_data})
#
# comp_cov = AggregateCoverage(self.working_dir, create_guid(), 'sample temporal aggregation coverage',
# reference_coverage_locs=[cova_pth, covb_pth],
# complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
#
# # Verify stuff worked normally...
# tvals = comp_cov.get_time_values()
# self.assertTrue(np.array_equal(tvals, np.arange(2*size, dtype='float32')))
#
# # Append the new coverage
# comp_cov.append_reference_coverage(covc_pth)
#
# # Now make sure the new data is there!
# tvals = comp_cov.get_time_values()
# self.assertTrue(np.array_equal(tvals, np.arange(3*size, dtype='float32')))
#
# def test_head_coverage_path(self):
# size = 10
# first_times = np.arange(0, size, dtype='float32')
# first_data = np.arange(size, size*2, dtype='float32')
#
# second_times = np.arange(size, size*2, dtype='float32')
# second_data = np.arange(size*4, size*5, dtype='float32')
#
# cova_pth = _make_cov(self.working_dir, ['data_all', 'data_a'], nt=size,
# data_dict={'time': first_times, 'data_all': first_data, 'data_a': first_data})
# covb_pth = _make_cov(self.working_dir, ['data_all', 'data_b'], nt=size,
# data_dict={'time': second_times, 'data_all': second_data, 'data_b': second_data})
#
# comp_cov = AggregateCoverage(self.working_dir, create_guid(), 'sample temporal aggregation coverage',
# reference_coverage_locs=[cova_pth, covb_pth],
# complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
#
# if comp_cov._persistence_layer.master_manager.storage_type() != 'db':
# # TODO: correct this once ViewCoverage is worked out
# # View coverage construction doesn't work for DB-based metadata. View Coverage will be modified in the future
# self.assertTrue(True)
# else:
# comp_cov2 = AggregateCoverage(self.working_dir, create_guid(), 'sample temporal aggregation coverage',
# reference_coverage_locs=[cova_pth],
# complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
#
# comp_cov3 = AggregateCoverage(self.working_dir, create_guid(), 'sample temporal broadcast coverage',
# reference_coverage_locs=[comp_cov2.persistence_dir, covb_pth],
# complex_type=ComplexCoverageType.TEMPORAL_BROADCAST)
#
# # Ensure the correct path is returned from AggregateCoverage.head_coverage_path in CC --> [SC & SC] scenario
# self.assertEqual(comp_cov.head_coverage_path, covb_pth)
#
# # Ensure the correct path is returned from AggregateCoverage.head_coverage_path in CC --> [SC & VC] scenario
# self.assertEqual(comp_cov2.head_coverage_path, cova_pth)
# self.assertEqual(comp_cov3.head_coverage_path, covb_pth)
#
# # Ensure the correct path is returned from AggregateCoverage.head_coverage_path in CC --> [SC & CC --> [VC & SC]] scenario
# self.assertEqual(comp_cov3.head_coverage_path, covb_pth)
#
# def make_timeseries_cov(self):
# cova_pth = _make_cov(self.working_dir, ['value_set'], data_dict={'time': np.arange(10,20),'value_set':np.ones(10)})
# cov = AbstractCoverage.load(cova_pth)
# pdict = cov.parameter_dictionary
#
# ccov = AggregateCoverage(self.working_dir, create_guid(), 'complex coverage',
# reference_coverage_locs=[],
# parameter_dictionary=pdict,
# complex_type=ComplexCoverageType.TIMESERIES)
# return ccov
#
# def test_striding(self):
# pass
#
# def test_get_all_parameters(self):
# size = 10
# first_times = np.arange(0, size, dtype='float32')
# first_data = np.arange(size, size*2, dtype='float32')
#
# second_times = np.arange(size, size*2, dtype='float32')
# second_data = np.arange(size*4, size*5, dtype='float32')
#
# cova_pth = _make_cov(self.working_dir, ['data_all', 'data_a'], nt=size,
# data_dict={'time': first_times, 'data_all': first_data, 'data_a': first_data})
# covb_pth = _make_cov(self.working_dir, ['data_all', 'data_b'], nt=size,
# data_dict={'time': second_times, 'data_all': second_data, 'data_b': second_data})
#
# comp_cov = AggregateCoverage(self.working_dir, create_guid(), 'sample temporal aggregation coverage',
# reference_coverage_locs=[cova_pth, covb_pth],
# complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
#
# pvals = comp_cov.get_parameter_values().get_data()
# expected_data_all = np.empty(size*2)
# expected_data_all[0:size] = first_data
# expected_data_all[size:size*2] = second_data
# np.testing.assert_array_equal(expected_data_all, pvals['data_all'])
# expected_times = np.empty(size*2)
# expected_times[0:size] = first_times
# expected_times[size:size*2] = second_times
# np.testing.assert_array_equal(expected_times, pvals['time'])
# expected_data_a = np.empty(size*2)
# expected_data_a[0:size] = first_data
# expected_data_a[size:size*2] = np.NaN
# np.testing.assert_array_equal(expected_data_a, pvals['data_a'])
# expected_data_b = np.empty(size*2)
# expected_data_b[0:size] = np.NaN
# expected_data_b[size:size*2] = second_data
# np.testing.assert_array_equal(expected_data_b, pvals['data_b'])
#
# def test_get_some_parameters(self):
# size = 10
# first_times = np.arange(0, size, dtype='float32')
# first_data = np.arange(size, size*2, dtype='float32')
#
# second_times = np.arange(size, size*2, dtype='float32')
# second_data = np.arange(size*4, size*5, dtype='float32')
#
# cova_pth = _make_cov(self.working_dir, ['data_all', 'data_a'], nt=size,
# data_dict={'time': first_times, 'data_all': first_data, 'data_a': first_data})
# covb_pth = _make_cov(self.working_dir, ['data_all', 'data_b'], nt=size,
# data_dict={'time': second_times, 'data_all': second_data, 'data_b': second_data})
#
# comp_cov = AggregateCoverage(self.working_dir, create_guid(), 'sample temporal aggregation coverage',
# reference_coverage_locs=[cova_pth, covb_pth],
# complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
#
# pvals = comp_cov.get_parameter_values('data_a').get_data()
# np.testing.assert_array_equal(first_times, pvals['time'])
# np.testing.assert_array_equal(first_data, pvals['data_a'])
#
# pvals = comp_cov.get_parameter_values('data_b').get_data()
# np.testing.assert_array_equal(second_times, pvals['time'])
# np.testing.assert_array_equal(second_data, pvals['data_b'])
#
# pvals = comp_cov.get_parameter_values(['data_b', 'data_all']).get_data()
# expected_data_all = np.empty(size*2)
# expected_data_all[0:size] = first_data
# expected_data_all[size:size*2] = second_data
# np.testing.assert_array_equal(expected_data_all, pvals['data_all'])
# expected_times = np.empty(size*2)
# expected_times[0:size] = first_times
# expected_times[size:size*2] = second_times
# np.testing.assert_array_equal(expected_times, pvals['time'])
# expected_data_b = np.empty(size*2)
# expected_data_b[0:size] = np.NaN
# expected_data_b[size:size*2] = second_data
# np.testing.assert_array_equal(expected_data_b, pvals['data_b'])
def create_all_params():
'''
[
'density',
'time',
'lon',
'tempwat_l1',
'tempwat_l0',
'condwat_l1',
'condwat_l0',
'preswat_l1',
'preswat_l0',
'lat',
'pracsal'
]
@return: dict mapping parameter name to its ParameterContext
'''
contexts = {}
t_ctxt = ParameterContext('time', param_type=QuantityType(value_encoding=np.dtype('int64')))
t_ctxt.axis = AxisTypeEnum.TIME
t_ctxt.uom = 'seconds since 01-01-1900'
contexts['time'] = t_ctxt
lat_ctxt = ParameterContext('lat', param_type=ConstantType(QuantityType(value_encoding=np.dtype('float32'))), fill_value=-9999)
lat_ctxt.axis = AxisTypeEnum.LAT
lat_ctxt.uom = 'degree_north'
contexts['lat'] = lat_ctxt
lon_ctxt = ParameterContext('lon', param_type=ConstantType(QuantityType(value_encoding=np.dtype('float32'))), fill_value=-9999)
lon_ctxt.axis = AxisTypeEnum.LON
lon_ctxt.uom = 'degree_east'
contexts['lon'] = lon_ctxt
p_range_ctxt = ParameterContext('p_range', param_type=ConstantType(QuantityType(value_encoding=np.dtype('float32'))), fill_value=-9999)
p_range_ctxt.uom = '1'
contexts['p_range'] = p_range_ctxt
# Independent Parameters
# Temperature - values expected to be the decimal results of conversion from hex
temp_ctxt = ParameterContext('temperature', param_type=QuantityType(value_encoding=np.dtype('float32')), fill_value=-9999)
temp_ctxt.uom = 'counts'
contexts['temperature'] = temp_ctxt
# Conductivity - values expected to be the decimal results of conversion from hex
cond_ctxt = ParameterContext('conductivity', param_type=QuantityType(value_encoding=np.dtype('float32')), fill_value=-9999)
cond_ctxt.uom = 'counts'
contexts['conductivity'] = cond_ctxt
# Pressure - values expected to be the decimal results of conversion from hex
press_ctxt = ParameterContext('pressure', param_type=QuantityType(value_encoding=np.dtype('float32')), fill_value=-9999)
press_ctxt.uom = 'counts'
contexts['pressure'] = press_ctxt
# Dependent Parameters
# tempwat_l1 = (tempwat_l0 / 10000) - 10
tl1_func = '(T / 10000) - 10'
tl1_pmap = {'T': 'temperature'}
expr = NumexprFunction('seawater_temperature', tl1_func, ['T'], param_map=tl1_pmap)
tempL1_ctxt = ParameterContext('seawater_temperature', param_type=ParameterFunctionType(function=expr), variability=VariabilityEnum.TEMPORAL)
tempL1_ctxt.uom = 'deg_C'
contexts['seawater_temperature'] = tempL1_ctxt
# condwat_l1 = (condwat_l0 / 100000) - 0.5
cl1_func = '(C / 100000) - 0.5'
cl1_pmap = {'C': 'conductivity'}
expr = NumexprFunction('seawater_conductivity', cl1_func, ['C'], param_map=cl1_pmap)
condL1_ctxt = ParameterContext('seawater_conductivity', param_type=ParameterFunctionType(function=expr), variability=VariabilityEnum.TEMPORAL)
condL1_ctxt.uom = 'S m-1'
contexts['seawater_conductivity'] = condL1_ctxt
# Equation uses p_range, which is a calibration coefficient - Fixing to 679.34040721
# preswat_l1 = (preswat_l0 * p_range / (0.85 * 65536)) - (0.05 * p_range)
pl1_func = '(P * p_range / (0.85 * 65536)) - (0.05 * p_range)'
pl1_pmap = {'P': 'pressure', 'p_range': 'p_range'}
expr = NumexprFunction('seawater_pressure', pl1_func, ['P', 'p_range'], param_map=pl1_pmap)
presL1_ctxt = ParameterContext('seawater_pressure', param_type=ParameterFunctionType(function=expr), variability=VariabilityEnum.TEMPORAL)
presL1_ctxt.uom = 'dbar'
contexts['seawater_pressure'] = presL1_ctxt
# Density & practical salinity calculated using the Gibbs SeaWater library - available via the python-gsw project:
# https://code.google.com/p/python-gsw/ & http://pypi.python.org/pypi/gsw/3.0.1
# pracsal = gsw.SP_from_C((condwat_l1 * 10), tempwat_l1, preswat_l1)
owner = 'ion_functions.data.ctd_functions'
sal_func = 'ctd_pracsal'
sal_arglist = ['C', 't', 'p']
sal_pmap = {'C': 'seawater_conductivity', 't': 'seawater_temperature', 'p': 'seawater_pressure'}
sal_kwargmap = None
expr = PythonFunction('pracsal', owner, sal_func, sal_arglist, sal_kwargmap, sal_pmap)
sal_ctxt = ParameterContext('pracsal', param_type=ParameterFunctionType(expr), variability=VariabilityEnum.TEMPORAL)
sal_ctxt.uom = 'g kg-1'
contexts['pracsal'] = sal_ctxt
# absolute_salinity = gsw.SA_from_SP(pracsal, preswat_l1, longitude, latitude)
# conservative_temperature = gsw.CT_from_t(absolute_salinity, tempwat_l1, preswat_l1)
# density = gsw.rho(absolute_salinity, conservative_temperature, preswat_l1)
owner = 'ion_functions.data.ctd_functions'
dens_func = 'ctd_density'
dens_arglist = ['SP', 't', 'p', 'lat', 'lon']
dens_pmap = {'SP':'pracsal', 't':'seawater_temperature', 'p':'seawater_pressure', 'lat':'lat', 'lon':'lon'}
dens_expr = PythonFunction('density', owner, dens_func, dens_arglist, None, dens_pmap)
dens_ctxt = ParameterContext('density', param_type=ParameterFunctionType(dens_expr), variability=VariabilityEnum.TEMPORAL)
dens_ctxt.uom = 'kg m-3'
contexts['density'] = dens_ctxt
return contexts
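# Illustrative sketch, not part of the original module: one way to assemble the
# contexts returned above into a ParameterDictionary, mirroring the
# pdict.add_context() usage in the commented-out tests earlier in this file.
# It assumes ParameterDictionary is importable from coverage_model as it is
# elsewhere; the helper name is hypothetical and nothing here runs at import time.
def _example_build_parameter_dictionary():
    pdict = ParameterDictionary()
    for ctxt in create_all_params().values():
        # The 'time' context is already flagged as temporal via its
        # AxisTypeEnum.TIME axis, so a plain add_context() is sufficient.
        pdict.add_context(ctxt)
    return pdict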
|
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
QtCore = GafferUI._qtImport( "QtCore" )
QtGui = GafferUI._qtImport( "QtGui" )
## This Widget simply displays an IECore.Spline object. For representation and editing
# of SplinePlugs use a SplineEditor instead.
class SplineWidget( GafferUI.Widget ) :
DrawMode = IECore.Enum.create( "Invalid", "Ramp", "Splines" )
def __init__( self, spline=None, drawMode=DrawMode.Splines, **kw ) :
# using QFrame rather than QWidget because it supports computing the contentsRect() based on
# the stylesheet.
GafferUI.Widget.__init__( self, QtGui.QFrame(), **kw )
self._qtWidget().setSizePolicy( QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding )
self._qtWidget().setObjectName( "gafferSplineWidget" )
self.setDrawMode( drawMode )
if spline is None :
spline = IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
(
( 0, 0 ),
( 0, 0 ),
( 1, 1 ),
( 1, 1 ),
)
)
self.setSpline( spline )
self.__displayTransformChangedConnection = GafferUI.DisplayTransform.changedSignal().connect( Gaffer.WeakMethod( self.__displayTransformChanged ) )
self._qtWidget().paintEvent = Gaffer.WeakMethod( self.__paintEvent )
def setSpline( self, spline ) :
try :
if spline==self.__spline :
return
except :
pass
self.__spline = spline
self.__splinesToDraw = None
self.__gradientToDraw = None
self._qtWidget().update()
def getSpline( self ) :
return self.__spline
def setDrawMode( self, drawMode ) :
try :
if drawMode==self.__drawMode :
return
except :
pass
self.__drawMode = drawMode
self._qtWidget().update()
def getDrawMode( self ) :
return self.__drawMode
def __paintEvent( self, event ) :
painter = QtGui.QPainter( self._qtWidget() )
painter.setRenderHint( QtGui.QPainter.Antialiasing )
o = QtGui.QStyleOption()
o.initFrom( self._qtWidget() )
self._qtWidget().style().drawPrimitive( QtGui.QStyle.PE_Widget, o, painter, self._qtWidget() )
if self.__drawMode == self.DrawMode.Ramp :
self.__paintRamp( painter )
elif self.__drawMode == self.DrawMode.Splines :
self.__paintSplines( painter )
def __paintRamp( self, painter ) :
if self.__gradientToDraw is None :
self.__gradientToDraw = QtGui.QLinearGradient( 0, 0, 1, 0 )
self.__gradientToDraw.setCoordinateMode( self.__gradientToDraw.ObjectBoundingMode )
displayTransform = GafferUI.DisplayTransform.get()
numStops = 500
for i in range( 0, numStops ) :
t = float( i ) / ( numStops - 1 )
c = self.__spline( t )
if isinstance( c, float ) :
c = IECore.Color3f( c, c, c )
else :
c = IECore.Color3f( c[0], c[1], c[2] )
c = displayTransform( c )
self.__gradientToDraw.setColorAt( t, self._qtColor( c ) )
brush = QtGui.QBrush( self.__gradientToDraw )
rect = self._qtWidget().contentsRect()
painter.fillRect( rect, brush )
def __paintSplines( self, painter ) :
# update the evaluation of our splines if necessary
numPoints = 50
if not self.__splinesToDraw :
self.__splinesToDraw = []
interval = self.__spline.interval()
if isinstance( self.__spline, IECore.Splineff ) :
spline = IECore.Struct()
spline.color = IECore.Color3f( 1 )
spline.path = QtGui.QPainterPath()
for i in range( 0, numPoints ) :
t = float( i ) / ( numPoints - 1 )
tt = interval[0] + (interval[1] - interval[0]) * t
c = self.__spline( tt )
if i==0 :
spline.path.moveTo( t, c )
else :
spline.path.lineTo( t, c )
self.__splinesToDraw.append( spline )
else :
for i in range( 0, self.__spline( 0 ).dimensions() ) :
spline = IECore.Struct()
if i==3 :
spline.color = IECore.Color3f( 1 )
else :
c = IECore.Color3f( 0 )
c[i] = 1
spline.color = c
spline.path = QtGui.QPainterPath()
self.__splinesToDraw.append( spline )
for i in range( 0, numPoints ) :
t = float( i ) / ( numPoints - 1 )
tt = interval[0] + (interval[1] - interval[0]) * t
c = self.__spline( tt )
for j in range( 0, c.dimensions() ) :
if i == 0 :
self.__splinesToDraw[j].path.moveTo( t, c[j] )
else :
self.__splinesToDraw[j].path.lineTo( t, c[j] )
self.__splineBound = QtCore.QRectF()
for s in self.__splinesToDraw :
self.__splineBound = self.__splineBound.united( s.path.controlPointRect() )
# draw the splines
rect = self._qtWidget().contentsRect()
transform = QtGui.QTransform()
if self.__splineBound.width() :
transform.translate( rect.x(), 0 )
transform.scale( rect.width() / self.__splineBound.width(), 1 )
if self.__splineBound.height() :
transform.translate( 0, rect.y() + rect.height() )
transform.scale( 1, -rect.height() / self.__splineBound.height() )
painter.setTransform( transform )
for s in self.__splinesToDraw :
pen = QtGui.QPen( self._qtColor( s.color ) )
painter.setPen( pen )
painter.drawPath( s.path )
def __displayTransformChanged( self ) :
self._qtWidget().update()
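# Illustrative usage sketch, not part of the original file : constructing a
# SplineWidget from a simple greyscale spline and switching draw modes. All
# names come from the class above; the helper is hypothetical and is never
# called at import time.
def _exampleSplineWidget() :

	spline = IECore.Splineff(
		IECore.CubicBasisf.catmullRom(),
		(
			( 0, 0 ),
			( 0, 0 ),
			( 1, 1 ),
			( 1, 1 ),
		)
	)

	widget = SplineWidget( spline, drawMode = SplineWidget.DrawMode.Ramp )
	# setDrawMode() and setSpline() invalidate the cached gradient/paths and
	# trigger a repaint via _qtWidget().update().
	widget.setDrawMode( SplineWidget.DrawMode.Splines )

	return widget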
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""General functions which are useful throughout this project."""
import json
import logging
import os
import re
import time
import urllib
from apiclient import discovery
from apiclient import errors
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
from google.appengine.api import users
from google.appengine.ext import ndb
import httplib2
from oauth2client import client
from dashboard.common import stored_object
SHERIFF_DOMAINS_KEY = 'sheriff_domains_key'
IP_WHITELIST_KEY = 'ip_whitelist'
SERVICE_ACCOUNT_KEY = 'service_account'
EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
_PROJECT_ID_KEY = 'project_id'
_DEFAULT_CUSTOM_METRIC_VAL = 1
def _GetNowRfc3339():
"""Returns the current time formatted per RFC 3339."""
return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
def TickMonitoringCustomMetric(metric_name):
"""Increments the stackdriver custom metric with the given name.
This is used for cron job monitoring; if these metrics stop being received
an alert mail is sent. For more information on custom metrics, see
https://cloud.google.com/monitoring/custom-metrics/using-custom-metrics
Args:
metric_name: The name of the metric being monitored.
"""
credentials = client.GoogleCredentials.get_application_default()
monitoring = discovery.build(
'monitoring', 'v3', credentials=credentials)
now = _GetNowRfc3339()
project_id = stored_object.Get(_PROJECT_ID_KEY)
points = [{
'interval': {
'startTime': now,
'endTime': now,
},
'value': {
'int64Value': _DEFAULT_CUSTOM_METRIC_VAL,
},
}]
write_request = monitoring.projects().timeSeries().create(
name='projects/%s' % project_id,
body={'timeSeries': [{
'metric': {
'type': 'custom.googleapis.com/%s' % metric_name,
},
'points': points
}]})
write_request.execute()
def TestPath(key):
"""Returns the test path for a TestMetadata from an ndb.Key.
A "test path" is just a convenient string representation of an ndb.Key.
Each test path corresponds to one ndb.Key, which can be used to get an
entity.
Args:
key: An ndb.Key where all IDs are string IDs.
Returns:
A test path string.
"""
if key.kind() == 'Test':
# The Test key looks like ('Master', 'name', 'Bot', 'name', 'Test', 'name', ...)
# Pull out every other entry and join with '/' to form the path.
return '/'.join(key.flat()[1::2])
assert key.kind() == 'TestMetadata' or key.kind() == 'TestContainer'
return key.id()
def TestSuiteName(test_key):
"""Returns the test suite name for a given TestMetadata key."""
assert test_key.kind() == 'TestMetadata'
parts = test_key.id().split('/')
return parts[2]
def TestKey(test_path):
"""Returns the ndb.Key that corresponds to a test path."""
if test_path is None:
return None
path_parts = test_path.split('/')
if path_parts is None:
return None
if len(path_parts) < 3:
key_list = [('Master', path_parts[0])]
if len(path_parts) > 1:
key_list += [('Bot', path_parts[1])]
return ndb.Key(pairs=key_list)
return ndb.Key('TestMetadata', test_path)
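# Illustrative sketch, not part of the original module: the mapping between a
# test path string and its ndb.Key, using TestKey()/TestPath() defined above.
# The example path is hypothetical and the helper is never called.
def _ExampleTestPathRoundTrip():
  key = TestKey('ChromiumPerf/linux-release/sunspider/Total')
  # Paths with three or more parts become a single TestMetadata key whose
  # string id is the whole path; TestPath() just returns that id.
  assert key.kind() == 'TestMetadata'
  assert TestPath(key) == 'ChromiumPerf/linux-release/sunspider/Total'
  # Shorter paths fall back to Master (and optionally Bot) keys.
  assert TestKey('ChromiumPerf').kind() == 'Master'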
def TestMetadataKey(key_or_string):
"""Convert the given (Test or TestMetadata) key or test_path string to a
TestMetadata key.
We are in the process of converting from Test entities to TestMetadata.
Unfortunately, we have trillions of Row entities which have a parent_test
property set to a Test, and it's not possible to migrate them all. So we
use the Test key in Row queries, and convert between the old and new format.
Note that the Test entities which the keys refer to may be deleted; the
queries over keys still work.
"""
if key_or_string is None:
return None
if isinstance(key_or_string, basestring):
return ndb.Key('TestMetadata', key_or_string)
if key_or_string.kind() == 'TestMetadata':
return key_or_string
if key_or_string.kind() == 'Test':
return ndb.Key('TestMetadata', TestPath(key_or_string))
def OldStyleTestKey(key_or_string):
"""Get the key for the old style Test entity corresponding to this key or
test_path.
We are in the process of converting from Test entities to TestMetadata.
Unfortunately, we have trillions of Row entities which have a parent_test
property set to a Test, and it's not possible to migrate them all. So we
use the Test key in Row queries, and convert between the old and new format.
Note that the Test entities which the keys refer to may be deleted; the
queries over keys still work.
"""
if key_or_string is None:
return None
elif isinstance(key_or_string, ndb.Key) and key_or_string.kind() == 'Test':
return key_or_string
if (isinstance(key_or_string, ndb.Key) and
key_or_string.kind() == 'TestMetadata'):
key_or_string = key_or_string.id()
assert isinstance(key_or_string, basestring)
path_parts = key_or_string.split('/')
key_parts = ['Master', path_parts[0], 'Bot', path_parts[1]]
for part in path_parts[2:]:
key_parts += ['Test', part]
return ndb.Key(*key_parts)
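# Illustrative sketch, not part of the original module: converting between the
# new TestMetadata key and the old-style Test key for the same hypothetical
# test path, using the two helpers above.
def _ExampleOldAndNewKeys():
  new_key = TestMetadataKey('M/b/suite/metric')
  # -> ndb.Key('TestMetadata', 'M/b/suite/metric')
  old_key = OldStyleTestKey(new_key)
  # -> ndb.Key('Master', 'M', 'Bot', 'b', 'Test', 'suite', 'Test', 'metric')
  return new_key, old_key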
def MostSpecificMatchingPattern(test, pattern_data_tuples):
"""Takes a test and a list of (pattern, data) tuples and returns the data
for the pattern which most closely matches the test. It does this by
ordering the matching patterns, and choosing the one with the most specific
top level match.
For example, if there was a test Master/Bot/Foo/Bar, then:
*/*/*/Bar would match more closely than */*/*/*
*/*/*/Bar would match more closely than */*/*/Bar.*
*/*/*/Bar.* would match more closely than */*/*/*
"""
matching_patterns = []
for p, v in pattern_data_tuples:
if not TestMatchesPattern(test, p):
continue
matching_patterns.append([p, v])
if not matching_patterns:
return None
if type(test) is ndb.Key:
test_path = TestPath(test)
else:
test_path = test.test_path
test_path_parts = test_path.split('/')
# This ensures the ordering puts the closest match at index 0
def CmpPatterns(a, b):
a_parts = a[0].split('/')
b_parts = b[0].split('/')
for a_part, b_part, test_part in reversed(
zip(a_parts, b_parts, test_path_parts)):
# We favour a specific match over a partial match, and a partial
# match over a catch-all * match.
if a_part == b_part:
continue
if a_part == test_part:
return -1
if b_part == test_part:
return 1
if a_part != '*':
return -1
if b_part != '*':
return 1
return 0
matching_patterns.sort(cmp=CmpPatterns)
return matching_patterns[0][1]
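# Illustrative sketch, not part of the original module: for the docstring's
# example test Master/Bot/Foo/Bar, an exact last-part match beats a catch-all.
# The pattern/data pairs are hypothetical.
def _ExampleMostSpecificMatch(test_key):
  patterns = [
      ('*/*/*/*', 'catch-all'),
      ('*/*/*/Bar', 'exact'),
  ]
  # Both patterns match the test, but 'Bar' equals the test's last path part,
  # so CmpPatterns orders it first and its data is returned.
  return MostSpecificMatchingPattern(test_key, patterns)  # -> 'exact'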
def TestMatchesPattern(test, pattern):
"""Checks whether a test matches a test path pattern.
Args:
test: A TestMetadata entity or a TestMetadata key.
pattern: A test path which can include wildcard characters (*).
Returns:
True if it matches, False otherwise.
"""
if not test:
return False
if type(test) is ndb.Key:
test_path = TestPath(test)
else:
test_path = test.test_path
test_path_parts = test_path.split('/')
pattern_parts = pattern.split('/')
if len(test_path_parts) != len(pattern_parts):
return False
for test_path_part, pattern_part in zip(test_path_parts, pattern_parts):
if not _MatchesPatternPart(pattern_part, test_path_part):
return False
return True
def _MatchesPatternPart(pattern_part, test_path_part):
"""Checks whether a pattern (possibly with a *) matches the given string.
Args:
pattern_part: A string which may contain a wildcard (*).
test_path_part: Another string.
Returns:
True if it matches, False otherwise.
"""
if pattern_part == '*' or pattern_part == test_path_part:
return True
if '*' not in pattern_part:
return False
# Escape any other special non-alphanumeric characters.
pattern_part = re.escape(pattern_part)
# There are not supposed to be any other asterisk characters, so all
# occurrences of backslash-asterisk can now be replaced with dot-asterisk.
re_pattern = re.compile('^' + pattern_part.replace('\\*', '.*') + '$')
return re_pattern.match(test_path_part)
def TimestampMilliseconds(datetime):
"""Returns the number of milliseconds since the epoch."""
return int(time.mktime(datetime.timetuple()) * 1000)
def GetTestContainerKey(test):
"""Gets the TestContainer key for the given TestMetadata.
Args:
test: Either a TestMetadata entity or its ndb.Key.
Returns:
ndb.Key('TestContainer', test path)
"""
test_path = None
if type(test) is ndb.Key:
test_path = TestPath(test)
else:
test_path = test.test_path
return ndb.Key('TestContainer', test_path)
def GetMulti(keys):
"""Gets a list of entities from a list of keys.
If this user is logged in, this is the same as ndb.get_multi. However, if the
user is logged out and any of the data is internal only, an AssertionError
will be raised.
Args:
keys: A list of ndb entity keys.
Returns:
A list of entities, but no internal_only ones if the user is not logged in.
"""
if IsInternalUser():
return ndb.get_multi(keys)
# Not logged in. Check each key individually.
entities = []
for key in keys:
try:
entities.append(key.get())
except AssertionError:
continue
return entities
def MinimumAlertRange(alerts):
"""Returns the intersection of the revision ranges for a set of alerts.
Args:
alerts: An iterable of Alerts.
Returns:
A pair (start, end) if there is a valid minimum range,
or None if the ranges are not overlapping.
"""
ranges = [(a.start_revision, a.end_revision) for a in alerts if a]
return MinimumRange(ranges)
def MinimumRange(ranges):
"""Returns the intersection of the given ranges, or None."""
if not ranges:
return None
starts, ends = zip(*ranges)
start, end = (max(starts), min(ends))
if start > end:
return None
return start, end
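# Illustrative sketch, not part of the original module: MinimumRange() returns
# the overlap of revision ranges, or None when they are disjoint. Example
# values are hypothetical.
def _ExampleMinimumRange():
  assert MinimumRange([(10, 20), (15, 30)]) == (15, 20)
  assert MinimumRange([(10, 20), (25, 30)]) is None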
def IsInternalUser():
"""Checks whether the user should be able to see internal-only data."""
username = users.get_current_user()
if not username:
return False
cached = GetCachedIsInternalUser(username)
if cached is not None:
return cached
is_internal_user = IsGroupMember(identity=username, group='chromeperf-access')
SetCachedIsInternalUser(username, is_internal_user)
return is_internal_user
def GetCachedIsInternalUser(username):
return memcache.get(_IsInternalUserCacheKey(username))
def SetCachedIsInternalUser(username, value):
memcache.add(_IsInternalUserCacheKey(username), value, time=60*60*24)
def _IsInternalUserCacheKey(username):
return 'is_internal_user_%s' % username
def IsGroupMember(identity, group):
"""Checks if a user is a group member of using chrome-infra-auth.appspot.com.
Args:
identity: User email address.
group: Group name.
Returns:
True if confirmed to be a member, False otherwise.
"""
cached = GetCachedIsGroupMember(identity, group)
if cached is not None:
return cached
try:
discovery_url = ('https://chrome-infra-auth.appspot.com'
'/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest')
service = discovery.build(
'auth', 'v1', discoveryServiceUrl=discovery_url,
http=ServiceAccountHttp())
request = service.membership(identity=identity, group=group)
response = request.execute()
is_member = response['is_member']
SetCachedIsGroupMember(identity, group, is_member)
return is_member
except (errors.HttpError, KeyError, AttributeError) as e:
logging.error('Failed to check membership of %s: %s', identity, e)
return False
def GetCachedIsGroupMember(identity, group):
return memcache.get(_IsGroupMemberCacheKey(identity, group))
def SetCachedIsGroupMember(identity, group, value):
memcache.add(_IsGroupMemberCacheKey(identity, group), value, time=60*60*24)
def _IsGroupMemberCacheKey(identity, group):
return 'is_group_member_%s_%s' % (identity, group)
def ServiceAccountHttp(*args, **kwargs):
"""Returns the Credentials of the service account if available."""
account_details = stored_object.Get(SERVICE_ACCOUNT_KEY)
if not account_details:
raise KeyError('Service account credentials not found.')
client.logger.setLevel(logging.WARNING)
credentials = client.SignedJwtAssertionCredentials(
service_account_name=account_details['client_email'],
private_key=account_details['private_key'],
scope=EMAIL_SCOPE)
http = httplib2.Http(*args, **kwargs)
credentials.authorize(http)
return http
def IsValidSheriffUser():
"""Checks whether the user should be allowed to triage alerts."""
user = users.get_current_user()
sheriff_domains = stored_object.Get(SHERIFF_DOMAINS_KEY)
if user:
domain_matched = sheriff_domains and any(
user.email().endswith('@' + domain) for domain in sheriff_domains)
return domain_matched or IsGroupMember(
identity=user, group='project-chromium-tryjob-access')
return False
def GetIpWhitelist():
"""Returns a list of IP address strings in the whitelist."""
return stored_object.Get(IP_WHITELIST_KEY)
def BisectConfigPythonString(config):
"""Turns a bisect config dict into a properly formatted Python string.
Args:
config: A bisect config dict (see start_try_job.GetBisectConfig)
Returns:
A config string suitable to store in a TryJob entity.
"""
return 'config = %s\n' % json.dumps(
config, sort_keys=True, indent=2, separators=(',', ': '))
def GetRequestId():
"""Returns the request log ID which can be used to find a specific log."""
return os.environ.get('REQUEST_LOG_ID')
def Validate(expected, actual):
"""Generic validator for expected keys, values, and types.
Values are also considered equal if |actual| can be converted to |expected|'s
type. For instance:
Validate([3], '3')  # Passes without raising.
See utils_test.py for more examples.
Args:
expected: Either a list of expected values or a dictionary of expected
keys and type. A dictionary can contain a list of expected values.
actual: A value.
"""
def IsValidType(expected, actual):
if type(expected) is type and type(actual) is not expected:
try:
expected(actual)
except ValueError:
return False
return True
def IsInList(expected, actual):
for value in expected:
try:
if type(value)(actual) == value:
return True
except ValueError:
pass
return False
if not expected:
return
expected_type = type(expected)
actual_type = type(actual)
if expected_type is list:
if not IsInList(expected, actual):
raise ValueError('Invalid value. Expected one of the following: '
'%s. Actual: %s.' % (','.join(expected), actual))
elif expected_type is dict:
if actual_type is not dict:
raise ValueError('Invalid type. Expected: %s. Actual: %s.'
% (expected_type, actual_type))
missing = set(expected.keys()) - set(actual.keys())
if missing:
raise ValueError('Missing the following properties: %s'
% ','.join(missing))
for key in expected:
Validate(expected[key], actual[key])
elif not IsValidType(expected, actual):
raise ValueError('Invalid type. Expected: %s. Actual: %s.' %
(expected, actual_type))
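# Illustrative sketch, not part of the original module: Validate() returns None
# on success and raises ValueError on mismatch; values that convert to the
# expected type count as matches. Example values are hypothetical.
def _ExampleValidate():
  Validate([3], '3')                      # OK: '3' converts to the listed 3.
  Validate({'rows': int}, {'rows': '5'})  # OK: '5' converts to int.
  try:
    Validate({'rows': int}, {})           # Raises: required key is missing.
  except ValueError:
    pass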
def FetchURL(request_url, skip_status_code=False):
"""Wrapper around URL fetch service to make request.
Args:
request_url: URL of request.
skip_status_code: Skips return code check when True, default is False.
Returns:
Response object return by URL fetch, otherwise None when there's an error.
"""
logging.info('URL being fetched: ' + request_url)
try:
response = urlfetch.fetch(request_url)
except urlfetch_errors.DeadlineExceededError:
logging.error('Deadline exceeded error checking %s', request_url)
return None
except urlfetch_errors.DownloadError as err:
# DownloadError is raised to indicate a non-specific failure when there
# was not a 4xx or 5xx status code.
logging.error(err)
return None
if skip_status_code:
return response
elif response.status_code != 200:
logging.error(
'ERROR %s checking %s', response.status_code, request_url)
return None
return response
def GetBuildDetailsFromStdioLink(stdio_link):
no_details = (None, None, None, None, None)
m = re.match(r'\[(.+?)\]\((.+?)\)', stdio_link)
if not m:
# This wasn't the markdown-style link we were expecting.
return no_details
_, link = m.groups()
m = re.match(
r'(https{0,1}://.*/([^\/]*)/builders/)'
r'([^\/]+)/builds/(\d+)/steps/([^\/]+)', link)
if not m:
# This wasn't a buildbot formatted link.
return no_details
base_url, master, bot, buildnumber, step = m.groups()
bot = urllib.unquote(bot)
return base_url, master, bot, buildnumber, step
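# Illustrative sketch, not part of the original module: the markdown-style
# stdio links handled above look like '[name](url)', where the url is a
# buildbot step link. The URL below is hypothetical.
def _ExampleStdioLinkParsing():
  link = ('[stdio](https://build.chromium.org/p/chromium.perf/builders/'
          'Linux%20Builder/builds/123/steps/sunspider/logs/stdio)')
  base_url, master, bot, buildnumber, step = GetBuildDetailsFromStdioLink(link)
  # master='chromium.perf', bot='Linux Builder' (URL-unquoted),
  # buildnumber='123', step='sunspider'.
  return base_url, master, bot, buildnumber, step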
def GetStdioLinkFromRow(row):
"""Returns the markdown-style buildbot stdio link.
Due to crbug.com/690630, many row entities have this set to "a_a_stdio_uri"
instead of "a_stdio_uri".
"""
return (getattr(row, 'a_stdio_uri', None) or
getattr(row, 'a_a_stdio_uri', None))
def GetBuildbotStatusPageUriFromStdioLink(stdio_link):
base_url, _, bot, buildnumber, _ = GetBuildDetailsFromStdioLink(
stdio_link)
if not base_url:
# Can't parse status page
return None
return '%s%s/builds/%s' % (base_url, urllib.quote(bot), buildnumber)
def GetLogdogLogUriFromStdioLink(stdio_link):
base_url, master, bot, buildnumber, step = GetBuildDetailsFromStdioLink(
stdio_link)
if not base_url:
# Can't parse status page
return None
bot = re.sub(r'[ \(\)]', '_', bot)
s_param = urllib.quote('chrome/bb/%s/%s/%s/+/recipes/steps/%s/0/stdout' % (
master, bot, buildnumber, step), safe='')
return 'https://luci-logdog.appspot.com/v/?s=%s' % s_param
def GetRowKey(testmetadata_key, revision):
test_container_key = GetTestContainerKey(testmetadata_key)
return ndb.Key('Row', revision, parent=test_container_key)
|
|
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# Current Operation Coverage:
# SqlResources: 28/28
import unittest
import azure.mgmt.cosmosdb
from devtools_testutils import AzureMgmtTestCase, RandomNameResourceGroupPreparer, ResourceGroupPreparer
AZURE_LOCATION = 'eastus'
class MgmtCosmosDBTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtCosmosDBTest, self).setUp()
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.cosmosdb.CosmosDBManagementClient
)
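    # Note (illustrative, not part of the original file): the begin_* calls used
    # throughout these tests return long-running-operation pollers (LROPoller in
    # track-2 Azure SDKs, to the best of our understanding); calling .result()
    # blocks until the service-side operation completes. A minimal sketch of the
    # pattern used below:
    #
    #   poller = self.mgmt_client.sql_resources.begin_delete_sql_database(...)
    #   poller.result()  # waits for the delete to complete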
@unittest.skip('hard to test')
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_sql_resource(self, resource_group):
RESOURCE_GROUP = resource_group.name
ACCOUNT_NAME = "myaccountxxyyzzz"
DATABASE_NAME = "myDatabase"
#--------------------------------------------------------------------------
# /DatabaseAccounts/put/CosmosDBDatabaseAccountCreateMin[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"kind": "GlobalDocumentDB",
"database_account_offer_type": "Standard",
"locations": [
{
"location_name": "eastus",
"is_zone_redundant": False,
"failover_priority": "0"
}
],
"api_properties": {}
}
result = self.mgmt_client.database_accounts.begin_create_or_update(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, create_update_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/put/CosmosDBSqlDatabaseCreateUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"resource": {
"id": DATABASE_NAME
},
"options": {
"throughput": "2000"
}
}
result = self.mgmt_client.sql_resources.begin_create_update_sql_database(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, create_update_sql_database_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/put/CosmosDBSqlDatabaseThroughputUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"resource": {
"throughput": "400"
}
}
result = self.mgmt_client.sql_resources.begin_update_sql_database_throughput(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, update_throughput_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/get/CosmosDBSqlDatabaseThroughputGet[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.get_sql_database_throughput(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME)
#--------------------------------------------------------------------------
# /SqlResources/get/CosmosDBSqlDatabaseGet[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.get_sql_database(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME)
#--------------------------------------------------------------------------
# /SqlResources/get/CosmosDBSqlDatabaseList[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.list_sql_databases(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)
#--------------------------------------------------------------------------
# /SqlResources/post/CosmosDBSqlDatabaseMigrateToAutoscale[post]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_migrate_sql_database_to_autoscale(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/post/CosmosDBSqlDatabaseMigrateToManualThroughput[post]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_migrate_sql_database_to_manual_throughput(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/delete/CosmosDBSqlDatabaseDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_delete_sql_database(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /DatabaseAccounts/delete/CosmosDBDatabaseAccountDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.database_accounts.begin_delete(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)
result = result.result()
@unittest.skip('hard to test')
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_sql_container(self, resource_group):
RESOURCE_GROUP = resource_group.name
ACCOUNT_NAME = "myaccountxxyyzzz"
DATABASE_NAME = "myDatabase"
CONTAINER_NAME = "myContainer"
#--------------------------------------------------------------------------
# /DatabaseAccounts/put/CosmosDBDatabaseAccountCreateMin[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"kind": "GlobalDocumentDB",
"database_account_offer_type": "Standard",
"locations": [
{
"location_name": "eastus",
"is_zone_redundant": False,
"failover_priority": "0"
}
],
"api_properties": {}
}
result = self.mgmt_client.database_accounts.begin_create_or_update(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, create_update_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/put/CosmosDBSqlDatabaseCreateUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"resource": {
"id": DATABASE_NAME
},
"options": {
"throughput": 1000
}
}
result = self.mgmt_client.sql_resources.begin_create_update_sql_database(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, create_update_sql_database_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/put/CosmosDBSqlContainerCreateUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"resource": {
"id": CONTAINER_NAME,
"indexing_policy": {
"indexing_mode": "Consistent",
"automatic": True,
"included_paths": [
{
"path": "/*",
"indexes": [
{
"kind": "Range",
"data_type": "String",
"precision": "-1"
},
{
"kind": "Range",
"data_type": "Number",
"precision": "-1"
}
]
}
],
"excluded_paths": []
},
"partition_key": {
"paths": [
"/AccountNumber"
],
"kind": "Hash"
},
"default_ttl": "100",
"unique_key_policy": {
"unique_keys": [
{
"paths": [
"/testPath"
]
}
]
},
"conflict_resolution_policy": {
"mode": "LastWriterWins",
"conflict_resolution_path": "/path"
}
},
"options": {
"throughput": "2000"
}
}
result = self.mgmt_client.sql_resources.begin_create_update_sql_container(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME, create_update_sql_container_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/put/CosmosDBSqlContainerThroughputUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"resource": {
"throughput": "400"
}
}
result = self.mgmt_client.sql_resources.begin_update_sql_container_throughput(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME, update_throughput_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/get/CosmosDBSqlContainerThroughputGet[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.get_sql_container_throughput(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME)
#--------------------------------------------------------------------------
# /SqlResources/get/CosmosDBSqlContainerGet[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.get_sql_container(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME)
#--------------------------------------------------------------------------
# /SqlResources/get/CosmosDBSqlContainerList[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.list_sql_containers(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME)
#--------------------------------------------------------------------------
# /SqlResources/post/CosmosDBSqlContainerMigrateToAutoscale[post]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_migrate_sql_container_to_autoscale(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/post/CosmosDBSqlContainerMigrateToManualThroughput[post]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_migrate_sql_container_to_manual_throughput(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/delete/CosmosDBSqlContainerDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_delete_sql_container(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/delete/CosmosDBSqlDatabaseDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_delete_sql_database(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /DatabaseAccounts/delete/CosmosDBDatabaseAccountDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.database_accounts.begin_delete(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)
result = result.result()
@unittest.skip('hard to test')
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_sql_trigger(self, resource_group):
RESOURCE_GROUP = resource_group.name
ACCOUNT_NAME = "myaccountxxyyzzz"
DATABASE_NAME = "myDatabase"
CONTAINER_NAME = "myContainer"
TRIGGER_NAME = "myTrigger"
#--------------------------------------------------------------------------
# /DatabaseAccounts/put/CosmosDBDatabaseAccountCreateMin[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"kind": "GlobalDocumentDB",
"database_account_offer_type": "Standard",
"locations": [
{
"location_name": "eastus",
"is_zone_redundant": False,
"failover_priority": "0"
}
],
"api_properties": {}
}
result = self.mgmt_client.database_accounts.begin_create_or_update(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, create_update_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/put/CosmosDBSqlDatabaseCreateUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"resource": {
"id": DATABASE_NAME
},
"options": {
"throughput": 1000
}
}
result = self.mgmt_client.sql_resources.begin_create_update_sql_database(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, create_update_sql_database_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/put/CosmosDBSqlContainerCreateUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"resource": {
"id": CONTAINER_NAME,
"indexing_policy": {
"indexing_mode": "Consistent",
"automatic": True,
"included_paths": [
{
"path": "/*",
"indexes": [
{
"kind": "Range",
"data_type": "String",
"precision": "-1"
},
{
"kind": "Range",
"data_type": "Number",
"precision": "-1"
}
]
}
],
"excluded_paths": []
},
"partition_key": {
"paths": [
"/AccountNumber"
],
"kind": "Hash"
},
"default_ttl": "100",
"unique_key_policy": {
"unique_keys": [
{
"paths": [
"/testPath"
]
}
]
},
"conflict_resolution_policy": {
"mode": "LastWriterWins",
"conflict_resolution_path": "/path"
}
},
"options": {}
}
result = self.mgmt_client.sql_resources.begin_create_update_sql_container(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME, create_update_sql_container_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/put/CosmosDBSqlTriggerCreateUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"resource": {
"id": TRIGGER_NAME,
"body": "body",
"trigger_type": "Pre",
"trigger_operation": "All"
},
"options": {}
}
result = self.mgmt_client.sql_resources.begin_create_update_sql_trigger(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME, trigger_name=TRIGGER_NAME, create_update_sql_trigger_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/get/CosmosDBSqlTriggerGet[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.get_sql_trigger(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME, trigger_name=TRIGGER_NAME)
#--------------------------------------------------------------------------
# /SqlResources/get/CosmosDBSqlTriggerList[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.list_sql_triggers(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME)
#--------------------------------------------------------------------------
# /SqlResources/delete/CosmosDBSqlTriggerDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_delete_sql_trigger(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME, trigger_name=TRIGGER_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/delete/CosmosDBSqlContainerDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_delete_sql_container(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/delete/CosmosDBSqlDatabaseDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_delete_sql_database(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /DatabaseAccounts/delete/CosmosDBDatabaseAccountDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.database_accounts.begin_delete(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)
result = result.result()
@unittest.skip('hard to test')
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_sql_stored_procedure(self, resource_group):
RESOURCE_GROUP = resource_group.name
ACCOUNT_NAME = "myaccountxxyyzzz"
DATABASE_NAME = "myDatabase"
CONTAINER_NAME = "myContainer"
STORED_PROCEDURE_NAME = "myStoredProcedure"
#--------------------------------------------------------------------------
# /DatabaseAccounts/put/CosmosDBDatabaseAccountCreateMin[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"kind": "GlobalDocumentDB",
"database_account_offer_type": "Standard",
"locations": [
{
"location_name": "eastus",
"is_zone_redundant": False,
"failover_priority": "0"
}
],
"api_properties": {}
}
result = self.mgmt_client.database_accounts.begin_create_or_update(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, create_update_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/put/CosmosDBSqlDatabaseCreateUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"resource": {
"id": DATABASE_NAME
},
"options": {
"throughput": 1000
}
}
result = self.mgmt_client.sql_resources.begin_create_update_sql_database(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, create_update_sql_database_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/put/CosmosDBSqlContainerCreateUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"resource": {
"id": CONTAINER_NAME,
"indexing_policy": {
"indexing_mode": "Consistent",
"automatic": True,
"included_paths": [
{
"path": "/*",
"indexes": [
{
"kind": "Range",
"data_type": "String",
"precision": "-1"
},
{
"kind": "Range",
"data_type": "Number",
"precision": "-1"
}
]
}
],
"excluded_paths": []
},
"partition_key": {
"paths": [
"/AccountNumber"
],
"kind": "Hash"
},
"default_ttl": "100",
"unique_key_policy": {
"unique_keys": [
{
"paths": [
"/testPath"
]
}
]
},
"conflict_resolution_policy": {
"mode": "LastWriterWins",
"conflict_resolution_path": "/path"
}
},
"options": {}
}
result = self.mgmt_client.sql_resources.begin_create_update_sql_container(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME, create_update_sql_container_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/put/CosmosDBSqlStoredProcedureCreateUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"resource": {
"id": STORED_PROCEDURE_NAME,
"body": "body"
},
"options": {}
}
result = self.mgmt_client.sql_resources.begin_create_update_sql_stored_procedure(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME, stored_procedure_name=STORED_PROCEDURE_NAME, create_update_sql_stored_procedure_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/get/CosmosDBSqlStoredProcedureGet[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.get_sql_stored_procedure(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME, stored_procedure_name=STORED_PROCEDURE_NAME)
#--------------------------------------------------------------------------
# /SqlResources/get/CosmosDBSqlStoredProcedureList[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.list_sql_stored_procedures(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME)
#--------------------------------------------------------------------------
# /SqlResources/delete/CosmosDBSqlStoredProcedureDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_delete_sql_stored_procedure(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME, stored_procedure_name=STORED_PROCEDURE_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/delete/CosmosDBSqlContainerDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_delete_sql_container(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/delete/CosmosDBSqlDatabaseDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_delete_sql_database(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /DatabaseAccounts/delete/CosmosDBDatabaseAccountDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.database_accounts.begin_delete(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)
result = result.result()
@unittest.skip('hard to test')
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_sql_defined_function(self, resource_group):
RESOURCE_GROUP = resource_group.name
ACCOUNT_NAME = "myaccountxxyyzzz"
DATABASE_NAME = "myDatabase"
CONTAINER_NAME = "myContainer"
USER_DEFINED_FUNCTION_NAME = "myUserDefinedFunction"
#--------------------------------------------------------------------------
# /DatabaseAccounts/put/CosmosDBDatabaseAccountCreateMin[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"kind": "GlobalDocumentDB",
"database_account_offer_type": "Standard",
"locations": [
{
"location_name": "eastus",
"is_zone_redundant": False,
"failover_priority": "0"
}
],
"api_properties": {}
}
result = self.mgmt_client.database_accounts.begin_create_or_update(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, create_update_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/put/CosmosDBSqlDatabaseCreateUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"resource": {
"id": DATABASE_NAME
},
"options": {
"throughput": 1000
}
}
result = self.mgmt_client.sql_resources.begin_create_update_sql_database(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, create_update_sql_database_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/put/CosmosDBSqlContainerCreateUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"resource": {
"id": CONTAINER_NAME,
"indexing_policy": {
"indexing_mode": "Consistent",
"automatic": True,
"included_paths": [
{
"path": "/*",
"indexes": [
{
"kind": "Range",
"data_type": "String",
"precision": "-1"
},
{
"kind": "Range",
"data_type": "Number",
"precision": "-1"
}
]
}
],
"excluded_paths": []
},
"partition_key": {
"paths": [
"/AccountNumber"
],
"kind": "Hash"
},
"default_ttl": "100",
"unique_key_policy": {
"unique_keys": [
{
"paths": [
"/testPath"
]
}
]
},
"conflict_resolution_policy": {
"mode": "LastWriterWins",
"conflict_resolution_path": "/path"
}
},
"options": {}
}
result = self.mgmt_client.sql_resources.begin_create_update_sql_container(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME, create_update_sql_container_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/put/CosmosDBSqlUserDefinedFunctionCreateUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"resource": {
"id": USER_DEFINED_FUNCTION_NAME,
"body": "body"
},
"options": {}
}
result = self.mgmt_client.sql_resources.begin_create_update_sql_user_defined_function(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME, user_defined_function_name=USER_DEFINED_FUNCTION_NAME, create_update_sql_user_defined_function_parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/get/CosmosDBSqlUserDefinedFunctionGet[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.get_sql_user_defined_function(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME, user_defined_function_name=USER_DEFINED_FUNCTION_NAME)
#--------------------------------------------------------------------------
# /SqlResources/get/CosmosDBSqlUserDefinedFunctionList[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.list_sql_user_defined_functions(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME)
#--------------------------------------------------------------------------
# /SqlResources/delete/CosmosDBSqlUserDefinedFunctionDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_delete_sql_user_defined_function(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME, user_defined_function_name=USER_DEFINED_FUNCTION_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/delete/CosmosDBSqlContainerDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_delete_sql_container(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME, container_name=CONTAINER_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /SqlResources/delete/CosmosDBSqlDatabaseDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.sql_resources.begin_delete_sql_database(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, database_name=DATABASE_NAME)
result = result.result()
#--------------------------------------------------------------------------
# /DatabaseAccounts/delete/CosmosDBDatabaseAccountDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.database_accounts.begin_delete(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)
result = result.result()
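# ---------------------------------------------------------------------------
# Minimal sketch (an addition, not part of the recorded tests) showing how the
# mgmt_client exercised above can be built outside the test harness. It assumes
# the azure-identity and azure-mgmt-cosmosdb packages and a real subscription
# id; resource names are placeholders.
# ---------------------------------------------------------------------------
def _cosmosdb_sql_database_sketch(subscription_id, resource_group_name,
                                  account_name, database_name):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.cosmosdb import CosmosDBManagementClient
    client = CosmosDBManagementClient(DefaultAzureCredential(), subscription_id)
    # Every begin_* call returns an LROPoller; result() blocks until the
    # long-running operation finishes, exactly as in the tests above.
    poller = client.sql_resources.begin_create_update_sql_database(
        resource_group_name=resource_group_name,
        account_name=account_name,
        database_name=database_name,
        create_update_sql_database_parameters={
            "location": "eastus",
            "resource": {"id": database_name},
            "options": {"throughput": 400},
        },
    )
    return poller.result()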
|
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
sys.path.append("..")
import unittest
import math
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
class TestROIAlignOp(XPUOpTest):
def set_data(self):
self.init_test_case()
self.make_rois()
self.calc_roi_align()
self.inputs = {
'X': self.x,
'ROIs': (self.rois[:, 1:5], self.rois_lod),
}
self.attrs = {
'spatial_scale': self.spatial_scale,
'pooled_height': self.pooled_height,
'pooled_width': self.pooled_width,
'sampling_ratio': self.sampling_ratio
}
self.outputs = {'Out': self.out_data}
def init_test_case(self):
self.batch_size = 3
self.channels = 3
self.height = 8
self.width = 6
# n, c, h, w
self.x_dim = (self.batch_size, self.channels, self.height, self.width)
self.spatial_scale = 1.0 / 2.0
self.pooled_height = 2
self.pooled_width = 2
self.sampling_ratio = -1
self.x = np.random.random(self.x_dim).astype('float32')
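    # pre_calc collects, for every output bin of an ROI, the four neighbouring
    # pixel values (bilinear_pos) and the matching bilinear weights (bilinear_w)
    # of each sampling point, so calc_roi_align only has to multiply and average.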
def pre_calc(self, x_i, roi_xmin, roi_ymin, roi_bin_grid_h, roi_bin_grid_w,
bin_size_h, bin_size_w):
count = roi_bin_grid_h * roi_bin_grid_w
bilinear_pos = np.zeros(
[self.channels, self.pooled_height, self.pooled_width, count, 4],
np.float32)
bilinear_w = np.zeros(
[self.pooled_height, self.pooled_width, count, 4], np.float32)
        # ph indexes the pooled output height, pw the pooled output width.
        for ph in range(self.pooled_height):
            for pw in range(self.pooled_width):
c = 0
for iy in range(roi_bin_grid_h):
y = roi_ymin + ph * bin_size_h + (iy + 0.5) * \
bin_size_h / roi_bin_grid_h
for ix in range(roi_bin_grid_w):
x = roi_xmin + pw * bin_size_w + (ix + 0.5) * \
bin_size_w / roi_bin_grid_w
if y < -1.0 or y > self.height or \
x < -1.0 or x > self.width:
continue
if y <= 0:
y = 0
if x <= 0:
x = 0
y_low = int(y)
x_low = int(x)
if y_low >= self.height - 1:
y = y_high = y_low = self.height - 1
else:
y_high = y_low + 1
if x_low >= self.width - 1:
x = x_high = x_low = self.width - 1
else:
x_high = x_low + 1
ly = y - y_low
lx = x - x_low
hy = 1 - ly
hx = 1 - lx
for ch in range(self.channels):
bilinear_pos[ch, ph, pw, c, 0] = x_i[ch, y_low,
x_low]
bilinear_pos[ch, ph, pw, c, 1] = x_i[ch, y_low,
x_high]
bilinear_pos[ch, ph, pw, c, 2] = x_i[ch, y_high,
x_low]
bilinear_pos[ch, ph, pw, c, 3] = x_i[ch, y_high,
x_high]
bilinear_w[ph, pw, c, 0] = hy * hx
bilinear_w[ph, pw, c, 1] = hy * lx
bilinear_w[ph, pw, c, 2] = ly * hx
bilinear_w[ph, pw, c, 3] = ly * lx
c = c + 1
return bilinear_pos, bilinear_w
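    # Reference ROI Align: scale each ROI by spatial_scale, split it into
    # pooled_height x pooled_width bins, sample every bin on a
    # roi_bin_grid_h x roi_bin_grid_w grid, bilinearly interpolate each sample
    # and average the samples of a bin to produce the output value.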
def calc_roi_align(self):
self.out_data = np.zeros(
(self.rois_num, self.channels, self.pooled_height,
self.pooled_width)).astype('float32')
for i in range(self.rois_num):
roi = self.rois[i]
roi_batch_id = int(roi[0])
x_i = self.x[roi_batch_id]
roi_xmin = roi[1] * self.spatial_scale
roi_ymin = roi[2] * self.spatial_scale
roi_xmax = roi[3] * self.spatial_scale
roi_ymax = roi[4] * self.spatial_scale
roi_width = max(roi_xmax - roi_xmin, 1)
roi_height = max(roi_ymax - roi_ymin, 1)
bin_size_h = float(roi_height) / float(self.pooled_height)
bin_size_w = float(roi_width) / float(self.pooled_width)
roi_bin_grid_h = self.sampling_ratio if self.sampling_ratio > 0 else \
math.ceil(roi_height / self.pooled_height)
roi_bin_grid_w = self.sampling_ratio if self.sampling_ratio > 0 else \
math.ceil(roi_width / self.pooled_width)
count = int(roi_bin_grid_h * roi_bin_grid_w)
pre_size = count * self.pooled_width * self.pooled_height
bilinear_pos, bilinear_w = self.pre_calc(x_i, roi_xmin, roi_ymin,
int(roi_bin_grid_h),
int(roi_bin_grid_w),
bin_size_h, bin_size_w)
for ch in range(self.channels):
align_per_bin = (bilinear_pos[ch] * bilinear_w).sum(axis=-1)
output_val = align_per_bin.mean(axis=-1)
self.out_data[i, ch, :, :] = output_val
def make_rois(self):
rois = []
self.rois_lod = [[]]
for bno in range(self.batch_size):
self.rois_lod[0].append(bno + 1)
for i in range(bno + 1):
x1 = np.random.random_integers(
0, self.width // self.spatial_scale - self.pooled_width)
y1 = np.random.random_integers(
0, self.height // self.spatial_scale - self.pooled_height)
x2 = np.random.random_integers(x1 + self.pooled_width,
self.width // self.spatial_scale)
y2 = np.random.random_integers(
y1 + self.pooled_height, self.height // self.spatial_scale)
roi = [bno, x1, y1, x2, y2]
rois.append(roi)
self.rois_num = len(rois)
self.rois = np.array(rois).astype("float32")
def setUp(self):
self.op_type = "roi_align"
self.set_data()
def test_check_output(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad(self):
if core.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, {'X'}, 'Out')
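# Variant of the test that also feeds the per-image ROI counts through the
# 'RoisNum' input instead of relying on the LoD information alone.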
class TestROIAlignInLodOp(TestROIAlignOp):
def set_data(self):
self.init_test_case()
self.make_rois()
self.calc_roi_align()
seq_len = self.rois_lod[0]
self.inputs = {
'X': self.x,
'ROIs': (self.rois[:, 1:5], self.rois_lod),
'RoisNum': np.asarray(seq_len).astype('int32')
}
self.attrs = {
'spatial_scale': self.spatial_scale,
'pooled_height': self.pooled_height,
'pooled_width': self.pooled_width,
'sampling_ratio': self.sampling_ratio
}
self.outputs = {'Out': self.out_data}
if __name__ == '__main__':
unittest.main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ApplicationsOperations(object):
"""ApplicationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.servicefabric.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
cluster_name, # type: str
application_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationResource"
"""Gets a Service Fabric application resource.
Get a Service Fabric application resource created or in the process of being created in the
Service Fabric cluster resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster resource.
:type cluster_name: str
:param application_name: The name of the application resource.
:type application_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationResource, or the result of cls(response)
:rtype: ~azure.mgmt.servicefabric.models.ApplicationResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'applicationName': self._serialize.url("application_name", application_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
application_name, # type: str
parameters, # type: "_models.ApplicationResource"
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationResource"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'applicationName': self._serialize.url("application_name", application_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ApplicationResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
cluster_name, # type: str
application_name, # type: str
parameters, # type: "_models.ApplicationResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ApplicationResource"]
"""Creates or updates a Service Fabric application resource.
Create or update a Service Fabric application resource with the specified name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster resource.
:type cluster_name: str
:param application_name: The name of the application resource.
:type application_name: str
:param parameters: The application resource.
:type parameters: ~azure.mgmt.servicefabric.models.ApplicationResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ApplicationResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.servicefabric.models.ApplicationResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
application_name=application_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'applicationName': self._serialize.url("application_name", application_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
application_name, # type: str
parameters, # type: "_models.ApplicationResourceUpdate"
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationResource"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'applicationName': self._serialize.url("application_name", application_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ApplicationResourceUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
cluster_name, # type: str
application_name, # type: str
parameters, # type: "_models.ApplicationResourceUpdate"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ApplicationResource"]
"""Updates a Service Fabric application resource.
Update a Service Fabric application resource with the specified name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster resource.
:type cluster_name: str
:param application_name: The name of the application resource.
:type application_name: str
:param parameters: The application resource for patch operations.
:type parameters: ~azure.mgmt.servicefabric.models.ApplicationResourceUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ApplicationResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.servicefabric.models.ApplicationResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
application_name=application_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'applicationName': self._serialize.url("application_name", application_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
application_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'applicationName': self._serialize.url("application_name", application_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
cluster_name, # type: str
application_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a Service Fabric application resource.
Delete a Service Fabric application resource with the specified name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster resource.
:type cluster_name: str
:param application_name: The name of the application resource.
:type application_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
application_name=application_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'applicationName': self._serialize.url("application_name", application_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationResourceList"
"""Gets the list of application resources created in the specified Service Fabric cluster resource.
Gets all application resources created or in the process of being created in the Service Fabric
cluster resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster resource.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationResourceList, or the result of cls(response)
:rtype: ~azure.mgmt.servicefabric.models.ApplicationResourceList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationResourceList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationResourceList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications'} # type: ignore
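# ---------------------------------------------------------------------------
# Minimal usage sketch (an addition, not generated code). ApplicationsOperations
# is normally reached through an attribute of the service client rather than
# instantiated directly; the client and credential classes below are assumed to
# come from the standard azure-mgmt-servicefabric and azure-identity packages.
# ---------------------------------------------------------------------------
def _applications_usage_sketch(subscription_id, resource_group_name,
                               cluster_name, application_name):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.servicefabric import ServiceFabricManagementClient
    client = ServiceFabricManagementClient(DefaultAzureCredential(), subscription_id)
    # Direct GET of a single application resource.
    app = client.applications.get(resource_group_name, cluster_name, application_name)
    # Non-paged list of every application resource in the cluster.
    all_apps = client.applications.list(resource_group_name, cluster_name)
    return app, all_apps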
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_storage_volume_attachment_facts
short_description: Retrieve facts about the OneView Storage Volume Attachments.
description:
- "Retrieve facts about the OneView Storage Volume Attachments. To gather facts about a specific Storage Volume
Attachment it is required to inform the option I(storageVolumeAttachmentUri). It is also possible to retrieve a
specific Storage Volume Attachment by the Server Profile and the Volume. For this option, it is required to inform
the option I(serverProfileName) and the param I(storageVolumeName) or I(storageVolumeUri)."
version_added: "2.3"
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 5.4.0"
author: "Camila Balestrin (@balestrinc)"
options:
storageVolumeAttachmentUri:
description:
- Storage Volume Attachment uri.
required: false
storageVolumeUri:
description:
- Storage Volume uri.
required: false
storageVolumeName:
description:
- Storage Volume name.
required: false
serverProfileName:
description:
- Server Profile name.
required: false
options:
description:
- "Retrieve additional facts. Options available:
C(extraUnmanagedStorageVolumes) retrieve the list of extra unmanaged storage volumes.
C(paths) retrieve all paths or a specific attachment path for the specified volume attachment. To retrieve a
specific path a C(pathUri) or a C(pathId) must be informed"
required: false
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Storage Volume Attachments
oneview_storage_volume_attachment_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
delegate_to: localhost
- debug: var=storage_volume_attachments
- name: Gather paginated, filtered and sorted facts about Storage Volume Attachments
oneview_storage_volume_attachment_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
params:
start: 0
count: 2
sort: 'name:descending'
filter: "storageVolumeUri='/rest/storage-volumes/E5B84BC8-75CF-4305-8DB5-7585A2979351'"
- debug: var=storage_volume_attachments
- name: Gather facts about a Storage Volume Attachment by Server Profile and Volume
oneview_storage_volume_attachment_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
serverProfileName: "sp-web"
storageVolumeName: "volume-test" # You could inform either the volume name or the volume uri
# storageVolumeUri: "volume-test"
delegate_to: localhost
- debug: var=storage_volume_attachments
- name: Gather facts about extra unmanaged storage volumes
oneview_storage_volume_attachment_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
options:
- extraUnmanagedStorageVolumes:
start: 0 # optional
count: '-1' # optional
filter: '' # optional
sort: '' # optional
delegate_to: localhost
- debug: var=storage_volume_attachments
- debug: var=extra_unmanaged_storage_volumes
- name: Gather facts about all paths for the specified volume attachment
oneview_storage_volume_attachment_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
serverProfileName: "sp-web"
storageVolumeUri: "/rest/storage-volumes/12345-AAA-BBBB-CCCC-121212AA"
options:
- paths
delegate_to: localhost
- debug: var=storage_volume_attachments
- debug: var=storage_volume_attachment_paths
- name: Gather facts about a specific attachment path
oneview_storage_volume_attachment_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
serverProfileName: "sp-web"
storageVolumeUri: "/rest/storage-volumes/12345-AAA-BBBB-CCCC-121212AA"
options:
- paths:
# You could inform either the path id or the path uri
pathId: '9DFC8953-15A4-4EA9-AB65-23AB12AB23' # optional
# pathUri: '/rest/storage-volume-attachments/123-123-123/paths/123-123-123'
delegate_to: localhost
- debug: var=storage_volume_attachments
- debug: var=storage_volume_attachment_paths
'''
RETURN = '''
storage_volume_attachments:
description: Has all the OneView facts about the Storage Volume Attachments.
returned: Always, but can be null.
type: dict
extra_unmanaged_storage_volumes:
description: Has facts about the extra unmanaged storage volumes.
returned: When requested, but can be null.
type: dict
storage_volume_attachment_paths:
description: Has facts about all paths or a specific attachment path for the specified volume attachment.
returned: When requested, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModule, OneViewModuleValueError
SPECIFIC_ATTACHMENT_OPTIONS = ['storageVolumeAttachmentUri', 'storageVolumeUri', 'storageVolumeName',
'serverProfileName']
class StorageVolumeAttachmentFactsModule(OneViewModule):
ATTACHMENT_KEY_REQUIRED = "Server Profile Name and Volume Name or Volume Uri are required."
def __init__(self):
argument_spec = dict(
serverProfileName=dict(required=False, type='str'),
storageVolumeAttachmentUri=dict(required=False, type='str'),
storageVolumeUri=dict(required=False, type='str'),
storageVolumeName=dict(required=False, type='str'),
options=dict(required=False, type='list'),
params=dict(required=False, type='dict'),
)
super(StorageVolumeAttachmentFactsModule, self).__init__(additional_arg_spec=argument_spec)
self.set_resource_object(self.oneview_client.storage_volume_attachments)
resource_uri = self.oneview_client.storage_volume_attachments.URI
self.__search_attachment_uri = str(resource_uri) + "?filter=storageVolumeUri='{}'"
def execute_module(self):
facts = {}
params = self.module.params
param_specific_attachment = [entry for entry in SPECIFIC_ATTACHMENT_OPTIONS if params.get(entry)]
if param_specific_attachment:
attachments = self.__get_specific_attachment(params)
self.__get_paths(attachments, self.options, facts)
else:
attachments = self.resource_client.get_all(**self.facts_params)
facts['storage_volume_attachments'] = attachments
if self.options.get('extraUnmanagedStorageVolumes'):
volumes_options = self.__get_sub_options(self.options['extraUnmanagedStorageVolumes'])
facts['extra_unmanaged_storage_volumes'] = self.resource_client.get_extra_unmanaged_storage_volumes(**volumes_options)
return dict(changed=False, ansible_facts=facts)
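    # Lookup strategy: a storageVolumeAttachmentUri is fetched directly by URI;
    # otherwise the volume URI is resolved from storageVolumeName when needed
    # and the attachments endpoint is queried filtered by that volume, which is
    # why serverProfileName plus a volume reference is mandatory in that case.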
def __get_specific_attachment(self, params):
attachment_uri = params.get('storageVolumeAttachmentUri')
if attachment_uri:
return [self.resource_client.get_by_uri(attachment_uri)]
else:
volume_uri = params.get('storageVolumeUri')
profile_name = params.get('serverProfileName')
if not profile_name or not (volume_uri or params.get('storageVolumeName')):
raise OneViewModuleValueError(self.ATTACHMENT_KEY_REQUIRED)
if not volume_uri and params.get('storageVolumeName'):
volumes = self.oneview_client.volumes.get_by('name', params.get('storageVolumeName'))
if volumes:
volume_uri = volumes[0]['uri']
uri = self.__search_attachment_uri.format(volume_uri, profile_name)
attachments = self.resource_client.get_by_uri(uri) or {}
return [attachments.data]
def __get_paths(self, attachments, options, facts):
if attachments and 'paths' in options:
paths_options = self.__get_sub_options(options['paths'])
path_id_or_uri = paths_options.get('pathId') or paths_options.get('pathUri')
if path_id_or_uri:
paths = [self.resource_client.get_paths(path_id_or_uri)]
else:
paths = self.resource_client.get_paths()
facts['storage_volume_attachment_paths'] = paths
def __get_sub_options(self, option):
return option if isinstance(option, dict) else {}
def main():
StorageVolumeAttachmentFactsModule().run()
if __name__ == '__main__':
main()
|
|
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from utils import load_externals
from mobilenets import *
from mobilenets import _obtain_input_shape, __conv_block, __depthwise_conv_block
from keras.layers import Lambda
def scaling_tf(X, input_range_type):
"""
Convert to [-1, 1].
"""
if input_range_type == 1:
# The input data range is [0, 1]. Convert to [-1, 1] by
X = X - 0.5
X = X * 2.
elif input_range_type == 2:
# The input data range is [-0.5, 0.5]. Convert to [-1,1] by
X = X * 2.
elif input_range_type == 3:
# The input data range is [-1, 1].
X = X
return X
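# A quick worked example of the mapping above (illustrative only): with
# input_range_type=1 an input of 0.0 maps to -1.0, 0.5 maps to 0.0 and 1.0
# maps to 1.0, so the whole [0, 1] range lands in [-1, 1].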
def __create_mobilenet(classes, img_input, include_top, alpha, depth_multiplier, dropout, pooling, logits):
''' Creates a MobileNet model with specified parameters
Args:
classes: Number of output classes
img_input: Input tensor or layer
include_top: Flag to include the last dense layer
alpha: width multiplier of the MobileNet.
depth_multiplier: depth multiplier for depthwise convolution
(also called the resolution multiplier)
dropout: dropout rate
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
Returns: a Keras Model
'''
x = __conv_block(img_input, 32, alpha, strides=(2, 2))
x = __depthwise_conv_block(x, 64, alpha, depth_multiplier, id=1)
x = __depthwise_conv_block(x, 128, alpha, depth_multiplier, strides=(2, 2), id=2)
x = __depthwise_conv_block(x, 128, alpha, depth_multiplier, id=3)
x = __depthwise_conv_block(x, 256, alpha, depth_multiplier, strides=(2, 2), id=4)
x = __depthwise_conv_block(x, 256, alpha, depth_multiplier, id=5)
x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, strides=(2, 2), id=6)
x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=7)
x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=8)
x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=9)
x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=10)
x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=11)
x = __depthwise_conv_block(x, 1024, alpha, depth_multiplier, strides=(2, 2), id=12)
x = __depthwise_conv_block(x, 1024, alpha, depth_multiplier, id=13)
if include_top:
if K.image_data_format() == 'channels_first':
shape = (int(1024 * alpha), 1, 1)
else:
shape = (1, 1, int(1024 * alpha))
x = GlobalAveragePooling2D()(x)
x = Reshape(shape, name='reshape_1')(x)
x = Dropout(dropout, name='dropout')(x)
x = Convolution2D(classes, (1, 1), padding='same', name='conv_preds')(x)
# Reshape from (?, 1, 1, 1000) to (?, 1000)
x = Reshape((classes,), name='reshape_2')(x)
        # Move Reshape before Activation. Otherwise, Cleverhans gets confused
        # when fetching the logits output.
if not logits:
x = Activation('softmax', name='activation')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
return x
def MobileNets(input_shape=None, alpha=1.0, depth_multiplier=1,
dropout=1e-3, include_top=True, weights='imagenet',
input_tensor=None, pooling=None, classes=1000,
logits=False, input_range_type=1, pre_filter=lambda x:x):
''' Instantiate the MobileNet architecture.
Note that only TensorFlow is supported for now,
therefore it only works with the data format
`image_data_format='channels_last'` in your Keras config
at ~/.keras/keras.json.
# Arguments
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format)).
            It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
alpha: width multiplier of the MobileNet.
depth_multiplier: depth multiplier for depthwise convolution
(also called the resolution multiplier)
dropout: dropout rate
include_top: whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
'''
if K.backend() == 'theano':
raise AttributeError('Theano backend is not currently supported, '
'as Theano does not support depthwise convolution yet.')
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as ImageNet with `include_top`'
                         ' as true, `classes` should be 1000.')
if weights == 'imagenet':
assert depth_multiplier == 1, "If imagenet weights are being loaded, depth multiplier must be 1"
assert alpha in [0.25, 0.50, 0.75, 1.0], "If imagenet weights are being loaded, alpha can be one of" \
"`0.25`, `0.50`, `0.75` or `1.0` only."
if alpha == 1.0:
alpha_text = "1_0"
elif alpha == 0.75:
alpha_text = "7_5"
elif alpha == 0.50:
alpha_text = "5_0"
else:
alpha_text = "2_5"
rows, cols = (0, 1) if K.image_data_format() == 'channels_last' else (1, 2)
rows = int(input_shape[rows])
cols = int(input_shape[cols])
assert rows == cols and rows in [None, 128, 160, 192, 224], "If imagenet weights are being loaded," \
"image must be square and be one of " \
"(128,128), (160,160), (192,192), or (224, 224)." \
"Given (%d, %d)" % (rows, cols)
# Determine proper input shape. Note, include_top is False by default, as
# input shape can be anything larger than 32x32 and the same number of parameters
# will be used.
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=32,
data_format=K.image_data_format(),
include_top=False)
# If input shape is still None, provide a default input shape
if input_shape is None:
input_shape = (224, 224, 3) if K.image_data_format() == 'channels_last' else (3, 224, 224)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Scaling
# x = __create_mobilenet(classes, img_input, include_top, alpha, depth_multiplier, dropout, pooling, logits)
x = Lambda(lambda x: scaling_tf(x, input_range_type))(img_input)
x = Lambda(pre_filter, output_shape=input_shape)(x)
x = __create_mobilenet(classes, x, include_top, alpha, depth_multiplier, dropout, pooling, logits)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='mobilenet')
# load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
            raise AttributeError("Weights for the 'channels_first' data format are not available.")
if (alpha == 1.) and (depth_multiplier == 1.):
            if include_top:
                model_name = "mobilenet_%s_%d_tf.h5" % (alpha_text, rows)
                weight_path = BASE_WEIGHT_PATH + model_name
                weights_path = get_file(model_name,
                                        weight_path,
                                        cache_subdir='models')
            else:
                model_name = "mobilenet_%s_%d_tf_no_top.h5" % (alpha_text, rows)
                weight_path = BASE_WEIGHT_PATH + model_name
                weights_path = get_file(model_name,
                                        weight_path,
                                        cache_subdir='models')
model.load_weights(weights_path)
return model
def mobilenet_imagenet_model(logits=False, input_range_type=1, pre_filter=lambda x: x):
input_shape = (224, 224, 3)
model = MobileNets(input_shape=input_shape, alpha=1.0, depth_multiplier=1,
dropout=1e-3, include_top=True, weights='imagenet',
input_tensor=None, pooling=None, classes=1000,
logits=logits, input_range_type=input_range_type, pre_filter=pre_filter)
return model
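# A minimal usage sketch, not executed on import: it assumes a TensorFlow
# backend with `channels_last` data and that the ImageNet weights can be
# fetched by `get_file`. The random 224x224x3 batch below only illustrates
# the expected input/output shapes; the helper name is ours, not part of the
# original module.
def _example_mobilenet_usage():
    import numpy as np
    model = mobilenet_imagenet_model(logits=False, input_range_type=1,
                                     pre_filter=lambda x: x)
    dummy = np.random.rand(1, 224, 224, 3).astype('float32')
    probs = model.predict(dummy)  # shape (1, 1000): softmax class probabilities
    return probs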
|
|
"""
pygn.py
pygn (pronounced "pigeon") is a simple Python client for the Gracenote Music
Web API, which can retrieve Artist, Album and Track metadata with the most
common options.
You will need a Gracenote Client ID to use this module. Please contact
developers@gracenote.com to get one.
"""
from __future__ import print_function
import xml.etree.ElementTree, json
try:
import urllib.request as urllib_request #for python 3
import urllib.parse as urllib_parse
except ImportError:
import urllib2 as urllib_request # for python 2
import urllib as urllib_parse # for python 2
# Set DEBUG to True if you want this module to print out the query and response XML
DEBUG = False
class gnmetadata(dict):
"""
This class is a dictionary containing metadata fields that are available
for the queried item.
"""
def __init__(self):
# Basic Metadata
self['track_artist_name'] = ''
self['album_artist_name'] = ''
self['album_title'] = ''
self['album_year'] = ''
self['track_title'] = ''
self['track_number'] = ''
# Descriptors
self['genre'] = {}
self['artist_origin'] = {}
self['artist_era'] = {}
self['artist_type'] = {}
self['mood'] = {}
self['tempo'] = {}
# Related Content
self['album_art_url'] = ''
self['artist_image_url'] = ''
self['artist_bio_url'] = ''
self['review_url'] = ''
# Gracenote IDs
self['album_gnid'] = ''
self['track_gnid'] = ''
#Radio ID
self['radio_id'] = ''
# External IDs: Special content rights in license required
        self['xid'] = ''
def register(clientID):
"""
This function registers an application as a user of the Gracenote service
It takes as a parameter a clientID string in the form of
"NNNNNNN-NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN" and returns a userID in a
similar format.
As the quota of number of users (installed applications or devices) is
typically much lower than the number of queries, best practices are for a
given installed application to call this only once, store the UserID in
persistent storage (e.g. filesystem), and then use these IDs for all
subsequent calls to the service.
"""
# Create XML request
query = _gnquery()
query.addQuery('REGISTER')
query.addQueryClient(clientID)
queryXML = query.toString()
# POST query
response = urllib_request.urlopen(_gnurl(clientID), queryXML)
responseXML = response.read()
# Parse response
responseTree = xml.etree.ElementTree.fromstring(responseXML)
responseElem = responseTree.find('RESPONSE')
if responseElem.attrib['STATUS'] == 'OK':
userElem = responseElem.find('USER')
userID = userElem.text
return userID
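# A minimal registration sketch, kept out of the import path: the clientID
# below is a placeholder, and the file name is ours. It follows the advice in
# the docstring above by persisting the returned userID so later calls can
# reuse it instead of registering again.
def _example_register(clientID='XXXXXXX-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'):
    userID = register(clientID)
    if userID:
        with open('gracenote_user_id.txt', 'w') as fh:
            fh.write(userID)
    return userID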
#*****************************************************************************************************************************************
# Added by Fabian in order to cover the Rhythm API
# Returns a list of gnmetadata dictionaries
def createRadio(clientID='', userID='', artist='', track='', mood='', era='', genre='', popularity ='', similarity = '', count='10'):
"""
Queries a Radio playlist
"""
if clientID=='' or userID=='':
print('ClientID and UserID are required')
return None
if artist=='' and track=='' and mood=='' and era=='' and genre=='':
print('Must query with at least one field (artist, track, genre, mood, era)')
return None
#Create XML request
query = _gnquery()
# Build the user header
query.addAuth(clientID, userID)
query.addQuery('RADIO_CREATE')
if artist!='' or track!='':
query.addTextSeed(artist,track)
if mood!='' or era!='' or genre!='':
query.addAttributeSeed(mood,era,genre)
query.addQueryOption('SELECT_EXTENDED', 'COVER,REVIEW,ARTIST_BIOGRAPHY,ARTIST_IMAGE,ARTIST_OET,MOOD,TEMPO,LINK')
query.addQueryOption('SELECT_DETAIL', 'GENRE:3LEVEL,MOOD:2LEVEL,TEMPO:3LEVEL,ARTIST_ORIGIN:4LEVEL,ARTIST_ERA:2LEVEL,ARTIST_TYPE:2LEVEL')
if popularity!='':
query.addQueryOption('FOCUS_POPULARITY', popularity)
if similarity!='':
query.addQueryOption('FOCUS_SIMILARITY', similarity)
query.addQueryOption('RETURN_COUNT', count)
queryXML = query.toString()
if DEBUG:
print('QUERY:')
print(queryXML)
# POST query
response = urllib_request.urlopen(_gnurl(clientID), queryXML)
responseXML = response.read()
myPlaylist = []
    for x in range(1, int(count) + 1):
        track = _parseRadioMetadata(responseXML, x, clientID, userID)
        myPlaylist.append(track)
    if DEBUG:
        print(responseXML)
return myPlaylist
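# An illustrative sketch of building a playlist from a text seed; the artist
# and track values are arbitrary, and clientID/userID are expected to come
# from register() above.
def _example_create_radio(clientID, userID):
    playlist = createRadio(clientID=clientID, userID=userID,
                           artist='Daft Punk', track='Get Lucky', count='5')
    return playlist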
#*****************************************************************************************************************************************
# Added by Fabian in order to cover the Rhythm API
# Returns a list of gnmetadata dictionaries
def radioEvent(clientID='', userID='', radioID='', gnID='', event ='TRACK_PLAYED', count='10', popularity ='', similarity = ''):
if clientID=='' or userID=='':
print('ClientID and UserID are required')
return None
if radioID=='' or gnID=='':
print('Event query must contain the radioID and gnID')
return None
#Create XML request
query = _gnquery()
# Build the user header
query.addAuth(clientID, userID)
query.addQuery('RADIO_EVENT')
query.addRadioID(radioID)
query.addQueryEVENT(event, gnID)
query.addQueryOption('RETURN_COUNT', count)
if popularity!='':
query.addQueryOption('FOCUS_POPULARITY', popularity)
if similarity!='':
query.addQueryOption('FOCUS_SIMILARITY', similarity)
query.addQueryOption('SELECT_EXTENDED', 'COVER,REVIEW,ARTIST_BIOGRAPHY,ARTIST_IMAGE,ARTIST_OET,MOOD,TEMPO,LINK')
query.addQueryOption('SELECT_DETAIL', 'GENRE:3LEVEL,MOOD:2LEVEL,TEMPO:3LEVEL,ARTIST_ORIGIN:4LEVEL,ARTIST_ERA:2LEVEL,ARTIST_TYPE:2LEVEL')
query.addQueryOption('RETURN_SETTINGS', 'YES')
queryXML = query.toString()
if DEBUG:
print('QUERY:')
print(queryXML)
# POST query
response = urllib_request.urlopen(_gnurl(clientID), queryXML)
responseXML = response.read()
myPlaylist = []
    for x in range(1, int(count) + 1):
        track = _parseRadioMetadata(responseXML, x, clientID, userID)
        myPlaylist.append(track)
    if DEBUG:
        print(responseXML)
return myPlaylist
#***********************************************************************************************************************
def search(clientID='', userID='', artist='', album='', track='', toc=''):
"""
Queries the Gracenote service for a track, album, artist, or TOC
TOC is a string of offsets in the format '150 20512 30837 50912 64107 78357 ...'
"""
if clientID=='' or userID=='':
print('ClientID and UserID are required')
return None
if artist=='' and album=='' and track=='' and toc=='':
print('Must query with at least one field (artist, album, track, toc)')
return None
# Create XML request
query = _gnquery()
query.addAuth(clientID, userID)
if (toc != ''):
query.addQuery('ALBUM_TOC')
query.addQueryMode('SINGLE_BEST_COVER')
query.addQueryTOC(toc)
else:
query.addQuery('ALBUM_SEARCH')
query.addQueryMode('SINGLE_BEST_COVER')
query.addQueryTextField('ARTIST', artist)
query.addQueryTextField('ALBUM_TITLE', album)
query.addQueryTextField('TRACK_TITLE', track)
query.addQueryOption('SELECT_EXTENDED', 'COVER,REVIEW,ARTIST_BIOGRAPHY,ARTIST_IMAGE,ARTIST_OET,MOOD,TEMPO')
query.addQueryOption('SELECT_DETAIL', 'GENRE:3LEVEL,MOOD:2LEVEL,TEMPO:3LEVEL,ARTIST_ORIGIN:4LEVEL,ARTIST_ERA:2LEVEL,ARTIST_TYPE:2LEVEL')
queryXML = query.toString()
if DEBUG:
print('------------')
print('QUERY XML')
print('------------')
print(queryXML)
# POST query
response = urllib_request.urlopen(_gnurl(clientID), queryXML)
responseXML = response.read()
if DEBUG:
print('------------')
print('RESPONSE XML')
print('------------')
print(responseXML)
# Create GNTrackMetadata object
metadata = gnmetadata()
# Parse response
responseTree = xml.etree.ElementTree.fromstring(responseXML)
responseElem = responseTree.find('RESPONSE')
if responseElem.attrib['STATUS'] == 'OK':
# Find Album element
albumElem = responseElem.find('ALBUM')
# Parse album metadata
metadata['album_gnid'] = _getElemText(albumElem, 'GN_ID')
metadata['album_artist_name'] = _getElemText(albumElem, 'ARTIST')
metadata['album_title'] = _getElemText(albumElem, 'TITLE')
metadata['album_year'] = _getElemText(albumElem, 'DATE')
metadata['album_art_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'COVERART')
metadata['genre'] = _getMultiElemText(albumElem, 'GENRE', 'ORD', 'ID')
metadata['artist_image_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'ARTIST_IMAGE')
metadata['artist_bio_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'ARTIST_BIOGRAPHY')
metadata['review_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'REVIEW')
# Look for OET
artistOriginElem = albumElem.find('ARTIST_ORIGIN')
if artistOriginElem is not None:
metadata['artist_origin'] = _getMultiElemText(albumElem, 'ARTIST_ORIGIN', 'ORD', 'ID')
metadata['artist_era'] = _getMultiElemText(albumElem, 'ARTIST_ERA', 'ORD', 'ID')
metadata['artist_type'] = _getMultiElemText(albumElem, 'ARTIST_TYPE', 'ORD', 'ID')
else:
# Try to get OET again by fetching album by GNID
metadata['artist_origin'], metadata['artist_era'], metadata['artist_type'] = _getOET(clientID, userID, metadata['album_gnid'])
# Parse track metadata
matchedTrackElem = albumElem.find('MATCHED_TRACK_NUM')
if matchedTrackElem is not None:
trackElem = albumElem.find('TRACK')
metadata['track_number'] = _getElemText(trackElem, 'TRACK_NUM')
metadata['track_gnid'] = _getElemText(trackElem, 'GN_ID')
metadata['track_title'] = _getElemText(trackElem, 'TITLE')
metadata['track_artist_name'] = _getElemText(trackElem, 'ARTIST')
metadata['mood'] = _getMultiElemText(trackElem, 'MOOD', 'ORD', 'ID')
metadata['tempo'] = _getMultiElemText(trackElem, 'TEMPO', 'ORD', 'ID')
# If track-level GOET exists, overwrite metadata from album
if trackElem.find('GENRE') is not None:
metadata['genre'] = _getMultiElemText(trackElem, 'GENRE', 'ORD', 'ID')
if trackElem.find('ARTIST_ORIGIN') is not None:
metadata['artist_origin'] = _getMultiElemText(trackElem, 'ARTIST_ORIGIN', 'ORD', 'ID')
if trackElem.find('ARTIST_ERA') is not None:
metadata['artist_era'] = _getMultiElemText(trackElem, 'ARTIST_ERA', 'ORD', 'ID')
if trackElem.find('ARTIST_TYPE') is not None:
metadata['artist_type'] = _getMultiElemText(trackElem, 'ARTIST_TYPE', 'ORD', 'ID')
# Parse tracklist
metadata['tracks'] = []
for trackElem in albumElem.iter('TRACK'):
trackdata = {}
trackdata['track_number'] = _getElemText(trackElem, 'TRACK_NUM')
trackdata['track_gnid'] = _getElemText(trackElem, 'GN_ID')
trackdata['track_title'] = _getElemText(trackElem, 'TITLE')
trackdata['track_artist_name'] = _getElemText(trackElem, 'ARTIST')
trackdata['mood'] = _getMultiElemText(trackElem, 'MOOD', 'ORD', 'ID')
trackdata['tempo'] = _getMultiElemText(trackElem, 'TEMPO', 'ORD', 'ID')
# If track-level GOET exists, overwrite metadata from album
if trackElem.find('GENRE') is not None:
trackdata['genre'] = _getMultiElemText(trackElem, 'GENRE', 'ORD', 'ID')
if trackElem.find('ARTIST_ORIGIN') is not None:
trackdata['artist_origin'] = _getMultiElemText(trackElem, 'ARTIST_ORIGIN', 'ORD', 'ID')
if trackElem.find('ARTIST_ERA') is not None:
trackdata['artist_era'] = _getMultiElemText(trackElem, 'ARTIST_ERA', 'ORD', 'ID')
if trackElem.find('ARTIST_TYPE') is not None:
trackdata['artist_type'] = _getMultiElemText(trackElem, 'ARTIST_TYPE', 'ORD', 'ID')
metadata['tracks'].append(trackdata)
return metadata
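# A small usage sketch for search(): look up a track by text and read a few of
# the gnmetadata fields filled in above. clientID/userID are placeholders
# obtained via register(), and the artist/album/track values are arbitrary.
def _example_search(clientID, userID):
    md = search(clientID=clientID, userID=userID,
                artist='Kings Of Convenience',
                album='Riot On An Empty Street',
                track='Homesick')
    if md:
        print(md['album_title'], md['track_title'], md['album_art_url'])
    return md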
def _parseRadioMetadata(responseXML, number, clientID='', userID=''):
# Create GNTrackMetadata object
metadata = gnmetadata()
# Parse response
responseTree = xml.etree.ElementTree.fromstring(responseXML)
responseElem = responseTree.find('RESPONSE')
if responseElem.attrib['STATUS'] == 'OK':
#find the radio ID
RadioElem = responseElem.find('RADIO')
metadata['radio_id'] = _getElemText(RadioElem, 'ID')
# Find Album the right album element
albums = responseElem.findall('ALBUM')
for albumElem in albums:
if albumElem.attrib["ORD"] == str(number):
# Parse album metadata
metadata['album_gnid'] = _getElemText(albumElem, 'GN_ID')
metadata['album_artist_name'] = _getElemText(albumElem, 'ARTIST')
metadata['album_title'] = _getElemText(albumElem, 'TITLE')
metadata['album_year'] = _getElemText(albumElem, 'DATE')
metadata['album_art_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'COVERART')
metadata['genre'] = _getMultiElemText(albumElem, 'GENRE', 'ORD', 'ID')
metadata['artist_image_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'ARTIST_IMAGE')
metadata['artist_bio_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'ARTIST_BIOGRAPHY')
metadata['review_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'REVIEW')
# Look for OET
artistOriginElem = albumElem.find('ARTIST_ORIGIN')
if artistOriginElem is not None:
metadata['artist_origin'] = _getMultiElemText(albumElem, 'ARTIST_ORIGIN', 'ORD', 'ID')
metadata['artist_era'] = _getMultiElemText(albumElem, 'ARTIST_ERA', 'ORD', 'ID')
metadata['artist_type'] = _getMultiElemText(albumElem, 'ARTIST_TYPE', 'ORD', 'ID')
else:
# Try to get OET again by fetching album by GNID
metadata['artist_origin'], metadata['artist_era'], metadata['artist_type'] = _getOET(clientID, userID, metadata['album_gnid'])
# Parse track metadata
trackElem = albumElem.find('TRACK')
metadata['track_number'] = _getElemText(trackElem, 'TRACK_NUM')
metadata['track_gnid'] = _getElemText(trackElem, 'GN_ID')
metadata['track_title'] = _getElemText(trackElem, 'TITLE')
metadata['track_artist_name'] = _getElemText(trackElem, 'ARTIST')
metadata['mood'] = _getMultiElemText(trackElem, 'MOOD', 'ORD', 'ID')
metadata['tempo'] = _getMultiElemText(trackElem, 'TEMPO', 'ORD', 'ID')
# If track-level GOET exists, overwrite metadata from album
if trackElem.find('GENRE') is not None:
metadata['genre'] = _getMultiElemText(trackElem, 'GENRE', 'ORD', 'ID')
if trackElem.find('ARTIST_ORIGIN') is not None:
metadata['artist_origin'] = _getMultiElemText(trackElem, 'ARTIST_ORIGIN', 'ORD', 'ID')
if trackElem.find('ARTIST_ERA') is not None:
metadata['artist_era'] = _getMultiElemText(trackElem, 'ARTIST_ERA', 'ORD', 'ID')
if trackElem.find('ARTIST_TYPE') is not None:
metadata['artist_type'] = _getMultiElemText(trackElem, 'ARTIST_TYPE', 'ORD', 'ID')
                if trackElem.find('XID') is not None:
                    metadata['xid'] = _getElemText(trackElem, 'XID')
return metadata
def get_discography(clientID='', userID='', artist='', rangeStart=1, rangeEnd=10):
"""
Queries the Gracenote service for all albums containing an artist
"""
if clientID=='' or userID=='':
print('ClientID and UserID are required')
return None
if artist=='':
print('Must specify artist')
return None
# Create XML request
query = _gnquery()
query.addAuth(clientID, userID)
query.addQuery('ALBUM_SEARCH')
query.addQueryTextField('ARTIST', artist)
query.addQueryOption('SELECT_EXTENDED', 'COVER,REVIEW,ARTIST_BIOGRAPHY,ARTIST_IMAGE,ARTIST_OET,MOOD,TEMPO')
query.addQueryOption('SELECT_DETAIL', 'GENRE:3LEVEL,MOOD:2LEVEL,TEMPO:3LEVEL,ARTIST_ORIGIN:4LEVEL,ARTIST_ERA:2LEVEL,ARTIST_TYPE:2LEVEL')
query.addQueryRange(rangeStart,rangeEnd)
queryXML = query.toString()
if DEBUG:
print('------------')
print('QUERY XML')
print('------------')
print(queryXML)
# POST query
response = urllib_request.urlopen(_gnurl(clientID), queryXML)
responseXML = response.read()
if DEBUG:
print('------------')
print('RESPONSE XML')
print('------------')
print(responseXML)
# Create result array
discography = []
# Parse response
responseTree = xml.etree.ElementTree.fromstring(responseXML)
responseElem = responseTree.find('RESPONSE')
if responseElem.attrib['STATUS'] == 'OK':
# Find Album element
albumElems = responseElem.findall('ALBUM')
for albumElem in albumElems:
metadata = gnmetadata()
# Parse album metadata
metadata['album_gnid'] = _getElemText(albumElem, 'GN_ID')
metadata['album_artist_name'] = _getElemText(albumElem, 'ARTIST')
metadata['album_title'] = _getElemText(albumElem, 'TITLE')
metadata['album_year'] = _getElemText(albumElem, 'DATE')
metadata['album_art_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'COVERART')
metadata['genre'] = _getMultiElemText(albumElem, 'GENRE', 'ORD', 'ID')
metadata['artist_image_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'ARTIST_IMAGE')
metadata['artist_bio_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'ARTIST_BIOGRAPHY')
metadata['review_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'REVIEW')
# Look for OET
artistOriginElem = albumElem.find('ARTIST_ORIGIN')
if artistOriginElem is not None:
metadata['artist_origin'] = _getMultiElemText(albumElem, 'ARTIST_ORIGIN', 'ORD', 'ID')
metadata['artist_era'] = _getMultiElemText(albumElem, 'ARTIST_ERA', 'ORD', 'ID')
metadata['artist_type'] = _getMultiElemText(albumElem, 'ARTIST_TYPE', 'ORD', 'ID')
# Parse tracklist
metadata['tracks'] = []
for trackElem in albumElem.iter('TRACK'):
trackdata = {}
trackdata['track_number'] = _getElemText(trackElem, 'TRACK_NUM')
trackdata['track_gnid'] = _getElemText(trackElem, 'GN_ID')
trackdata['track_title'] = _getElemText(trackElem, 'TITLE')
trackdata['track_artist_name'] = _getElemText(trackElem, 'ARTIST')
trackdata['mood'] = _getMultiElemText(trackElem, 'MOOD', 'ORD', 'ID')
trackdata['tempo'] = _getMultiElemText(trackElem, 'TEMPO', 'ORD', 'ID')
# If track-level GOET exists, overwrite metadata from album
if trackElem.find('GENRE') is not None:
trackdata['genre'] = _getMultiElemText(trackElem, 'GENRE', 'ORD', 'ID')
if trackElem.find('ARTIST_ORIGIN') is not None:
trackdata['artist_origin'] = _getMultiElemText(trackElem, 'ARTIST_ORIGIN', 'ORD', 'ID')
if trackElem.find('ARTIST_ERA') is not None:
trackdata['artist_era'] = _getMultiElemText(trackElem, 'ARTIST_ERA', 'ORD', 'ID')
if trackElem.find('ARTIST_TYPE') is not None:
trackdata['artist_type'] = _getMultiElemText(trackElem, 'ARTIST_TYPE', 'ORD', 'ID')
metadata['tracks'].append(trackdata)
discography.append(metadata)
return discography
def fetch(clientID='', userID='', GNID=''):
"""
Fetches a track or album by GN ID
"""
if clientID=='' or userID=='':
print('ClientID and UserID are required')
return None
if GNID=='':
print('GNID is required')
return None
# Create XML request
query = _gnquery()
query.addAuth(clientID, userID)
query.addQuery('ALBUM_FETCH')
query.addQueryGNID(GNID)
query.addQueryOption('SELECT_EXTENDED', 'COVER,REVIEW,ARTIST_BIOGRAPHY,ARTIST_IMAGE,ARTIST_OET,MOOD,TEMPO')
query.addQueryOption('SELECT_DETAIL', 'GENRE:3LEVEL,MOOD:2LEVEL,TEMPO:3LEVEL,ARTIST_ORIGIN:4LEVEL,ARTIST_ERA:2LEVEL,ARTIST_TYPE:2LEVEL')
queryXML = query.toString()
if DEBUG:
print('------------')
print('QUERY XML')
print('------------')
print(queryXML)
# POST query
response = urllib_request.urlopen(_gnurl(clientID), queryXML)
responseXML = response.read()
if DEBUG:
print('------------')
print('RESPONSE XML')
print('------------')
print(responseXML)
# Create GNTrackMetadata object
metadata = gnmetadata()
# Parse response
responseTree = xml.etree.ElementTree.fromstring(responseXML)
responseElem = responseTree.find('RESPONSE')
if responseElem.attrib['STATUS'] == 'OK':
# Find Album element
albumElem = responseElem.find('ALBUM')
# Parse album metadata
metadata['album_gnid'] = _getElemText(albumElem, 'GN_ID')
metadata['album_artist_name'] = _getElemText(albumElem, 'ARTIST')
metadata['album_title'] = _getElemText(albumElem, 'TITLE')
metadata['album_year'] = _getElemText(albumElem, 'DATE')
metadata['album_art_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'COVERART')
metadata['genre'] = _getMultiElemText(albumElem, 'GENRE', 'ORD', 'ID')
metadata['artist_image_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'ARTIST_IMAGE')
metadata['artist_bio_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'ARTIST_BIOGRAPHY')
metadata['review_url'] = _getElemText(albumElem, 'URL', 'TYPE', 'REVIEW')
# Look for OET
artistOriginElem = albumElem.find('ARTIST_ORIGIN')
if artistOriginElem is not None:
metadata['artist_origin'] = _getMultiElemText(albumElem, 'ARTIST_ORIGIN', 'ORD', 'ID')
metadata['artist_era'] = _getMultiElemText(albumElem, 'ARTIST_ERA', 'ORD', 'ID')
metadata['artist_type'] = _getMultiElemText(albumElem, 'ARTIST_TYPE', 'ORD', 'ID')
else:
# Try to get OET again by fetching album by GNID
metadata['artist_origin'], metadata['artist_era'], metadata['artist_type'] = _getOET(clientID, userID, metadata['album_gnid'])
# Parse track metadata
matchedTrackElem = albumElem.find('MATCHED_TRACK_NUM')
if matchedTrackElem is not None:
trackElem = albumElem.find('TRACK')
metadata['track_number'] = _getElemText(trackElem, 'TRACK_NUM')
metadata['track_gnid'] = _getElemText(trackElem, 'GN_ID')
metadata['track_title'] = _getElemText(trackElem, 'TITLE')
metadata['track_artist_name'] = _getElemText(trackElem, 'ARTIST')
metadata['mood'] = _getMultiElemText(trackElem, 'MOOD', 'ORD', 'ID')
metadata['tempo'] = _getMultiElemText(trackElem, 'TEMPO', 'ORD', 'ID')
# If track-level GOET exists, overwrite metadata from album
if trackElem.find('GENRE') is not None:
metadata['genre'] = _getMultiElemText(trackElem, 'GENRE', 'ORD', 'ID')
if trackElem.find('ARTIST_ORIGIN') is not None:
metadata['artist_origin'] = _getMultiElemText(trackElem, 'ARTIST_ORIGIN', 'ORD', 'ID')
if trackElem.find('ARTIST_ERA') is not None:
metadata['artist_era'] = _getMultiElemText(trackElem, 'ARTIST_ERA', 'ORD', 'ID')
if trackElem.find('ARTIST_TYPE') is not None:
metadata['artist_type'] = _getMultiElemText(trackElem, 'ARTIST_TYPE', 'ORD', 'ID')
# Parse tracklist
metadata['tracks'] = []
for trackElem in albumElem.iter('TRACK'):
trackdata = {}
trackdata['track_number'] = _getElemText(trackElem, 'TRACK_NUM')
trackdata['track_gnid'] = _getElemText(trackElem, 'GN_ID')
trackdata['track_title'] = _getElemText(trackElem, 'TITLE')
trackdata['track_artist_name'] = _getElemText(trackElem, 'ARTIST')
trackdata['mood'] = _getMultiElemText(trackElem, 'MOOD', 'ORD', 'ID')
trackdata['tempo'] = _getMultiElemText(trackElem, 'TEMPO', 'ORD', 'ID')
# If track-level GOET exists, overwrite metadata from album
if trackElem.find('GENRE') is not None:
trackdata['genre'] = _getMultiElemText(trackElem, 'GENRE', 'ORD', 'ID')
if trackElem.find('ARTIST_ORIGIN') is not None:
trackdata['artist_origin'] = _getMultiElemText(trackElem, 'ARTIST_ORIGIN', 'ORD', 'ID')
if trackElem.find('ARTIST_ERA') is not None:
trackdata['artist_era'] = _getMultiElemText(trackElem, 'ARTIST_ERA', 'ORD', 'ID')
if trackElem.find('ARTIST_TYPE') is not None:
trackdata['artist_type'] = _getMultiElemText(trackElem, 'ARTIST_TYPE', 'ORD', 'ID')
metadata['tracks'].append(trackdata)
return metadata
def _gnurl(clientID):
"""
Helper function to form URL to Gracenote service
"""
clientIDprefix = clientID.split('-')[0]
return 'https://c' + clientIDprefix + '.web.cddbp.net/webapi/xml/1.0/'
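# For example, with an illustrative clientID of '1234567-ABCDEF', _gnurl()
# returns 'https://c1234567.web.cddbp.net/webapi/xml/1.0/': the numeric prefix
# of the clientID selects the per-client endpoint host.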
def _getOET(clientID, userID, GNID):
"""
Helper function to retrieve Origin, Era, and Artist Type by direct album
fetch
"""
# Create XML request
query = _gnquery()
query.addAuth(clientID, userID)
query.addQuery('ALBUM_FETCH')
query.addQueryGNID(GNID)
query.addQueryOption('SELECT_EXTENDED', 'ARTIST_OET')
query.addQueryOption('SELECT_DETAIL', 'ARTIST_ORIGIN:4LEVEL,ARTIST_ERA:2LEVEL,ARTIST_TYPE:2LEVEL')
queryXML = query.toString()
if DEBUG:
print('------------')
print('QUERY XML (from _getOET())')
print('------------')
print(queryXML)
# POST query
response = urllib_request.urlopen(_gnurl(clientID), queryXML)
albumXML = response.read()
if DEBUG:
print('------------')
print('RESPONSE XML (from _getOET())')
print('------------')
print(albumXML)
# Parse XML
responseTree = xml.etree.ElementTree.fromstring(albumXML)
responseElem = responseTree.find('RESPONSE')
if responseElem.attrib['STATUS'] == 'OK':
albumElem = responseElem.find('ALBUM')
artistOrigin = _getMultiElemText(albumElem, 'ARTIST_ORIGIN', 'ORD', 'ID')
artistEra = _getMultiElemText(albumElem, 'ARTIST_ERA', 'ORD', 'ID')
artistType = _getMultiElemText(albumElem, 'ARTIST_TYPE', 'ORD', 'ID')
return artistOrigin, artistEra, artistType
class _gnquery:
"""
A utility class for creating and configuring an XML query for POST'ing to
the Gracenote service
"""
def __init__(self):
self.root = xml.etree.ElementTree.Element('QUERIES')
def addAuth(self, clientID, userID):
auth = xml.etree.ElementTree.SubElement(self.root, 'AUTH')
client = xml.etree.ElementTree.SubElement(auth, 'CLIENT')
user = xml.etree.ElementTree.SubElement(auth, 'USER')
client.text = clientID
user.text = userID
def addQuery(self, cmd):
query = xml.etree.ElementTree.SubElement(self.root, 'QUERY')
query.attrib['CMD'] = cmd
def addQueryMode(self, modeStr):
query = self.root.find('QUERY')
mode = xml.etree.ElementTree.SubElement(query, 'MODE')
mode.text = modeStr
def addQueryTextField(self, fieldName, value):
query = self.root.find('QUERY')
text = xml.etree.ElementTree.SubElement(query, 'TEXT')
text.attrib['TYPE'] = fieldName
text.text = value
def addQueryOption(self, parameterName, value):
query = self.root.find('QUERY')
option = xml.etree.ElementTree.SubElement(query, 'OPTION')
parameter = xml.etree.ElementTree.SubElement(option, 'PARAMETER')
parameter.text = parameterName
valueElem = xml.etree.ElementTree.SubElement(option, 'VALUE')
valueElem.text = value
def addQueryGNID(self, GNID):
query = self.root.find('QUERY')
GNIDElem = xml.etree.ElementTree.SubElement(query, 'GN_ID')
GNIDElem.text = GNID
def addQueryClient(self, clientID):
query = self.root.find('QUERY')
client = xml.etree.ElementTree.SubElement(query, 'CLIENT')
client.text = clientID
def addQueryRange(self, start, end):
query = self.root.find('QUERY')
queryRange = xml.etree.ElementTree.SubElement(query, 'RANGE')
rangeStart = xml.etree.ElementTree.SubElement(queryRange, 'START')
rangeStart.text = str(start)
rangeEnd = xml.etree.ElementTree.SubElement(queryRange, 'END')
rangeEnd.text = str(end)
def addQueryTOC(self, toc):
# TOC is a string of format '150 20512 30837 50912 64107 78357 ...'
query = self.root.find('QUERY')
tocElem = xml.etree.ElementTree.SubElement(query, 'TOC')
offsetElem = xml.etree.ElementTree.SubElement(tocElem, 'OFFSETS')
offsetElem.text = toc
def toString(self):
return xml.etree.ElementTree.tostring(self.root)
#Methods added by Fabian to reflect the Rhythm use case
def addAttributeSeed(self, moodID, eraID, genreID):
query = self.root.find('QUERY')
seed = xml.etree.ElementTree.SubElement(query, 'SEED')
seed.attrib['TYPE'] = "ATTRIBUTE"
if genreID!='':
genreElement = xml.etree.ElementTree.SubElement(seed, 'GENRE')
genreElement.attrib['ID'] = genreID
if moodID!='':
genreElement = xml.etree.ElementTree.SubElement(seed, 'MOOD')
genreElement.attrib['ID'] = moodID
if eraID!='':
genreElement = xml.etree.ElementTree.SubElement(seed, 'ERA')
genreElement.attrib['ID'] = eraID
def addTextSeed(self, artist, track):
query = self.root.find('QUERY')
seed = xml.etree.ElementTree.SubElement(query, 'SEED')
seed.attrib['TYPE'] = "TEXT"
if artist!='':
text = xml.etree.ElementTree.SubElement(seed, 'TEXT')
text.attrib['TYPE'] = "ARTIST"
text.text = artist
if track!='':
text = xml.etree.ElementTree.SubElement(seed, 'TEXT')
text.attrib['TYPE'] = "TRACK"
text.text = track
def addQueryEVENT(self, eventType, gnID):
query = self.root.find('QUERY')
event = xml.etree.ElementTree.SubElement(query, 'EVENT')
event.attrib['TYPE'] = eventType
gnidTag = xml.etree.ElementTree.SubElement(event, 'GN_ID')
gnidTag.text = gnID
def addRadioID(self, radioID):
query = self.root.find('QUERY')
radio = xml.etree.ElementTree.SubElement(query, 'RADIO')
myradioid = xml.etree.ElementTree.SubElement(radio, 'ID')
myradioid.text = radioID
def _getElemText(parentElem, elemName, elemAttribName=None, elemAttribValue=None):
"""
XML parsing helper function to find child element with a specific name,
and return the text value
"""
elems = parentElem.findall(elemName)
for elem in elems:
if elemAttribName is not None and elemAttribValue is not None:
if elem.attrib[elemAttribName] == elemAttribValue:
return urllib_parse.unquote(elem.text)
else:
continue
else: # Just return the first one
return urllib_parse.unquote(elem.text)
return ''
def _getElemAttrib(parentElem, elemName, elemAttribName):
"""
XML parsing helper function to find child element with a specific name,
and return the value of a specified attribute
"""
elem = parentElem.find(elemName)
if elem is not None:
return elem.attrib[elemAttribName]
def _getMultiElemText(parentElem, elemName, topKey, bottomKey):
"""
XML parsing helper function to return a 2-level dict of multiple elements
by a specified name, using topKey as the first key, and bottomKey as the second key
"""
elems = parentElem.findall(elemName)
result = {} # 2-level dictionary of items, keyed by topKey and then bottomKey
if elems is not None:
for elem in elems:
if topKey in elem.attrib:
result[elem.attrib[topKey]] = {bottomKey:elem.attrib[bottomKey], 'TEXT':elem.text}
else:
result['0'] = {bottomKey:elem.attrib[bottomKey], 'TEXT':elem.text}
return result
|
|
# Copyright 2015 SAP SE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
from sqlalchemy.testing import exclusions, requirements
class Requirements(requirements.SuiteRequirements):
@property
def temporary_tables(self):
        # TODO: HANA supports temporary tables, but only with a GLOBAL or LOCAL specification
return exclusions.closed()
@property
def temp_table_reflection(self):
return exclusions.closed()
@property
def views(self):
return exclusions.open()
@property
def deferrable_or_no_constraints(self):
"""Target database must support derferable constraints."""
return exclusions.closed()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.closed()
@property
def unique_constraint_reflection(self):
return exclusions.open()
@property
def reflects_pk_names(self):
return exclusions.open()
@property
def self_referential_foreign_keys(self):
"""SAP HANA doen't support self-referential foreign keys."""
return exclusions.closed()
@property
def empty_inserts(self):
"""Empty value tuple in INSERT statement is not allowed"""
return exclusions.closed()
@property
def precision_numerics_enotation_large(self):
return exclusions.open()
@property
def precision_numerics_many_significant_digits(self):
return exclusions.open()
@property
def precision_numerics_retains_significant_digits(self):
return exclusions.open()
@property
def datetime_literals(self):
"""HANA has the function to_date, to_time, to_timestamp"""
return exclusions.open()
@property
def time_microseconds(self):
"""No support for microseconds in datetime"""
return exclusions.closed()
@property
def datetime_microseconds(self):
"""No support for microseconds in datetime"""
return exclusions.closed()
@property
def date_historic(self):
return exclusions.open()
@property
def text_type(self):
"""Currently not supported by PYHDB"""
return exclusions.open()
@property
def schemas(self):
return exclusions.open()
@property
def percent_schema_names(self):
return exclusions.closed()
@property
def savepoints(self):
"""No support for savepoints in transactions"""
return exclusions.closed()
@property
def selectone(self):
"""HANA doesn't support 'SELECT 1' without 'FROM DUMMY'"""
return exclusions.closed()
@property
def order_by_col_from_union(self):
return exclusions.open()
@property
def broken_cx_oracle6_numerics(self):
return exclusions.closed()
@property
def mysql_zero_date(self):
return exclusions.closed()
@property
def mysql_non_strict(self):
return exclusions.closed()
@property
def two_phase_transactions(self):
"""Not supported by PYHDB"""
return exclusions.closed()
@property
def predictable_gc(self):
return exclusions.open()
@property
def cpython(self):
return exclusions.closed()
@property
def python3(self):
if sys.version_info < (3,):
return exclusions.closed()
return exclusions.open()
@property
def identity(self):
return exclusions.closed()
@property
def sane_rowcount(self):
return exclusions.closed()
@property
def sane_multi_rowcount(self):
return exclusions.closed()
@property
def check_constraints(self):
return exclusions.closed()
@property
def update_nowait(self):
return exclusions.closed()
@property
def independent_connections(self):
return exclusions.open()
@property
def non_broken_pickle(self):
return exclusions.closed()
@property
def independent_cursors(self):
return exclusions.open()
@property
def cross_schema_fk_reflection(self):
return exclusions.closed()
@property
def updateable_autoincrement_pks(self):
return exclusions.closed()
@property
def bound_limit_offset(self):
return exclusions.closed()
@property
def isolation_level(self):
# TODO: Check support in pyhdb
return exclusions.closed()
    # Disable mssql tests
@property
def mssql_freetds(self):
return exclusions.closed()
# Disable postgresql tests
@property
def postgresql_utf8_server_encoding(self):
return exclusions.closed()
@property
def range_types(self):
return exclusions.closed()
@property
def hstore(self):
return exclusions.closed()
@property
def array_type(self):
return exclusions.closed()
@property
def psycopg2_compatibility(self):
return exclusions.closed()
@property
def postgresql_jsonb(self):
return exclusions.closed()
@property
def savepoints_w_release(self):
return exclusions.closed()
@property
def non_broken_binary(self):
return exclusions.closed()
@property
def oracle5x(self):
return exclusions.closed()
@property
def psycopg2_or_pg8000_compatibility(self):
return exclusions.closed()
@property
def psycopg2_native_hstore(self):
return exclusions.closed()
@property
def psycopg2_native_json(self):
return exclusions.closed()
@property
def two_phase_recovery(self):
return exclusions.closed()
@property
def enforces_check_constraints(self):
return exclusions.closed()
@property
def duplicate_key_raises_integrity_error(self):
"""pyhdb raises the integrity_error, whereas hdbcli raises DBAPIError,
currently the test test_integrity_error is known to fail when one connects to
HANA with hdbcli."""
return exclusions.succeeds_if('hana+pyhdb')
|
|
# Requires Python 2.4 or better and win32api.
"""Config on Msys mingw
This version expects the Pygame 1.9.0 dependencies as built by
msys_build_deps.py
"""
import dll
from setup_win_common import get_definitions
import msys
import os, sys, string
from glob import glob
from distutils.sysconfig import get_python_inc
configcommand = os.environ.get('SDL_CONFIG', 'sdl-config',)
configcommand = configcommand + ' --version --cflags --libs'
localbase = os.environ.get('LOCALBASE', '')
# these get prefixed with '/usr/local' and '/mingw' or the $LOCALBASE
origincdirs = ['/include', '/include/SDL', '/include/SDL11',
'/include/smpeg', '/include/libpng12', ]
origlibdirs = ['/lib']
class ConfigError(Exception):
pass
def path_join(a, *p):
return os.path.join(a, *p).replace(os.sep, '/')
path_split = os.path.split
def print_(*args, **kwds):
return msys.msys_print(*args, **kwds)
def confirm(message):
"ask a yes/no question, return result"
reply = msys.msys_raw_input("\n%s [Y/n]:" % message)
if reply and string.lower(reply[0]) == 'n':
return 0
return 1
class DependencyProg:
needs_dll = True
def __init__(self, name, envname, exename, minver, msys, defaultlibs=None):
if defaultlibs is None:
defaultlibs = [dll.name_to_root(name)]
self.name = name
try:
command = os.environ[envname]
except KeyError:
command = exename
else:
drv, pth = os.path.splitdrive(command)
if drv:
command = '/' + drv[0] + pth
self.lib_dir = ''
self.inc_dir = ''
self.libs = []
self.cflags = ''
try:
config = msys.run_shell_command([command, '--version', '--cflags', '--libs'])
ver, flags = config.split('\n', 1)
self.ver = ver.strip()
flags = flags.split()
if minver and self.ver < minver:
                err = 'WARNING: requires %s version %s (%s found)' % (self.name, minver, self.ver)
raise ValueError, err
self.found = 1
self.cflags = ''
for f in flags:
if f[:2] in ('-I', '-L'):
self.cflags += f[:2] + msys.msys_to_windows(f[2:]) + ' '
elif f[:2] in ('-l', '-D'):
self.cflags += f + ' '
elif f[:3] == '-Wl':
self.cflags += '-Xlinker ' + f + ' '
except:
print_('WARNING: "%s" failed!' % command)
self.found = 0
self.ver = '0'
self.libs = defaultlibs
def configure(self, incdirs, libdir):
if self.found:
print_(self.name + ' '[len(self.name):] + ': found ' + self.ver)
self.found = 1
else:
print_(self.name + ' '[len(self.name):] + ': not found')
class Dependency:
needs_dll = True
def __init__(self, name, checkhead, checklib, libs=None):
if libs is None:
libs = [dll.name_to_root(name)]
self.name = name
self.inc_dir = None
self.lib_dir = None
self.libs = libs
self.found = 0
self.checklib = checklib
self.checkhead = checkhead
self.cflags = ''
def configure(self, incdirs, libdirs):
self.find_inc_dir(incdirs)
self.find_lib_dir(libdirs)
if self.lib_dir and self.inc_dir:
print_(self.name + ' '[len(self.name):] + ': found')
self.found = 1
else:
print_(self.name + ' '[len(self.name):] + ': not found')
def find_inc_dir(self, incdirs):
incname = self.checkhead
for dir in incdirs:
path = path_join(dir, incname)
if os.path.isfile(path):
self.inc_dir = dir
return
def find_lib_dir(self, libdirs):
libname = self.checklib
for dir in libdirs:
path = path_join(dir, libname)
if filter(os.path.isfile, glob(path+'*')):
self.lib_dir = dir
return
class DependencyPython:
needs_dll = False
def __init__(self, name, module, header):
self.name = name
self.lib_dir = ''
self.inc_dir = ''
self.libs = []
self.cflags = ''
self.found = 0
self.ver = '0'
self.module = module
self.header = header
def configure(self, incdirs, libdirs):
self.found = 1
if self.module:
try:
self.ver = __import__(self.module).__version__
except ImportError:
self.found = 0
if self.found and self.header:
fullpath = path_join(get_python_inc(0), self.header)
if not os.path.isfile(fullpath):
self.found = 0
else:
self.inc_dir = os.path.split(fullpath)[0]
if self.found:
print_(self.name + ' '[len(self.name):] + ': found', self.ver)
else:
print_(self.name + ' '[len(self.name):] + ': not found')
class DependencyDLL:
needs_dll = False
def __init__(self, name, libs=None):
if libs is None:
libs = dll.libraries(name)
self.name = 'COPYLIB_' + dll.name_to_root(name)
self.inc_dir = None
self.lib_dir = '_'
self.libs = libs
        self.found = 1  # Always found to make its COPYLIB work
self.cflags = ''
self.lib_name = name
self.file_name_test = dll.tester(name)
def configure(self, incdirs, libdirs, start=None):
omit = []
if start is not None:
if self.set_path(start):
return
omit.append(start)
p, f = path_split(start)
if f == 'lib' and self.set_path(path_join(p, 'bin')):
return
            omit.append(path_join(p, 'bin'))
# Search other directories
for dir in libdirs:
if dir not in omit:
if self.set_path(dir):
return
p, f = path_split(dir)
if f == 'lib' and self.set_path(path_join(p, 'bin')): # cond. and
return
def set_path(self, wdir):
test = self.file_name_test
try:
files = os.listdir(wdir)
except:
pass
else:
for f in files:
if test(f) and os.path.isfile(path_join(wdir, f)):
# Found
self.lib_dir = path_join(wdir, f)
return True
# Not found
return False
class DependencyWin:
needs_dll = False
def __init__(self, name, cflags):
self.name = name
self.inc_dir = None
self.lib_dir = None
self.libs = []
self.found = 1
self.cflags = cflags
def configure(self, incdirs, libdirs):
pass
def main():
m = msys.Msys(require_mingw=False)
print_('\nHunting dependencies...')
DEPS = [
DependencyProg('SDL', 'SDL_CONFIG', 'sdl-config', '1.2.13', m),
Dependency('FONT', 'SDL_ttf.h', 'libSDL_ttf.dll.a'),
Dependency('IMAGE', 'SDL_image.h', 'libSDL_image.dll.a'),
Dependency('MIXER', 'SDL_mixer.h', 'libSDL_mixer.dll.a'),
DependencyProg('SMPEG', 'SMPEG_CONFIG', 'smpeg-config', '0.4.3', m),
Dependency('PNG', 'png.h', 'libpng12.dll.a'),
Dependency('JPEG', 'jpeglib.h', 'libjpeg.dll.a'),
Dependency('PORTMIDI', 'portmidi.h', 'libportmidi.dll.a'),
Dependency('PORTTIME', 'portmidi.h', 'libportmidi.dll.a'),
DependencyDLL('TIFF'),
DependencyDLL('VORBISFILE'),
DependencyDLL('VORBIS'),
DependencyDLL('OGG'),
DependencyDLL('FREETYPE'),
DependencyDLL('Z'),
]
if not DEPS[0].found:
print_('Unable to run "sdl-config". Please make sure a development version of SDL is installed.')
sys.exit(1)
if localbase:
incdirs = [localbase+d for d in origincdirs]
libdirs = [localbase+d for d in origlibdirs]
else:
incdirs = []
libdirs = []
incdirs += [m.msys_to_windows("/usr/local"+d) for d in origincdirs]
libdirs += [m.msys_to_windows("/usr/local"+d) for d in origlibdirs]
if m.mingw_root is not None:
incdirs += [m.msys_to_windows("/mingw"+d) for d in origincdirs]
libdirs += [m.msys_to_windows("/mingw"+d) for d in origlibdirs]
for arg in string.split(DEPS[0].cflags):
if arg[:2] == '-I':
incdirs.append(arg[2:])
elif arg[:2] == '-L':
libdirs.append(arg[2:])
dll_deps = []
for d in DEPS:
d.configure(incdirs, libdirs)
if d.needs_dll:
dll_dep = DependencyDLL(d.name)
dll_dep.configure(incdirs, libdirs, d.lib_dir)
dll_deps.append(dll_dep)
DEPS += dll_deps
for d in get_definitions():
DEPS.append(DependencyWin(d.name, d.value))
for d in DEPS:
if isinstance(d, DependencyDLL):
if d.lib_dir == '':
print_("DLL for %-12s: not found" % d.lib_name)
else:
print_("DLL for %-12s: %s" % (d.lib_name, d.lib_dir))
for d in DEPS[1:]:
if not d.found:
if not confirm("""
Warning, some of the pygame dependencies were not found. Pygame can still
compile and install, but games that depend on those missing dependencies
will not run. Would you like to continue the configuration?"""):
raise SystemExit()
break
return DEPS
if __name__ == '__main__':
print_("""This is the configuration subscript for MSYS.
Please run "config.py" for full configuration.""")
|
|
# /***********************************************************************
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Note that these code samples being shared are not official Google
# products and are not formally supported.
# ************************************************************************/
import traceback
from absl import logging
from google.api_core.exceptions import NotFound
from google.cloud import bigquery
from google.cloud.bigquery import Table
from termcolor import cprint
import app_settings
from flagmaker.settings import AbstractSettings
from utilities import Aggregation
from utilities import SettingUtil
from utilities import ViewGetter
from utilities import ViewTypes
from utilities import aggregate_if
from utilities import get_view_name
class DataSets:
raw: bigquery.dataset.Dataset = None
views: bigquery.dataset.Dataset = None
class CreateViews:
client: bigquery.Client = None
project: str = None
advertiser: str = None
settings: AbstractSettings = None
def __init__(self, config, client, project, advertiser):
self.settings: app_settings.AppSettings = config
self.client = client
self.project = project
self.advertiser = advertiser
self.s = SettingUtil(config)
def run(self):
report_level = self.s.unwrap('report_level')
if report_level == 'campaign':
raise MethodNotCreated('Methods for campaign-only views'
' not implemented.')
else:
self.view(
ViewTypes.KEYWORD_MAPPER,
'keyword_mapper'
)
if self.s.unwrap('has_historical_data'):
self.view(
ViewTypes.HISTORICAL_CONVERSIONS,
'historical_conversions'
)
self.view(
ViewTypes.HISTORICAL_REPORT,
'historical_report',
)
self.view(
ViewTypes.REPORT_VIEW,
'report_view',
)
def view(self, view_name: ViewTypes, func_name):
adv = str(self.s.unwrap('advertiser_id'))
logging.debug(view_name.value)
adv_view = get_view_name(view_name, adv)
view_ref = DataSets.views.table(adv_view)
view_query = getattr(
self,
func_name if func_name is not None else view_name.value
)(adv)
logging.debug(view_query)
try:
logging.debug(view_ref)
view: Table = self.client.get_table(view_ref)
view.view_query = view_query
self.client.update_table(view, ['view_query'])
cprint('= updated {}'.format(adv_view), 'green')
except NotFound as err:
try:
logging.debug('error:\n-----\n%s\n-----\n', err)
view = bigquery.Table(view_ref)
logging.info('%s.%s', view.dataset_id, view.table_id)
view.view_query = view_query
self.client.create_table(view, exists_ok=True)
cprint('+ created {}'.format(adv_view), 'green')
except NotFound as err:
cprint('Error: {}'.format(str(err)), 'red')
logging.info(traceback.format_exc())
self.keyword_mapper(adv)
def historical_conversions(self, advertiser):
views = ViewGetter(advertiser)
sql = """SELECT
h.date,
a.keywordId{deviceSegment},
a.keywordMatchType MatchType,
h.ad_group AdGroup,
{conversions} conversions,
{revenue} revenue
FROM `{project}`.`{raw}`.`{historical_table_name}` h
INNER JOIN (
SELECT keywordId,
keyword,
campaign,
keywordMatchType,
adGroup,
account
FROM `{project}`.`{views}`.`{keyword_mapper}`
GROUP BY
keywordId,
keyword,
campaign,
account,
adGroup,
keywordMatchType
) a
ON a.keyword=h.keyword
AND a.campaign=h.campaign_name
AND a.account=h.account_name
AND a.adGroup=h.ad_group
AND LOWER(a.keywordMatchType) = LOWER(h.match_type)
GROUP BY
h.date,
a.keywordId,
a.keyword,
a.keywordMatchType,
h.ad_group,
a.campaign{device_segment_column_name},
a.account""".format(
project=self.s.unwrap('gcp_project_name'),
deviceSegment=(
',\n' + 'h.device_segment'
+ ' AS device_segment'
) if self.s.unwrap('has_device_segment') else '',
device_segment_column_name = (
',\n'
+ 'h.' + self.s.unwrap('device_segment_column_name')
) if self.s.unwrap('has_device_segment') else '',
raw=self.s.unwrap('raw_dataset'),
views=self.s.unwrap('view_dataset'),
historical_table_name=views.get(ViewTypes.HISTORICAL),
keyword_mapper=views.get(ViewTypes.KEYWORD_MAPPER),
conversions=aggregate_if(
Aggregation.SUM,
'conversions',
'SUM(1)',
prefix='h',
),
revenue=aggregate_if(
Aggregation.SUM,
'revenue',
0,
prefix='h',
),
)
return sql
def keyword_mapper(self, advertiser):
sql = '''SELECT
k.keywordId,
k.keywordText keyword,
k.keywordMatchType,
c.campaign,
a.account,
g.adGroup,
a.accountType
FROM (
SELECT keywordText,
keywordId,
keywordEngineId,
campaignId,
accountId,
adgroupId,
keywordMatchType,
RANK() OVER (
PARTITION BY keywordText, keywordMatchType, campaignId,
accountId, adGroupId
ORDER BY CASE WHEN status='Active' THEN 0 ELSE 1 END,
CASE WHEN keywordEngineId IS NOT NULL THEN 0 ELSE 1 END
) rank
FROM `{project}`.`{raw_data}`.`Keyword_{advertiser_id}` c
) k
INNER JOIN (
SELECT campaignId, campaign
FROM `{project}`.`{raw_data}`.`Campaign_{advertiser_id}`
GROUP BY campaignId, campaign
) c ON c.campaignId = k.campaignId
INNER JOIN (
SELECT accountId, account, accountType
FROM `{project}`.`{raw_data}`.`Account_{advertiser_id}`
GROUP BY accountId, account, accountType
) a ON a.accountId = k.accountId
INNER JOIN (
SELECT adGroupId, adGroup
FROM `{project}`.`{raw_data}`.`AdGroup_{advertiser_id}`
GROUP BY adGroupId, adGroup
) g ON g.adGroupId = k.adGroupId
WHERE keywordText IS NOT NULL
AND rank = 1
GROUP BY
k.keywordId,
k.keywordText,
k.keywordMatchType,
c.campaign,
a.account,
a.accountType,
g.adGroup'''.format(
project=self.s.unwrap('gcp_project_name'),
raw_data=self.s.unwrap('raw_dataset'),
advertiser_id=advertiser,
)
return sql
def report_view(self, advertiser):
views = ViewGetter(advertiser)
deviceSegment = (',\n' + 'd.deviceSegment AS Device_Segment'
if self.s.unwrap('has_device_segment') else '')
historical_conversions = views.get(ViewTypes.HISTORICAL_CONVERSIONS)
project = self.s.unwrap('gcp_project_name')
view_data = self.s.unwrap('view_dataset')
raw_data = self.s.unwrap('raw_dataset')
keyword_mapper = views.get(ViewTypes.KEYWORD_MAPPER)
date = self.s.unwrap('first_date_conversions')
maybe_historical_data = 'LEFT JOIN ('
if self.s.unwrap('has_historical_data'):
maybe_historical_data += f"""
SELECT
date,
keywordId{deviceSegment},
SUM(revenue) revenue,
SUM(conversions) conversions
FROM `{project}.{view_data}.{historical_conversions}` o
GROUP BY
date,
keywordId{deviceSegment}
"""
else:
maybe_historical_data += """
SELECT CURRENT_DATE() as date,
'0' as keywordId,
NULL as device_segment,
NULL as revenue,
NULL AS conversions
"""
maybe_historical_data += ') h ON h.keywordId = d.keywordId' \
' AND h.date = d.date'
sql = f"""SELECT
d.date Date,
m.keywordId,
m.keyword Keyword{deviceSegment},
m.campaign Campaign,
a.advertiser Advertiser,
m.account Engine,
m.accountType Account_Type,
SUM(clicks) Clicks,
SUM(impr) Impressions,
SUM(weightedPos) Weighted_Pos,
COALESCE(SUM(cost), 0) Cost,
COALESCE(SUM(c.revenue), 0) + COALESCE(SUM(h.revenue), 0) Revenue,
COALESCE(SUM(c.conversions), 0) + COALESCE(SUM(h.conversions), 0) Conversions
FROM (
SELECT date,
keywordId,
SUM(clicks) clicks,
SUM(impr) impr,
SUM(avgPos*impr) weightedPos,
SUM(cost) cost{deviceSegment}
FROM `{project}.{raw_data}.KeywordDeviceStats_{advertiser}`
GROUP BY
date,
keywordId{deviceSegment}
) d
INNER JOIN (
SELECT advertiser
FROM `{project}.{raw_data}.Advertiser_{advertiser}`
LIMIT 1
) a ON 1=1
INNER JOIN `{project}.{view_data}.{keyword_mapper}` m
ON m.keywordId = d.keywordId
{maybe_historical_data}
LEFT JOIN (
SELECT
date,
keywordId,
SUM(dfaRevenue) revenue,
SUM(dfaTransactions) conversions
FROM
`{project}.{raw_data}.KeywordFloodlightAndDeviceStats_{advertiser}`
GROUP BY date, keywordId
) c
ON c.keywordId=d.keywordId
AND c.date=d.date
{"AND c.date > '{date}'" if date else ''}
GROUP BY
d.date,
m.keywordId,
m.keyword,
m.campaign,
a.advertiser,
m.account{deviceSegment},
m.accountType"""
return sql
def historical_report(self, advertiser):
views = ViewGetter(advertiser)
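# The "conversions" CTE below repeats each keywordId once per whole (floored)
# conversion and splits the result into an array, so that the UNNEST plus the join
# back to the historical conversions view yields one output row per counted conversion.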
sql = """WITH conversions AS (
SELECT
SPLIT(
REPEAT(CONCAT(keywordId, ","),
CAST(FLOOR(conversions) AS INT64)),
","
) keywords
FROM `{project}.{view_data}.{historical_conversions}`
)
SELECT
'conversions' as Row_Type,
'create' as Action,
'active' as Status,
a.advertiserId as Advertiser_ID,
a.agencyId as Agency_ID,
k.accountId as Account_ID,
a.accountType as Account_Type,
null as Floodlight_activity_ID,
null as Floodlight_activity_group_ID,
c1.date as Conversion_Date,
c1.keywordId as Keyword_ID,
c1.MatchType as Match_type,
c1.AdGroup,
c1.revenue/c1.conversions Conversion_Revenue
FROM `{project}.{view_data}.{historical_conversions}` c1
INNER JOIN (
SELECT keywordId
FROM conversions
CROSS JOIN UNNEST(conversions.keywords) keywordId
WHERE keywordId IS NOT NULL
) keywords
ON keywords.keywordId=c1.keywordId
AND FLOOR(c1.conversions) >= 1
INNER JOIN (
SELECT
accountId, advertiserId, agencyId,
keywordId, keywordMatchType
FROM `{project}`.`{raw_data}`.`Keyword_{advertiser_id}`
GROUP BY
accountId, advertiserId, agencyId,
keywordId, keywordMatchType
) k
ON keywords.keywordId=k.keywordId
INNER JOIN (
SELECT accountId, accountType, advertiserId, agencyId
FROM `{project}.{raw_data}.Account_{advertiser_id}`
GROUP BY accountId, accountType, advertiserId, agencyId
) a
ON a.accountId=k.accountId
""".format(
advertiser_id=advertiser,
project=self.s.unwrap('gcp_project_name'),
raw_data=self.s.unwrap('raw_dataset'),
view_data=self.s.unwrap('view_dataset'),
keyword_mapper=views.get(ViewTypes.KEYWORD_MAPPER),
historical_conversions=views.get(ViewTypes.HISTORICAL_CONVERSIONS),
)
return sql
class MethodNotCreated(Exception):
pass
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Autogenerated by Thrift Compiler (0.7.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def execute(self, functionName, funcArgs):
"""
Parameters:
- functionName
- funcArgs
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def execute(self, functionName, funcArgs):
"""
Parameters:
- functionName
- funcArgs
"""
self.send_execute(functionName, funcArgs)
return self.recv_execute()
def send_execute(self, functionName, funcArgs):
self._oprot.writeMessageBegin('execute', TMessageType.CALL, self._seqid)
args = execute_args()
args.functionName = functionName
args.funcArgs = funcArgs
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_execute(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = execute_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
if result.aze is not None:
raise result.aze
raise TApplicationException(TApplicationException.MISSING_RESULT, "execute failed: unknown result");
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["execute"] = Processor.process_execute
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_execute(self, seqid, iprot, oprot):
args = execute_args()
args.read(iprot)
iprot.readMessageEnd()
result = execute_result()
try:
result.success = self._handler.execute(args.functionName, args.funcArgs)
except DRPCExecutionException, e:
result.e = e
except AuthorizationException, aze:
result.aze = aze
oprot.writeMessageBegin("execute", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class execute_args:
"""
Attributes:
- functionName
- funcArgs
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'functionName', None, None, ), # 1
(2, TType.STRING, 'funcArgs', None, None, ), # 2
)
def __hash__(self):
return 0 + hash(self.functionName) + hash(self.funcArgs)
def __init__(self, functionName=None, funcArgs=None,):
self.functionName = functionName
self.funcArgs = funcArgs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.functionName = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.funcArgs = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_args')
if self.functionName is not None:
oprot.writeFieldBegin('functionName', TType.STRING, 1)
oprot.writeString(self.functionName.encode('utf-8'))
oprot.writeFieldEnd()
if self.funcArgs is not None:
oprot.writeFieldBegin('funcArgs', TType.STRING, 2)
oprot.writeString(self.funcArgs.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class execute_result:
"""
Attributes:
- success
- e
- aze
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', (DRPCExecutionException, DRPCExecutionException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
)
def __hash__(self):
return 0 + hash(self.success) + hash(self.e) + hash(self.aze)
def __init__(self, success=None, e=None, aze=None,):
self.success = success
self.e = e
self.aze = aze
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = DRPCExecutionException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8'))
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 2)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
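# Illustrative sketch (not part of the generated Thrift file): a typical way to call
# the DRPC service through the generated Client. The host, port, function name and
# argument string below are placeholders, and the transport/protocol choices must
# match the server's configuration.
def _example_drpc_call(host='localhost', port=3772,
                       function_name='my-drpc-func', func_args='{}'):
    """Open a framed, binary-protocol connection and invoke execute()."""
    from thrift.transport import TSocket
    socket = TSocket.TSocket(host, port)
    transport = TTransport.TFramedTransport(socket)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = Client(protocol)
    transport.open()
    try:
        # funcArgs is an opaque string; its format is defined by the server-side handler.
        return client.execute(function_name, func_args)
    finally:
        transport.close()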
|
|
"""Test requirements module."""
import os
from pathlib import Path
import pytest
from homeassistant import loader, setup
from homeassistant.requirements import (
CONSTRAINT_FILE,
PROGRESS_FILE,
RequirementsNotFound,
_install,
async_get_integration_with_requirements,
async_process_requirements,
)
from tests.async_mock import call, patch
from tests.common import MockModule, mock_integration
def env_without_wheel_links():
"""Return env without wheel links."""
env = dict(os.environ)
env.pop("WHEEL_LINKS", None)
return env
async def test_requirement_installed_in_venv(hass):
"""Test requirement installed in virtual environment."""
with patch("os.path.dirname", return_value="ha_package_path"), patch(
"homeassistant.util.package.is_virtual_env", return_value=True
), patch("homeassistant.util.package.is_docker_env", return_value=False), patch(
"homeassistant.util.package.install_package", return_value=True
) as mock_install, patch.dict(
os.environ, env_without_wheel_links(), clear=True
):
hass.config.skip_pip = False
mock_integration(hass, MockModule("comp", requirements=["package==0.0.1"]))
assert await setup.async_setup_component(hass, "comp", {})
assert "comp" in hass.config.components
assert mock_install.call_args == call(
"package==0.0.1",
constraints=os.path.join("ha_package_path", CONSTRAINT_FILE),
no_cache_dir=False,
)
async def test_requirement_installed_in_deps(hass):
"""Test requirement installed in deps directory."""
with patch("os.path.dirname", return_value="ha_package_path"), patch(
"homeassistant.util.package.is_virtual_env", return_value=False
), patch("homeassistant.util.package.is_docker_env", return_value=False), patch(
"homeassistant.util.package.install_package", return_value=True
) as mock_install, patch.dict(
os.environ, env_without_wheel_links(), clear=True
):
hass.config.skip_pip = False
mock_integration(hass, MockModule("comp", requirements=["package==0.0.1"]))
assert await setup.async_setup_component(hass, "comp", {})
assert "comp" in hass.config.components
assert mock_install.call_args == call(
"package==0.0.1",
target=hass.config.path("deps"),
constraints=os.path.join("ha_package_path", CONSTRAINT_FILE),
no_cache_dir=False,
)
async def test_install_existing_package(hass):
"""Test an install attempt on an existing package."""
with patch(
"homeassistant.util.package.install_package", return_value=True
) as mock_inst:
await async_process_requirements(hass, "test_component", ["hello==1.0.0"])
assert len(mock_inst.mock_calls) == 1
with patch("homeassistant.util.package.is_installed", return_value=True), patch(
"homeassistant.util.package.install_package"
) as mock_inst:
await async_process_requirements(hass, "test_component", ["hello==1.0.0"])
assert len(mock_inst.mock_calls) == 0
async def test_install_missing_package(hass):
"""Test an install attempt on an existing package."""
with patch(
"homeassistant.util.package.install_package", return_value=False
) as mock_inst:
with pytest.raises(RequirementsNotFound):
await async_process_requirements(hass, "test_component", ["hello==1.0.0"])
assert len(mock_inst.mock_calls) == 1
async def test_get_integration_with_requirements(hass):
"""Check getting an integration with loaded requirements."""
hass.config.skip_pip = False
mock_integration(
hass, MockModule("test_component_dep", requirements=["test-comp-dep==1.0.0"])
)
mock_integration(
hass,
MockModule(
"test_component_after_dep", requirements=["test-comp-after-dep==1.0.0"]
),
)
mock_integration(
hass,
MockModule(
"test_component",
requirements=["test-comp==1.0.0"],
dependencies=["test_component_dep"],
partial_manifest={"after_dependencies": ["test_component_after_dep"]},
),
)
with patch(
"homeassistant.util.package.is_installed", return_value=False
) as mock_is_installed, patch(
"homeassistant.util.package.install_package", return_value=True
) as mock_inst:
integration = await async_get_integration_with_requirements(
hass, "test_component"
)
assert integration
assert integration.domain == "test_component"
assert len(mock_is_installed.mock_calls) == 3
assert sorted(mock_call[1][0] for mock_call in mock_is_installed.mock_calls) == [
"test-comp-after-dep==1.0.0",
"test-comp-dep==1.0.0",
"test-comp==1.0.0",
]
assert len(mock_inst.mock_calls) == 3
assert sorted(mock_call[1][0] for mock_call in mock_inst.mock_calls) == [
"test-comp-after-dep==1.0.0",
"test-comp-dep==1.0.0",
"test-comp==1.0.0",
]
async def test_install_with_wheels_index(hass):
"""Test an install attempt with wheels index URL."""
hass.config.skip_pip = False
mock_integration(hass, MockModule("comp", requirements=["hello==1.0.0"]))
with patch("homeassistant.util.package.is_installed", return_value=False), patch(
"homeassistant.util.package.is_docker_env", return_value=True
), patch("homeassistant.util.package.install_package") as mock_inst, patch.dict(
os.environ, {"WHEELS_LINKS": "https://wheels.hass.io/test"}
), patch(
"os.path.dirname"
) as mock_dir:
mock_dir.return_value = "ha_package_path"
assert await setup.async_setup_component(hass, "comp", {})
assert "comp" in hass.config.components
assert mock_inst.call_args == call(
"hello==1.0.0",
find_links="https://wheels.hass.io/test",
constraints=os.path.join("ha_package_path", CONSTRAINT_FILE),
no_cache_dir=True,
)
async def test_install_on_docker(hass):
"""Test an install attempt on an docker system env."""
hass.config.skip_pip = False
mock_integration(hass, MockModule("comp", requirements=["hello==1.0.0"]))
with patch("homeassistant.util.package.is_installed", return_value=False), patch(
"homeassistant.util.package.is_docker_env", return_value=True
), patch("homeassistant.util.package.install_package") as mock_inst, patch(
"os.path.dirname"
) as mock_dir, patch.dict(
os.environ, env_without_wheel_links(), clear=True
):
mock_dir.return_value = "ha_package_path"
assert await setup.async_setup_component(hass, "comp", {})
assert "comp" in hass.config.components
assert mock_inst.call_args == call(
"hello==1.0.0",
constraints=os.path.join("ha_package_path", CONSTRAINT_FILE),
no_cache_dir=True,
)
async def test_progress_lock(hass):
"""Test an install attempt on an existing package."""
progress_path = Path(hass.config.path(PROGRESS_FILE))
kwargs = {"hello": "world"}
def assert_env(req, **passed_kwargs):
"""Assert the env."""
assert progress_path.exists()
assert req == "hello"
assert passed_kwargs == kwargs
return True
with patch("homeassistant.util.package.install_package", side_effect=assert_env):
_install(hass, "hello", kwargs)
assert not progress_path.exists()
async def test_discovery_requirements_ssdp(hass):
"""Test that we load discovery requirements."""
hass.config.skip_pip = False
ssdp = await loader.async_get_integration(hass, "ssdp")
mock_integration(
hass, MockModule("ssdp_comp", partial_manifest={"ssdp": [{"st": "roku:ecp"}]})
)
with patch(
"homeassistant.requirements.async_process_requirements",
) as mock_process:
await async_get_integration_with_requirements(hass, "ssdp_comp")
assert len(mock_process.mock_calls) == 3
assert mock_process.mock_calls[0][1][2] == ssdp.requirements
# Ensure zeroconf is a dep for ssdp
assert mock_process.mock_calls[1][1][1] == "zeroconf"
@pytest.mark.parametrize(
"partial_manifest",
[{"zeroconf": ["_googlecast._tcp.local."]}, {"homekit": {"models": ["LIFX"]}}],
)
async def test_discovery_requirements_zeroconf(hass, partial_manifest):
"""Test that we load discovery requirements."""
hass.config.skip_pip = False
zeroconf = await loader.async_get_integration(hass, "zeroconf")
mock_integration(
hass, MockModule("comp", partial_manifest=partial_manifest),
)
with patch(
"homeassistant.requirements.async_process_requirements",
) as mock_process:
await async_get_integration_with_requirements(hass, "comp")
assert len(mock_process.mock_calls) == 2 # zeroconf also depends on http
assert mock_process.mock_calls[0][1][2] == zeroconf.requirements
|
|
"""Unit test for Treadmill apptrace events module.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import mock
from treadmill.apptrace import events
class AppTraceEventsTest(unittest.TestCase):
"""Test all event classes operations.
"""
@mock.patch('treadmill.apptrace.events.AbortedTraceEvent.from_data',
mock.Mock(set_spec=True))
@mock.patch('treadmill.apptrace.events.ConfiguredTraceEvent.from_data',
mock.Mock(set_spec=True))
@mock.patch('treadmill.apptrace.events.DeletedTraceEvent.from_data',
mock.Mock(set_spec=True))
@mock.patch('treadmill.apptrace.events.FinishedTraceEvent.from_data',
mock.Mock(set_spec=True))
@mock.patch('treadmill.apptrace.events.KilledTraceEvent.from_data',
mock.Mock(set_spec=True))
@mock.patch('treadmill.apptrace.events.PendingTraceEvent.from_data',
mock.Mock(set_spec=True))
@mock.patch('treadmill.apptrace.events.PendingDeleteTraceEvent.from_data',
mock.Mock(set_spec=True))
@mock.patch('treadmill.apptrace.events.ScheduledTraceEvent.from_data',
mock.Mock(set_spec=True))
@mock.patch(('treadmill.apptrace.events'
'.ServiceExitedTraceEvent.from_data'),
mock.Mock(set_spec=True))
@mock.patch(('treadmill.apptrace.events'
'.ServiceRunningTraceEvent.from_data'),
mock.Mock(set_spec=True))
def test_factory(self):
"""Test class factory operations.
"""
events.AppTraceEvent.from_data(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
event_type='scheduled',
event_data='here',
payload={'foo': 'bar'}
)
events.ScheduledTraceEvent.from_data.assert_called_with(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
event_type='scheduled',
event_data='here',
payload={'foo': 'bar'}
)
events.AppTraceEvent.from_data(
timestamp=2,
source='tests',
instanceid='proid.foo#123',
event_type='pending',
event_data=None,
payload={'foo': 'bar'}
)
events.PendingTraceEvent.from_data.assert_called_with(
timestamp=2,
source='tests',
instanceid='proid.foo#123',
event_type='pending',
event_data=None,
payload={'foo': 'bar'}
)
def test_factory_bad_event(self):
"""Tests that failure to parse the event returns None.
"""
res = events.AppTraceEvent.from_data(
timestamp=2,
source='tests',
instanceid='proid.foo#123',
event_type='does_not_exists',
event_data=None,
payload={'foo': 'bar'}
)
self.assertIsNone(res)
res = events.AppTraceEvent.from_data(
timestamp=2,
source='tests',
instanceid='proid.foo#123',
event_type='service_running',
event_data=None,
payload={'foo': 'bar'}
)
self.assertIsNone(res)
def test_scheduled(self):
"""Scheduled event operations.
"""
event = events.ScheduledTraceEvent(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
where='here',
why='because',
payload={'foo': 'bar'}
)
self.assertEqual(
event.to_dict(),
{
'event_type': 'scheduled',
'timestamp': 1,
'source': 'tests',
'instanceid': 'proid.foo#123',
'where': 'here',
'why': 'because',
'payload': {'foo': 'bar'},
}
)
self.assertEqual(
event.to_data(),
(
1,
'tests',
'proid.foo#123',
'scheduled',
'here:because',
{'foo': 'bar'},
)
)
self.assertEqual(
event,
events.ScheduledTraceEvent.from_data(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
event_type='scheduled',
event_data='here:because',
payload={'foo': 'bar'}
)
)
def test_pending(self):
"""Pending event operations.
"""
event = events.PendingTraceEvent(
why='created',
timestamp=2,
source='tests',
instanceid='proid.foo#123',
payload={'foo': 'bar'}
)
self.assertEqual(
event.to_dict(),
{
'event_type': 'pending',
'timestamp': 2,
'source': 'tests',
'instanceid': 'proid.foo#123',
'payload': {'foo': 'bar'},
'why': 'created',
}
)
self.assertEqual(
event.to_data(),
(
2,
'tests',
'proid.foo#123',
'pending',
'created',
{'foo': 'bar'},
)
)
self.assertEqual(
event,
events.PendingTraceEvent.from_data(
timestamp=2,
source='tests',
instanceid='proid.foo#123',
event_type='pending',
event_data='created',
payload={'foo': 'bar'}
)
)
def test_pending_delete(self):
"""PendingDelete event operations.
"""
event = events.PendingDeleteTraceEvent(
why='deleted',
timestamp=2,
source='tests',
instanceid='proid.foo#123',
payload={'foo': 'bar'}
)
self.assertEqual(
event.to_dict(),
{
'event_type': 'pending_delete',
'timestamp': 2,
'source': 'tests',
'instanceid': 'proid.foo#123',
'payload': {'foo': 'bar'},
'why': 'deleted',
}
)
self.assertEqual(
event.to_data(),
(
2,
'tests',
'proid.foo#123',
'pending_delete',
'deleted',
{'foo': 'bar'},
)
)
self.assertEqual(
event,
events.PendingDeleteTraceEvent.from_data(
timestamp=2,
source='tests',
instanceid='proid.foo#123',
event_type='pending_delete',
event_data='deleted',
payload={'foo': 'bar'}
)
)
def test_configured(self):
"""Configured event operations.
"""
event = events.ConfiguredTraceEvent(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
uniqueid='AAAA',
payload={'foo': 'bar'}
)
self.assertEqual(
event.to_dict(),
{
'event_type': 'configured',
'timestamp': 1,
'source': 'tests',
'instanceid': 'proid.foo#123',
'uniqueid': 'AAAA',
'payload': {'foo': 'bar'},
}
)
self.assertEqual(
event.to_data(),
(
1,
'tests',
'proid.foo#123',
'configured',
'AAAA',
{'foo': 'bar'},
)
)
self.assertEqual(
event,
events.ConfiguredTraceEvent.from_data(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
event_type='configured',
event_data='AAAA',
payload={'foo': 'bar'}
)
)
def test_deleted(self):
"""Deleted event operations.
"""
event = events.DeletedTraceEvent(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
payload={'foo': 'bar'}
)
self.assertEqual(
event.to_dict(),
{
'event_type': 'deleted',
'timestamp': 1,
'source': 'tests',
'instanceid': 'proid.foo#123',
'payload': {'foo': 'bar'},
}
)
self.assertEqual(
event.to_data(),
(
1,
'tests',
'proid.foo#123',
'deleted',
'',
{'foo': 'bar'},
)
)
self.assertEqual(
event,
events.DeletedTraceEvent.from_data(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
event_type='deleted',
event_data='not used',
payload={'foo': 'bar'}
)
)
def test_finished(self):
"""Finished event operations.
"""
event = events.FinishedTraceEvent(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
rc=1,
signal=2,
payload={'foo': 'bar'}
)
self.assertEqual(
event.to_dict(),
{
'event_type': 'finished',
'timestamp': 1,
'source': 'tests',
'instanceid': 'proid.foo#123',
'rc': 1,
'signal': 2,
'payload': {'foo': 'bar'},
}
)
self.assertEqual(
event.to_data(),
(
1,
'tests',
'proid.foo#123',
'finished',
'1.2',
{'foo': 'bar'},
)
)
self.assertEqual(
event,
events.FinishedTraceEvent.from_data(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
event_type='finished',
event_data='1.2',
payload={'foo': 'bar'}
)
)
def test_aborted(self):
"""Aborted event operations.
"""
event = events.AbortedTraceEvent(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
why='reason',
payload='test'
)
self.assertEqual(
event.to_dict(),
{
'event_type': 'aborted',
'timestamp': 1,
'source': 'tests',
'instanceid': 'proid.foo#123',
'why': 'reason',
'payload': 'test',
}
)
self.assertEqual(
event.to_data(),
(
1,
'tests',
'proid.foo#123',
'aborted',
'reason',
'test',
)
)
self.assertEqual(
event,
events.AbortedTraceEvent.from_data(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
event_type='aborted',
event_data='reason',
payload='test'
)
)
def test_killed(self):
"""Killed event operations.
"""
event = events.KilledTraceEvent(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
is_oom=True,
payload={'foo': 'bar'}
)
self.assertEqual(
event.to_dict(),
{
'event_type': 'killed',
'timestamp': 1,
'source': 'tests',
'instanceid': 'proid.foo#123',
'is_oom': True,
'payload': {'foo': 'bar'},
}
)
self.assertEqual(
event.to_data(),
(
1,
'tests',
'proid.foo#123',
'killed',
'oom',
{'foo': 'bar'},
)
)
self.assertEqual(
event,
events.KilledTraceEvent.from_data(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
event_type='killed',
event_data='oom',
payload={'foo': 'bar'}
)
)
def test_service_running(self):
"""ServiceRunning event operations.
"""
event = events.ServiceRunningTraceEvent(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
uniqueid='AAAA',
service='web.web',
payload={'foo': 'bar'}
)
self.assertEqual(
event.to_dict(),
{
'event_type': 'service_running',
'timestamp': 1,
'source': 'tests',
'instanceid': 'proid.foo#123',
'uniqueid': 'AAAA',
'service': 'web.web',
'payload': {'foo': 'bar'},
}
)
self.assertEqual(
event.to_data(),
(
1,
'tests',
'proid.foo#123',
'service_running',
'AAAA.web.web',
{'foo': 'bar'},
)
)
self.assertEqual(
event,
events.ServiceRunningTraceEvent.from_data(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
event_type='service_running',
event_data='AAAA.web.web',
payload={'foo': 'bar'}
)
)
def test_service_exited(self):
"""ServiceExited event operations.
"""
event = events.ServiceExitedTraceEvent(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
uniqueid='AAAA',
service='web.x',
rc=1,
signal=2,
payload={'foo': 'bar'}
)
self.assertEqual(
event.to_dict(),
{
'event_type': 'service_exited',
'timestamp': 1,
'source': 'tests',
'instanceid': 'proid.foo#123',
'uniqueid': 'AAAA',
'service': 'web.x',
'rc': 1,
'signal': 2,
'payload': {'foo': 'bar'},
}
)
self.assertEqual(
event.to_data(),
(
1,
'tests',
'proid.foo#123',
'service_exited',
'AAAA.web.x.1.2',
{'foo': 'bar'},
)
)
self.assertEqual(
event,
events.ServiceExitedTraceEvent.from_data(
timestamp=1,
source='tests',
instanceid='proid.foo#123',
event_type='service_exited',
event_data='AAAA.web.x.1.2',
payload={'foo': 'bar'}
)
)
if __name__ == '__main__':
unittest.main()
|
|
# The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2013 reddit
# Inc. All Rights Reserved.
###############################################################################
from urllib import urlencode
import base64
import simplejson
from pylons import c, g, request
from pylons.i18n import _
from r2.config.extensions import set_extension
from r2.lib.base import abort
from reddit_base import RedditController, MinimalController, require_https
from r2.lib.db.thing import NotFound
from r2.models import Account
from r2.models.token import (
OAuth2Client, OAuth2AuthorizationCode, OAuth2AccessToken,
OAuth2RefreshToken, OAuth2Scope)
from r2.lib.errors import ForbiddenError, errors
from r2.lib.pages import OAuth2AuthorizationPage
from r2.lib.require import RequirementException, require, require_split
from r2.lib.utils import constant_time_compare, parse_http_basic, UrlParser
from r2.lib.validator import (
nop,
validate,
VRequired,
VThrottledLogin,
VOneOf,
VUser,
VModhash,
VOAuth2ClientID,
VOAuth2Scope,
VOAuth2RefreshToken,
)
def _update_redirect_uri(base_redirect_uri, params):
parsed = UrlParser(base_redirect_uri)
parsed.update_query(**params)
return parsed.unparse()
class OAuth2FrontendController(RedditController):
def check_for_bearer_token(self):
pass
def pre(self):
RedditController.pre(self)
require_https()
def _check_redirect_uri(self, client, redirect_uri):
if not redirect_uri or not client or redirect_uri != client.redirect_uri:
abort(ForbiddenError(errors.OAUTH2_INVALID_REDIRECT_URI))
def _error_response(self, state, redirect_uri):
"""Return an error redirect, but only if client_id and redirect_uri are valid."""
resp = {"state": state}
if (errors.OAUTH2_INVALID_CLIENT, "client_id") in c.errors:
resp["error"] = "unauthorized_client"
elif (errors.OAUTH2_ACCESS_DENIED, "authorize") in c.errors:
resp["error"] = "access_denied"
elif (errors.BAD_HASH, None) in c.errors:
resp["error"] = "access_denied"
elif (errors.INVALID_OPTION, "response_type") in c.errors:
resp["error"] = "unsupported_response_type"
elif (errors.OAUTH2_INVALID_SCOPE, "scope") in c.errors:
resp["error"] = "invalid_scope"
else:
resp["error"] = "invalid_request"
final_redirect = _update_redirect_uri(redirect_uri, resp)
return self.redirect(final_redirect, code=302)
@validate(VUser(),
response_type = VOneOf("response_type", ("code",)),
client = VOAuth2ClientID(),
redirect_uri = VRequired("redirect_uri", errors.OAUTH2_INVALID_REDIRECT_URI),
scope = VOAuth2Scope(),
state = VRequired("state", errors.NO_TEXT),
duration = VOneOf("duration", ("temporary", "permanent"),
default="temporary"))
def GET_authorize(self, response_type, client, redirect_uri, scope, state,
duration):
"""
First step in [OAuth 2.0](http://oauth.net/2/) authentication.
End users will be prompted for their credentials (username/password)
and asked if they wish to authorize the application identified by
the **client_id** parameter with the permissions specified by the
**scope** parameter. They are then redirected to the endpoint on
the client application's side specified by **redirect_uri**.
If the user granted permission to the application, the response will
contain a **code** parameter with a temporary authorization code
which can be exchanged for an access token at
[/api/v1/access_token](#api_method_access_token).
**redirect_uri** must match the URI configured for the client in the
[app preferences](/prefs/apps). If **client_id** or **redirect_uri**
is not valid, or if the call does not take place over SSL, a 403
error will be returned. For all other errors, a redirect to
**redirect_uri** will be returned, with an **error** parameter
indicating why the request failed.
"""
self._check_redirect_uri(client, redirect_uri)
if not c.errors:
return OAuth2AuthorizationPage(client, redirect_uri, scope, state,
duration).render()
else:
return self._error_response(state, redirect_uri)
@validate(VUser(),
VModhash(fatal=False),
client = VOAuth2ClientID(),
redirect_uri = VRequired("redirect_uri", errors.OAUTH2_INVALID_REDIRECT_URI),
scope = VOAuth2Scope(),
state = VRequired("state", errors.NO_TEXT),
duration = VOneOf("duration", ("temporary", "permanent"),
default="temporary"),
authorize = VRequired("authorize", errors.OAUTH2_ACCESS_DENIED))
def POST_authorize(self, authorize, client, redirect_uri, scope, state,
duration):
"""Endpoint for OAuth2 authorization."""
self._check_redirect_uri(client, redirect_uri)
if not c.errors:
code = OAuth2AuthorizationCode._new(client._id, redirect_uri,
c.user._id36, scope,
duration == "permanent")
resp = {"code": code._id, "state": state}
final_redirect = _update_redirect_uri(redirect_uri, resp)
return self.redirect(final_redirect, code=302)
else:
return self._error_response(state, redirect_uri)
class OAuth2AccessController(MinimalController):
def pre(self):
set_extension(request.environ, "json")
MinimalController.pre(self)
require_https()
c.oauth2_client = self._get_client_auth()
def _get_client_auth(self):
auth = request.headers.get("Authorization")
try:
client_id, client_secret = parse_http_basic(auth)
client = OAuth2Client.get_token(client_id)
require(client)
require(constant_time_compare(client.secret, client_secret))
return client
except RequirementException:
abort(401, headers=[("WWW-Authenticate", 'Basic realm="reddit"')])
@validate(grant_type=VOneOf("grant_type",
("authorization_code",
"refresh_token",
"password")))
def POST_access_token(self, grant_type):
"""
Exchange an [OAuth 2.0](http://oauth.net/2/) authorization code
or refresh token (from [/api/v1/authorize](#api_method_authorize)) for
an access token.
On success, returns a URL-encoded dictionary containing
**access_token**, **token_type**, **expires_in**, and **scope**.
If an authorization code for a permanent grant was given, a
**refresh_token** will be included. If there is a problem, an **error**
parameter will be returned instead.
Must be called using SSL, and must contain an HTTP `Authorization:`
header which contains the application's client identifier as the
username and client secret as the password. (The client id and secret
are visible on the [app preferences page](/prefs/apps).)
Per the OAuth specification, **grant_type** must
be ``authorization_code`` for the initial access token,
``refresh_token`` for renewing the access token, or ``password`` for
script-type apps authenticating with a username and password.
**redirect_uri** must exactly match the value that was used in the call
to [/api/v1/authorize](#api_method_authorize) that created this grant.
"""
if grant_type == "authorization_code":
return self._access_token_code()
elif grant_type == "refresh_token":
return self._access_token_refresh()
elif grant_type == "password":
return self._access_token_password()
else:
resp = {"error": "unsupported_grant_type"}
return self.api_wrapper(resp)
def _check_for_errors(self):
resp = {}
if (errors.INVALID_OPTION, "scope") in c.errors:
resp["error"] = "invalid_scope"
else:
resp["error"] = "invalid_request"
return resp
def _make_token_dict(self, access_token, refresh_token=None):
if not access_token:
return {"error": "invalid_grant"}
expires_in = int(access_token._ttl) if access_token._ttl else None
resp = {
"access_token": access_token._id,
"token_type": access_token.token_type,
"expires_in": expires_in,
"scope": access_token.scope,
}
if refresh_token:
resp["refresh_token"] = refresh_token._id
return resp
@validate(code=nop("code"),
redirect_uri=VRequired("redirect_uri",
errors.OAUTH2_INVALID_REDIRECT_URI))
def _access_token_code(self, code, redirect_uri):
if not code:
c.errors.add("NO_TEXT", field="code")
if c.errors:
return self.api_wrapper(self._check_for_errors())
access_token = None
refresh_token = None
auth_token = OAuth2AuthorizationCode.use_token(
code, c.oauth2_client._id, redirect_uri)
if auth_token:
if auth_token.refreshable:
refresh_token = OAuth2RefreshToken._new(
auth_token.client_id, auth_token.user_id,
auth_token.scope)
access_token = OAuth2AccessToken._new(
auth_token.client_id, auth_token.user_id,
auth_token.scope,
refresh_token._id if refresh_token else None)
resp = self._make_token_dict(access_token, refresh_token)
return self.api_wrapper(resp)
@validate(refresh_token=VOAuth2RefreshToken("refresh_token"))
def _access_token_refresh(self, refresh_token):
resp = {}
access_token = None
if refresh_token:
access_token = OAuth2AccessToken._new(
refresh_token.client_id, refresh_token.user_id,
refresh_token.scope,
refresh_token=refresh_token._id)
else:
c.errors.add("NO_TEXT", field="refresh_token")
if c.errors:
resp = self._check_for_errors()
else:
resp = self._make_token_dict(access_token)
return self.api_wrapper(resp)
@validate(user=VThrottledLogin(["username", "password"]),
scope=nop("scope"))
def _access_token_password(self, user, scope):
# username:password auth via OAuth is only allowed for
# private use scripts
client = c.oauth2_client
if client.app_type != "script":
return self.api_wrapper({"error": "unauthorized_client",
"error_description": "Only script apps may use password auth"})
dev_ids = client._developer_ids
if not user or user._id not in dev_ids:
return self.api_wrapper({"error": "invalid_grant"})
if c.errors:
return self.api_wrapper(self._check_for_errors())
if scope:
scope = OAuth2Scope(scope)
if not scope.is_valid():
c.errors.add(errors.INVALID_OPTION, "scope")
return self.api_wrapper({"error": "invalid_scope"})
else:
scope = OAuth2Scope(OAuth2Scope.FULL_ACCESS)
access_token = OAuth2AccessToken._new(
client._id,
user._id36,
scope
)
resp = self._make_token_dict(access_token)
return self.api_wrapper(resp)
def require_oauth2_scope(*scopes):
def oauth2_scope_wrap(fn):
fn.oauth2_perms = {"allowed_scopes": scopes}
return fn
return oauth2_scope_wrap
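# Illustrative client-side sketch (not part of reddit's server code): how a third-party
# application would drive the flow implemented by the controllers above. The host,
# client credentials, redirect URI, scope and code below are placeholders.
def _example_oauth2_client_flow():
    """Build the authorize URL, then exchange the returned code for tokens."""
    import requests  # assumed to be available in the client application
    base = "https://oauth.example.com"  # placeholder host
    client_id, client_secret = "CLIENT_ID", "CLIENT_SECRET"
    redirect_uri = "https://app.example.com/callback"
    # Step 1: send the user to GET /api/v1/authorize (see GET_authorize above).
    authorize_url = base + "/api/v1/authorize?" + urlencode({
        "client_id": client_id,
        "response_type": "code",
        "state": "opaque-anti-csrf-value",
        "redirect_uri": redirect_uri,
        "duration": "permanent",
        "scope": "identity",
    })
    # Step 2: after the redirect back with ?code=..., exchange the code for tokens via
    # POST /api/v1/access_token using HTTP Basic client authentication
    # (see OAuth2AccessController.POST_access_token above).
    code = "CODE_FROM_REDIRECT"
    token_response = requests.post(
        base + "/api/v1/access_token",
        data={
            "grant_type": "authorization_code",
            "code": code,
            "redirect_uri": redirect_uri,
        },
        auth=(client_id, client_secret),
    )
    return authorize_url, token_response.json()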
|
|
import logging
from pathlib import Path
from typing import Text, Optional, Union
from unittest.mock import Mock
import pytest
from aioresponses import aioresponses
from rasa.shared.exceptions import FileNotFoundException
from tests.utilities import latest_request, json_of_latest_request
import rasa.utils.endpoints as endpoint_utils
@pytest.mark.parametrize(
"base, subpath, expected_result",
[
("https://example.com", None, "https://example.com"),
("https://example.com/test", None, "https://example.com/test"),
("https://example.com/", None, "https://example.com/"),
("https://example.com/", "test", "https://example.com/test"),
("https://example.com/", "test/", "https://example.com/test/"),
(
"http://duckling.rasa.com:8000",
"/parse",
"http://duckling.rasa.com:8000/parse",
),
(
"http://duckling.rasa.com:8000/",
"/parse",
"http://duckling.rasa.com:8000/parse",
),
],
)
def test_concat_url(base, subpath, expected_result):
assert endpoint_utils.concat_url(base, subpath) == expected_result
def test_warning_for_base_paths_with_trailing_slash(caplog):
test_path = "base/"
with caplog.at_level(logging.DEBUG, logger="rasa.utils.endpoints"):
assert endpoint_utils.concat_url(test_path, None) == test_path
assert len(caplog.records) == 1
async def test_endpoint_config():
with aioresponses() as mocked:
endpoint = endpoint_utils.EndpointConfig(
"https://example.com/",
params={"A": "B"},
headers={"X-Powered-By": "Rasa"},
basic_auth={"username": "user", "password": "pass"},
token="mytoken",
token_name="letoken",
type="redis",
port=6379,
db=0,
password="password",
timeout=30000,
)
mocked.post(
"https://example.com/test?A=B&P=1&letoken=mytoken",
payload={"ok": True},
repeat=True,
status=200,
)
await endpoint.request(
"post",
subpath="test",
content_type="application/text",
json={"c": "d"},
params={"P": "1"},
)
r = latest_request(
mocked, "post", "https://example.com/test?A=B&P=1&letoken=mytoken"
)
assert r
assert json_of_latest_request(r) == {"c": "d"}
assert r[-1].kwargs.get("params", {}).get("A") == "B"
assert r[-1].kwargs.get("params", {}).get("P") == "1"
assert r[-1].kwargs.get("params", {}).get("letoken") == "mytoken"
# unfortunately, the mock library won't report any headers stored on
# the session object, so we need to verify them separately
async with endpoint.session() as s:
assert s._default_headers.get("X-Powered-By") == "Rasa"
assert s._default_auth.login == "user"
assert s._default_auth.password == "pass"
async def test_endpoint_config_with_cafile(tmp_path: Path):
cafile = "data/test_endpoints/cert.pem"
with aioresponses() as mocked:
endpoint = endpoint_utils.EndpointConfig(
"https://example.com/", cafile=str(cafile),
)
mocked.post(
"https://example.com/", status=200,
)
await endpoint.request("post",)
request = latest_request(mocked, "post", "https://example.com/")[-1]
ssl_context = request.kwargs["ssl"]
certs = ssl_context.get_ca_certs()
assert certs[0]["subject"][4][0] == ("organizationalUnitName", "rasa")
async def test_endpoint_config_with_non_existent_cafile(tmp_path: Path):
cafile = "data/test_endpoints/no_file.pem"
endpoint = endpoint_utils.EndpointConfig(
"https://example.com/", cafile=str(cafile),
)
with pytest.raises(FileNotFoundException):
await endpoint.request("post",)
def test_endpoint_config_default_token_name():
test_data = {"url": "http://test", "token": "token"}
actual = endpoint_utils.EndpointConfig.from_dict(test_data)
assert actual.token_name == "token"
def test_endpoint_config_custom_token_name():
test_data = {"url": "http://test", "token": "token", "token_name": "test_token"}
actual = endpoint_utils.EndpointConfig.from_dict(test_data)
assert actual.token_name == "test_token"
async def test_request_non_json_response():
with aioresponses() as mocked:
endpoint = endpoint_utils.EndpointConfig("https://example.com/")
mocked.post(
"https://example.com/test",
payload="ok",
content_type="application/text",
status=200,
)
response = await endpoint.request("post", subpath="test")
assert not response
@pytest.mark.parametrize(
"filename, endpoint_type",
[("data/test_endpoints/example_endpoints.yml", "tracker_store"),],
)
def test_read_endpoint_config(filename: Text, endpoint_type: Text):
conf = endpoint_utils.read_endpoint_config(filename, endpoint_type)
assert isinstance(conf, endpoint_utils.EndpointConfig)
@pytest.mark.parametrize(
"endpoint_type, cafile",
[("action_endpoint", "./some_test_file"), ("tracker_store", None)],
)
def test_read_endpoint_config_with_cafile(endpoint_type: Text, cafile: Optional[Text]):
conf = endpoint_utils.read_endpoint_config(
"data/test_endpoints/example_endpoints.yml", endpoint_type
)
assert conf.cafile == cafile
@pytest.mark.parametrize(
"filename, endpoint_type",
[
("", "tracker_store"),
("data/test_endpoints/example_endpoints.yml", "stuff"),
("data/test_endpoints/example_endpoints.yml", "empty"),
("/unknown/path.yml", "tracker_store"),
],
)
def test_read_endpoint_config_not_found(filename: Text, endpoint_type: Text):
conf = endpoint_utils.read_endpoint_config(filename, endpoint_type)
assert conf is None
@pytest.mark.parametrize(
"value, default, expected_result",
[
(None, True, True),
(False, True, False),
("false", True, False),
("true", False, True),
],
)
def test_bool_arg(
value: Optional[Union[bool, str]], default: bool, expected_result: bool
):
request = Mock()
request.args = {}
if value is not None:
request.args = {"key": value}
assert endpoint_utils.bool_arg(request, "key", default) == expected_result
@pytest.mark.parametrize(
"value, default, expected_result",
[(None, 0.5, 0.5), (0.5, None, 0.5), ("0.5", 0, 0.5), ("a", 0.5, 0.5)],
)
def test_float_arg(
value: Optional[Union[float, str]], default: float, expected_result: float
):
request = Mock()
request.args = {}
if value is not None:
request.args = {"key": value}
assert endpoint_utils.float_arg(request, "key", default) == expected_result
@pytest.mark.parametrize(
"value, default, expected_result",
[(None, 0, 0), (1, 0, 1), ("1", 0, 1), ("a", 0, 0)],
)
def test_int_arg(value: Optional[Union[int, str]], default: int, expected_result: int):
request = Mock()
request.args = {}
if value is not None:
request.args = {"key": value}
assert endpoint_utils.int_arg(request, "key", default) == expected_result
|
|
# Lint as: python3
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Variance swap pricing using replicating portfolio approach."""
import tensorflow.compat.v2 as tf
from tf_quant_finance.black_scholes import vanilla_prices
from tf_quant_finance.math import diff_ops
def replicating_weights(ordered_strikes,
reference_strikes,
expiries,
validate_args=False,
dtype=None,
name=None):
"""Calculates the weights for options to recreate the variance swap payoff.
This implements the approach in Appendix A of Demeterfi et al. (1999) for
calculating the weights of the European options required to replicate the
payoff of a variance swap given traded strikes. In particular this function
calculates the weights for the put option part of the portfolio (when
`ordered_strikes` is descending) or for the call option part of the portfolio
(when `ordered_strikes` is ascending). See the fair strike docstring for
further details on variance swaps.
#### Example
```python
dtype = tf.float64
ordered_put_strikes = [100, 95, 90, 85]
reference_strikes = ordered_put_strikes[0]
expiries = 0.25
# Contains weights for put options at ordered_put_strikes[:-1]
put_weights = replicating_weights(
ordered_put_strikes, reference_strikes, expiries, dtype=dtype)
# [0.00206927, 0.00443828, 0.00494591]
```
#### References
[1] Demeterfi, K., Derman, E., Kamal, M. and Zou, J., 1999. More Than You Ever
Wanted To Know About Volatility Swaps. Goldman Sachs Quantitative Strategies
Research Notes.
Args:
ordered_strikes: A real `Tensor` of liquidly traded strikes of shape
`batch_shape + [num_strikes]`. The last entry will not receive a weight in
the portfolio. The values must be sorted ascending if the strikes are for
calls, or descending if the strikes are for puts. The final value in
`ordered_strikes` will not itself receive a weight.
reference_strikes: A `Tensor` of the same dtype as `ordered_strikes` and of
shape compatible with `batch_shape`. An arbitrarily chosen strike
representing an at the money strike price.
expiries: A `Tensor` of the same dtype as `ordered_strikes` and of shape
compatible with `batch_shape`. Represents the time to maturity of the
options.
validate_args: Python `bool`. When `True`, input `Tensor`s are checked for
validity. The checks verify that `ordered_strikes` is indeed ordered. When
`False` invalid inputs may silently render incorrect outputs, yet runtime
performance may be improved.
Default value: False.
dtype: `tf.Dtype`. If supplied the dtype for the input and output `Tensor`s.
Default value: None leading to use of `ordered_strikes.dtype`.
name: Python str. The name to give to the ops created by this function.
Default value: `None` which maps to 'variance_replicating_weights'.
Returns:
A `Tensor` of shape `batch_shape + [num_strikes - 1]` representing the
weight which should be given to each strike in the replicating portfolio,
save for the final strike which is not represented.
"""
with tf.name_scope(name or 'replicating_weights'):
# Input conversion.
ordered_strikes = tf.convert_to_tensor(
ordered_strikes, dtype=dtype, name='ordered_strikes')
dtype = dtype or ordered_strikes.dtype
reference_strikes = tf.expand_dims(
tf.convert_to_tensor(
reference_strikes, dtype=dtype, name='reference_strikes'), -1)
expiries = tf.expand_dims(
tf.convert_to_tensor(expiries, dtype=dtype, name='expiries'), -1)
# Descending is required for the formulae regardless of use as control dep.
strike_diff = diff_ops.diff(ordered_strikes, order=1, exclusive=True)
strikes_descending = tf.math.reduce_all(strike_diff < 0)
control_dependencies = []
if validate_args:
strikes_ascending = tf.math.reduce_all(strike_diff > 0)
control_dependencies.append(
tf.compat.v1.debugging.Assert(
tf.math.logical_or(strikes_descending, strikes_ascending),
[strike_diff]))
with tf.control_dependencies(control_dependencies):
# Weights calculation
term_lin = (ordered_strikes - reference_strikes) / reference_strikes
term_log = tf.math.log(ordered_strikes) - tf.math.log(reference_strikes)
payoff = (2.0 / expiries) * (term_lin - term_log)
payoff_diff = diff_ops.diff(payoff, order=1, exclusive=True)
r_vals = tf.math.divide_no_nan(payoff_diff, strike_diff)
zero = tf.zeros(r_vals.shape[:-1] + [1], dtype=r_vals.dtype)
r_vals_diff = diff_ops.diff(
tf.concat([zero, r_vals], axis=-1), order=1, exclusive=True)
# If the strikes were for puts we need to flip the sign before returning.
return tf.where(strikes_descending, -r_vals_diff, r_vals_diff)
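# For reference, a sketch of the Demeterfi et al. (Appendix A) recursion that the code
# above implements. With the log-payoff approximation
#   f(K) = (2 / T) * ((K - S*) / S* - ln(K / S*)),
# the slope between adjacent strikes is
#   r_i = (f(K_{i+1}) - f(K_i)) / (K_{i+1} - K_i),
# and the weight assigned to the option struck at K_i is the increment
#   w(K_i) = r_i - r_{i-1}   (with r_{-1} = 0),
# which is exactly the exclusive diff of `r_vals` computed above (sign-flipped for puts).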
def fair_strike(put_strikes,
put_volatilities,
call_strikes,
call_volatilities,
expiries,
discount_rates,
spots,
reference_strikes,
validate_args=False,
dtype=None,
name=None):
"""Calculates the fair value strike for a variance swap contract.
This implements the approach in Appendix A of Demeterfi et al. (1999), where a
variance swap is defined as a forward contract on the square of annualized
realized volatility (though the approach assumes continuous sampling). The
variance swap payoff is, then:
`notional * (realized_volatility^2 - variance_strike)`
The method calculates the weight of each European option required to
approximately replicate such a payoff using the discrete range of strike
prices and implied volatilities of European options traded on the market. The
fair value `variance_strike` is that which is expected to produce zero payoff.
#### Example
```python
dtype = tf.float64
call_strikes = tf.constant([[100, 105, 110, 115], [1000, 1100, 1200, 1300]],
dtype=dtype)
call_vols = 0.2 * tf.ones((2, 4), dtype=dtype)
put_strikes = tf.constant([[100, 95, 90, 85], [1000, 900, 800, 700]],
dtype=dtype)
put_vols = 0.2 * tf.ones((2, 4), dtype=dtype)
reference_strikes = tf.constant([100.0, 1000.0], dtype=dtype)
expiries = tf.constant([0.25, 0.25], dtype=dtype)
discount_rates = tf.constant([0.05, 0.05], dtype=dtype)
fair_strike(
put_strikes,
put_vols,
call_strikes,
call_vols,
expiries,
discount_rates,
reference_strikes,
reference_strikes,
dtype=tf.float64)
# [0.03825004, 0.04659269]
```
#### References
[1] Demeterfi, K., Derman, E., Kamal, M. and Zou, J., 1999. More Than You Ever
Wanted To Know About Volatility Swaps. Goldman Sachs Quantitative Strategies
Research Notes.
Args:
put_strikes: A real `Tensor` of shape `batch_shape + [num_put_strikes]`
containing the strike values of traded puts. This must be supplied in
**descending** order, and its elements should be less than or equal to the
`reference_strike`.
put_volatilities: A real `Tensor` of shape `batch_shape +
[num_put_strikes]` containing the market volatility for each strike in
`put_strikes`. The final value is unused.
call_strikes: A real `Tensor` of shape `batch_shape + [num_call_strikes]`
containing the strike values of traded calls. This must be supplied in
**ascending** order, and its elements should be greater than or equal to
the `reference_strike`.
call_volatilities: A real `Tensor` of shape `batch_shape +
[num_call_strikes]` containing the market volatility for each strike in
`call_strikes`. The final value is unused.
expiries: A real `Tensor` of shape compatible with `batch_shape` containing
the time to expiries of the contracts.
discount_rates: A real `Tensor` of shape compatible with `batch_shape`
containing the discount rate to be applied.
spots: A real `Tensor` of shape compatible with `batch_shape` containing the
current spot price of the asset.
reference_strikes: A real `Tensor` of shape compatible with `batch_shape`
containing an arbitrary value demarcating the atm boundary between liquid
calls and puts. Typically either the spot price or the (common) first
value of `put_strikes` or `call_strikes`.
validate_args: Python `bool`. When `True`, input `Tensor`s are checked for
validity. The checks verify the matching lengths of strikes and
volatilities. When `False` invalid inputs may silently render incorrect
outputs, yet runtime performance will be improved.
Default value: False.
dtype: `tf.Dtype`. If supplied the dtype for the input and output `Tensor`s.
Default value: None, leading to the default value inferred by Tensorflow.
name: Python str. The name to give to the ops created by this function.
Default value: `None` which maps to 'variance_swap_price'.
Returns:
A `Tensor` of shape `batch_shape` containing the fair value of variance for
each item in the batch. Note this is on the decimal rather than square
percentage scale.
"""
with tf.name_scope(name or 'variance_swap_price'):
put_strikes = tf.convert_to_tensor(
put_strikes, dtype=dtype, name='put_strikes')
dtype = dtype or put_strikes.dtype
put_volatilities = tf.convert_to_tensor(
put_volatilities, dtype=dtype, name='put_volatilities')
call_strikes = tf.convert_to_tensor(
call_strikes, dtype=dtype, name='call_strikes')
call_volatilities = tf.convert_to_tensor(
call_volatilities, dtype=dtype, name='call_volatilities')
expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')
discount_rates = tf.expand_dims(
tf.convert_to_tensor(
discount_rates, dtype=dtype, name='discount_rates'), -1)
spots = tf.expand_dims(
tf.convert_to_tensor(spots, dtype=dtype, name='spots'), -1)
reference_strikes = tf.convert_to_tensor(
reference_strikes, dtype=dtype, name='reference_strikes')
# Check the inputs are consistent in length.
control_dependencies = []
if validate_args:
control_dependencies.append(
tf.math.reduce_all(
tf.shape(put_strikes)[-1] == tf.shape(put_volatilities)[-1]))
control_dependencies.append(
tf.math.reduce_all(
tf.shape(call_strikes)[-1] == tf.shape(call_volatilities)[-1]))
with tf.control_dependencies(control_dependencies):
# Shape is `batch_shape + [num_put_strikes - 1]`
put_weights = replicating_weights(
put_strikes, reference_strikes, expiries, validate_args=validate_args)
# Shape is `batch_shape + [num_call_strikes - 1]`
call_weights = replicating_weights(
call_strikes,
reference_strikes,
expiries,
validate_args=validate_args)
expiries = tf.expand_dims(expiries, -1)
reference_strikes = tf.expand_dims(reference_strikes, -1)
put_prices = vanilla_prices.option_price(
volatilities=put_volatilities[..., :-1],
strikes=put_strikes[..., :-1],
expiries=expiries,
spots=spots,
discount_rates=discount_rates,
is_call_options=False,
)
call_prices = vanilla_prices.option_price(
volatilities=call_volatilities[..., :-1],
strikes=call_strikes[..., :-1],
expiries=expiries,
spots=spots,
discount_rates=discount_rates,
is_call_options=True,
)
effective_rate = expiries * discount_rates
discount_factor = tf.math.exp(effective_rate)
s_ratio = spots / reference_strikes
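# Deterministic part of the fair variance strike: the forward and log-contract
# terms from Demeterfi et al.; `options_value` below adds the discounted value of
# the replicating option portfolio built from the traded strikes.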
centrality_term = (2.0 / expiries) * (
effective_rate - discount_factor * s_ratio + 1 +
tf.math.log(s_ratio))
options_value = discount_factor * (
tf.math.reduce_sum(put_weights * put_prices, axis=-1, keepdims=True) +
tf.math.reduce_sum(
call_weights * call_prices, axis=-1, keepdims=True))
# Return values, undoing the dimension expansion introduced earlier.
return tf.squeeze(options_value + centrality_term, axis=-1)
|
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import functools
import os
import sys
from contextlib import contextmanager
from dataclasses import dataclass
from io import StringIO
from pathlib import Path, PurePath
from pprint import pformat
from tempfile import mkdtemp
from types import CoroutineType, GeneratorType
from typing import Any, Callable, Generic, Iterable, Iterator, Mapping, Sequence, TypeVar, cast
from pants.base.build_root import BuildRoot
from pants.base.specs_parser import SpecsParser
from pants.build_graph.build_configuration import BuildConfiguration
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.engine.addresses import Address
from pants.engine.console import Console
from pants.engine.environment import CompleteEnvironment
from pants.engine.fs import Digest, PathGlobs, PathGlobsAndRoot, Snapshot, Workspace
from pants.engine.goal import Goal
from pants.engine.internals import native_engine
from pants.engine.internals.native_engine import PyExecutor
from pants.engine.internals.scheduler import ExecutionError, SchedulerSession
from pants.engine.internals.selectors import Effect, Get, Params
from pants.engine.internals.session import SessionValues
from pants.engine.process import InteractiveProcess, InteractiveProcessResult
from pants.engine.rules import QueryRule as QueryRule
from pants.engine.rules import Rule
from pants.engine.target import AllTargets, Target, WrappedTarget
from pants.engine.unions import UnionMembership, UnionRule
from pants.init.engine_initializer import EngineInitializer
from pants.init.logging import initialize_stdio, initialize_stdio_raw, stdio_destination
from pants.option.global_options import (
DynamicRemoteOptions,
ExecutionOptions,
GlobalOptions,
LocalStoreOptions,
)
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.source import source_root
from pants.testutil.option_util import create_options_bootstrapper
from pants.util.collections import assert_single_element
from pants.util.contextutil import temporary_dir, temporary_file
from pants.util.dirutil import (
recursive_dirname,
safe_file_dump,
safe_mkdir,
safe_mkdtemp,
safe_open,
)
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet
def logging(original_function=None, *, level: LogLevel = LogLevel.INFO):
"""A decorator that enables logging (optionally at the given level).
May be used without a parameter list:
```
@logging
def test_function():
...
```
...or with a level argument:
```
@logging(level=LogLevel.DEBUG)
def test_function():
...
```
"""
def _decorate(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
stdout_fileno, stderr_fileno = sys.stdout.fileno(), sys.stderr.fileno()
with temporary_dir() as tempdir, initialize_stdio_raw(
level, False, False, {}, True, [], tempdir
), stdin_context() as stdin, stdio_destination(
stdin.fileno(), stdout_fileno, stderr_fileno
):
return func(*args, **kwargs)
return wrapper
if original_function:
return _decorate(original_function)
return _decorate
@contextmanager
def engine_error(
expected_underlying_exception: type[Exception] = Exception, *, contains: str | None = None
) -> Iterator[None]:
"""A context manager to catch `ExecutionError`s in tests and check that the underlying exception
is expected.
Use like this:
with engine_error(ValueError, contains="foo"):
rule_runner.request(OutputType, [input])
Will raise AssertionError if no ExecutionError occurred.
"""
try:
yield
except ExecutionError as exec_error:
        if len(exec_error.wrapped_exceptions) != 1:
formatted_errors = "\n\n".join(repr(e) for e in exec_error.wrapped_exceptions)
raise ValueError(
"Multiple underlying exceptions, but this helper function expected only one. "
"Use `with pytest.raises(ExecutionError) as exc` directly and inspect "
"`exc.value.wrapped_exceptions`.\n\n"
f"Errors: {formatted_errors}"
)
underlying = exec_error.wrapped_exceptions[0]
if not isinstance(underlying, expected_underlying_exception):
raise AssertionError(
"ExecutionError occurred as expected, but the underlying exception had type "
f"{type(underlying)} rather than the expected type "
f"{expected_underlying_exception}:\n\n{underlying}"
)
        if contains is not None and contains not in str(underlying):
            raise AssertionError(
                "Expected value not found in exception.\n"
                f"expected: {contains}\n\n"
                f"exception: {underlying}"
            )
    else:
        raise AssertionError(
            "DID NOT RAISE an ExecutionError with an underlying exception of type "
            f"{expected_underlying_exception}."
        )
# -----------------------------------------------------------------------------------------------
# `RuleRunner`
# -----------------------------------------------------------------------------------------------
_I = TypeVar("_I")
_O = TypeVar("_O")
# Use close to the minimum possible parallelism, since integration tests using RuleRunner will
# already be run by Pants with an appropriate level of parallelism. We must set
# max_threads > core_threads, so 2 is the minimum; via trial and error, 3 minimizes test times
# on average.
_EXECUTOR = PyExecutor(core_threads=1, max_threads=3)
# Environment variable names required for locating Python interpreters, for use with RuleRunner's
# env_inherit arguments.
# TODO: This is verbose and redundant: see https://github.com/pantsbuild/pants/issues/13350.
PYTHON_BOOTSTRAP_ENV = {"PATH", "PYENV_ROOT", "HOME"}
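# Illustrative usage (hypothetical goal class, not part of this module): tests that spawn Python
# processes typically forward these variables so the subprocess can locate an interpreter, e.g.
#
#     rule_runner.run_goal_rule(MyGoal, env_inherit=PYTHON_BOOTSTRAP_ENV)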
@dataclass(frozen=True)
class GoalRuleResult:
exit_code: int
stdout: str
stderr: str
@staticmethod
def noop() -> GoalRuleResult:
return GoalRuleResult(0, stdout="", stderr="")
# This is not frozen because we need to update the `scheduler` when setting options.
@dataclass
class RuleRunner:
build_root: str
options_bootstrapper: OptionsBootstrapper
build_config: BuildConfiguration
scheduler: SchedulerSession
def __init__(
self,
*,
rules: Iterable | None = None,
target_types: Iterable[type[Target]] | None = None,
objects: dict[str, Any] | None = None,
context_aware_object_factories: dict[str, Any] | None = None,
isolated_local_store: bool = False,
preserve_tmpdirs: bool = False,
ca_certs_path: str | None = None,
bootstrap_args: Iterable[str] = (),
use_deprecated_python_macros: bool = False,
extra_session_values: dict[Any, Any] | None = None,
max_workunit_verbosity: LogLevel = LogLevel.DEBUG,
) -> None:
bootstrap_args = [*bootstrap_args]
root_dir: Path | None = None
if preserve_tmpdirs:
root_dir = Path(mkdtemp(prefix="RuleRunner."))
print(f"Preserving rule runner temporary directories at {root_dir}.", file=sys.stderr)
bootstrap_args.extend(
["--no-process-cleanup", f"--local-execution-root-dir={root_dir}"]
)
build_root = (root_dir / "BUILD_ROOT").resolve()
build_root.mkdir()
self.build_root = str(build_root)
else:
self.build_root = os.path.realpath(safe_mkdtemp(prefix="_BUILD_ROOT"))
safe_mkdir(self.pants_workdir)
BuildRoot().path = self.build_root
# TODO: Redesign rule registration for tests to be more ergonomic and to make this less
# special-cased.
all_rules = (
*(rules or ()),
*source_root.rules(),
QueryRule(WrappedTarget, [Address]),
QueryRule(AllTargets, []),
QueryRule(UnionMembership, []),
)
build_config_builder = BuildConfiguration.Builder()
build_config_builder.register_aliases(
BuildFileAliases(
objects=objects, context_aware_object_factories=context_aware_object_factories
)
)
build_config_builder.register_rules("_dummy_for_test_", all_rules)
build_config_builder.register_target_types("_dummy_for_test_", target_types or ())
self.build_config = build_config_builder.create()
self.environment = CompleteEnvironment({})
self.options_bootstrapper = create_options_bootstrapper(args=bootstrap_args)
options = self.options_bootstrapper.full_options(self.build_config)
global_options = self.options_bootstrapper.bootstrap_options.for_global_scope()
dynamic_remote_options, _ = DynamicRemoteOptions.from_options(options, self.environment)
local_store_options = LocalStoreOptions.from_options(global_options)
if isolated_local_store:
if root_dir:
lmdb_store_dir = root_dir / "lmdb_store"
lmdb_store_dir.mkdir()
store_dir = str(lmdb_store_dir)
else:
store_dir = safe_mkdtemp(prefix="lmdb_store.")
local_store_options = dataclasses.replace(local_store_options, store_dir=store_dir)
local_execution_root_dir = global_options.local_execution_root_dir
named_caches_dir = global_options.named_caches_dir
graph_session = EngineInitializer.setup_graph_extended(
pants_ignore_patterns=GlobalOptions.compute_pants_ignore(
self.build_root, global_options
),
use_gitignore=False,
local_store_options=local_store_options,
local_execution_root_dir=local_execution_root_dir,
named_caches_dir=named_caches_dir,
build_root=self.build_root,
build_configuration=self.build_config,
executor=_EXECUTOR,
execution_options=ExecutionOptions.from_options(global_options, dynamic_remote_options),
ca_certs_path=ca_certs_path,
engine_visualize_to=None,
use_deprecated_python_macros=use_deprecated_python_macros,
).new_session(
build_id="buildid_for_test",
session_values=SessionValues(
{
OptionsBootstrapper: self.options_bootstrapper,
CompleteEnvironment: self.environment,
**(extra_session_values or {}),
}
),
max_workunit_level=max_workunit_verbosity,
)
self.scheduler = graph_session.scheduler_session
def __repr__(self) -> str:
return f"RuleRunner(build_root={self.build_root})"
@property
def pants_workdir(self) -> str:
return os.path.join(self.build_root, ".pants.d")
@property
def rules(self) -> FrozenOrderedSet[Rule | UnionRule]:
return FrozenOrderedSet([*self.build_config.rules, *self.build_config.union_rules])
@property
def target_types(self) -> tuple[type[Target], ...]:
return self.build_config.target_types
@property
def union_membership(self) -> UnionMembership:
"""An instance of `UnionMembership` with all the test's registered `UnionRule`s."""
return self.request(UnionMembership, [])
def new_session(self, build_id: str) -> None:
"""Mutates this RuleRunner to begin a new Session with the same Scheduler."""
self.scheduler = self.scheduler.scheduler.new_session(build_id)
def request(self, output_type: type[_O], inputs: Iterable[Any]) -> _O:
result = assert_single_element(
self.scheduler.product_request(output_type, [Params(*inputs)])
)
return cast(_O, result)
def run_goal_rule(
self,
goal: type[Goal],
*,
global_args: Iterable[str] | None = None,
args: Iterable[str] | None = None,
env: Mapping[str, str] | None = None,
env_inherit: set[str] | None = None,
) -> GoalRuleResult:
merged_args = (*(global_args or []), goal.name, *(args or []))
self.set_options(merged_args, env=env, env_inherit=env_inherit)
raw_specs = self.options_bootstrapper.full_options_for_scopes(
[GlobalOptions.get_scope_info(), goal.subsystem_cls.get_scope_info()]
).specs
specs = SpecsParser(self.build_root).parse_specs(raw_specs)
stdout, stderr = StringIO(), StringIO()
console = Console(stdout=stdout, stderr=stderr, use_colors=False, session=self.scheduler)
exit_code = self.scheduler.run_goal_rule(
goal,
Params(
specs,
console,
Workspace(self.scheduler),
),
)
console.flush()
return GoalRuleResult(exit_code, stdout.getvalue(), stderr.getvalue())
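    # Illustrative usage (hypothetical goal class and target address, not part of this module):
    #
    #     result = rule_runner.run_goal_rule(MyGoal, args=["project/dir:tgt"])
    #     assert result.exit_code == 0
    #     assert "expected output" in result.stdout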
def set_options(
self,
args: Iterable[str],
*,
env: Mapping[str, str] | None = None,
env_inherit: set[str] | None = None,
) -> None:
"""Update the engine session with new options and/or environment variables.
The environment variables will be used to set the `CompleteEnvironment`, which is the
environment variables captured by the parent Pants process. Some rules use this to be able
to read arbitrary env vars. Any options that start with `PANTS_` will also be used to set
options.
Environment variables listed in `env_inherit` and not in `env` will be inherited from the test
runner's environment (os.environ)
This will override any previously configured values.
"""
env = {
**{k: os.environ[k] for k in (env_inherit or set()) if k in os.environ},
**(env or {}),
}
self.options_bootstrapper = create_options_bootstrapper(args=args, env=env)
self.environment = CompleteEnvironment(env)
self.scheduler = self.scheduler.scheduler.new_session(
build_id="buildid_for_test",
session_values=SessionValues(
{
OptionsBootstrapper: self.options_bootstrapper,
CompleteEnvironment: self.environment,
}
),
)
def _invalidate_for(self, *relpaths: str):
"""Invalidates all files from the relpath, recursively up to the root.
Many python operations implicitly create parent directories, so we assume that touching a
file located below directories that do not currently exist will result in their creation.
"""
files = {f for relpath in relpaths for f in recursive_dirname(relpath)}
return self.scheduler.invalidate_files(files)
def chmod(self, relpath: str | PurePath, mode: int) -> None:
"""Change the file mode and permissions.
relpath: The relative path to the file or directory from the build root.
        mode: The file mode to set, preferably in octal representation, e.g. `mode=0o750`.
"""
Path(self.build_root, relpath).chmod(mode)
self._invalidate_for(str(relpath))
def create_dir(self, relpath: str) -> str:
"""Creates a directory under the buildroot.
:API: public
relpath: The relative path to the directory from the build root.
"""
path = os.path.join(self.build_root, relpath)
safe_mkdir(path)
self._invalidate_for(relpath)
return path
def _create_file(
self, relpath: str | PurePath, contents: bytes | str = "", mode: str = "w"
) -> str:
"""Writes to a file under the buildroot.
relpath: The relative path to the file from the build root.
        contents: The contents of the file - '' by default.
mode: The mode to write to the file in - over-write by default.
"""
path = os.path.join(self.build_root, relpath)
with safe_open(path, mode=mode) as fp:
fp.write(contents)
self._invalidate_for(str(relpath))
return path
def write_files(self, files: Mapping[str | PurePath, str | bytes]) -> tuple[str, ...]:
"""Write the files to the build root.
:API: public
files: A mapping of file names to contents.
returns: A tuple of absolute file paths created.
"""
paths = []
for path, content in files.items():
paths.append(
self._create_file(path, content, mode="wb" if isinstance(content, bytes) else "w")
)
return tuple(paths)
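    # Illustrative usage (hypothetical paths and BUILD content; assumes a `python_sources` target
    # type is registered in the test):
    #
    #     rule_runner.write_files({"project/BUILD": "python_sources()", "project/app.py": ""})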
def make_snapshot(self, files: Mapping[str, str | bytes]) -> Snapshot:
"""Makes a snapshot from a map of file name to file content.
:API: public
"""
with temporary_dir() as temp_dir:
for file_name, content in files.items():
mode = "wb" if isinstance(content, bytes) else "w"
safe_file_dump(os.path.join(temp_dir, file_name), content, mode=mode)
return self.scheduler.capture_snapshots(
(PathGlobsAndRoot(PathGlobs(("**",)), temp_dir),)
)[0]
def make_snapshot_of_empty_files(self, files: Iterable[str]) -> Snapshot:
"""Makes a snapshot with empty content for each file.
        This is a convenience around `RuleRunner.make_snapshot`, which allows specifying the content
for each file.
:API: public
"""
return self.make_snapshot({fp: "" for fp in files})
def get_target(self, address: Address) -> Target:
"""Find the target for a given address.
This requires that the target actually exists, i.e. that you set up its BUILD file.
:API: public
"""
return self.request(WrappedTarget, [address]).target
def write_digest(self, digest: Digest, *, path_prefix: str | None = None) -> None:
"""Write a digest to disk, relative to the test's build root.
Access the written files by using `os.path.join(rule_runner.build_root, <relpath>)`.
"""
native_engine.write_digest(
self.scheduler.py_scheduler, self.scheduler.py_session, digest, path_prefix or ""
)
def run_interactive_process(self, request: InteractiveProcess) -> InteractiveProcessResult:
return native_engine.session_run_interactive_process(self.scheduler.py_session, request)
# -----------------------------------------------------------------------------------------------
# `run_rule_with_mocks()`
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class MockEffect(Generic[_O, _I]):
output_type: type[_O]
input_type: type[_I]
mock: Callable[[_I], _O]
# TODO(#6742): Improve the type signature by using generics and type vars. `mock` should be
# `Callable[[InputType], OutputType]`.
@dataclass(frozen=True)
class MockGet:
output_type: type
input_type: type
mock: Callable[[Any], Any]
# TODO: Improve the type hints so that the return type can be inferred.
def run_rule_with_mocks(
rule: Callable,
*,
rule_args: Sequence[Any] = (),
mock_gets: Sequence[MockGet | MockEffect] = (),
union_membership: UnionMembership | None = None,
):
"""A test helper function that runs an @rule with a set of arguments and mocked Get providers.
An @rule named `my_rule` that takes one argument and makes no `Get` requests can be invoked
like so:
```
return_value = run_rule_with_mocks(my_rule, rule_args=[arg1])
```
In the case of an @rule that makes Get requests, things get more interesting: the
`mock_gets` argument must be provided as a sequence of `MockGet`s and `MockEffect`s. Each
MockGet takes the Product and Subject type, along with a one-argument function that takes a
subject value and returns a product value.
So in the case of an @rule named `my_co_rule` that takes one argument and makes Get requests
for a product type `Listing` with subject type `Dir`, the invoke might look like:
```
return_value = run_rule_with_mocks(
my_co_rule,
rule_args=[arg1],
mock_gets=[
MockGet(
output_type=Listing,
input_type=Dir,
mock=lambda dir_subject: Listing(..),
),
],
)
```
If any of the @rule's Get requests involve union members, you should pass a `UnionMembership`
mapping the union base to any union members you'd like to test. For example, if your rule has
`await Get(TestResult, TargetAdaptor, target_adaptor)`, you may pass
`UnionMembership({TargetAdaptor: PythonTestsTargetAdaptor})` to this function.
:returns: The return value of the completed @rule.
"""
task_rule = getattr(rule, "rule", None)
if task_rule is None:
raise TypeError(f"Expected to receive a decorated `@rule`; got: {rule}")
if len(rule_args) != len(task_rule.input_selectors):
raise ValueError(
f"Rule expected to receive arguments of the form: {task_rule.input_selectors}; got: {rule_args}"
)
if len(mock_gets) != len(task_rule.input_gets):
raise ValueError(
f"Rule expected to receive Get providers for:\n"
f"{pformat(task_rule.input_gets)}\ngot:\n"
f"{pformat(mock_gets)}"
)
res = rule(*(rule_args or ()))
if not isinstance(res, (CoroutineType, GeneratorType)):
return res
def get(res: Get | Effect):
provider = next(
(
mock_get.mock
for mock_get in mock_gets
if mock_get.output_type == res.output_type
and (
mock_get.input_type == type(res.input) # noqa: E721
or (
union_membership
and mock_get.input_type in union_membership
and union_membership.is_member(mock_get.input_type, res.input)
)
)
),
None,
)
if provider is None:
raise AssertionError(f"Rule requested: {res}, which cannot be satisfied.")
return provider(res.input)
rule_coroutine = res
rule_input = None
while True:
try:
res = rule_coroutine.send(rule_input)
if isinstance(res, (Get, Effect)):
rule_input = get(res)
elif type(res) in (tuple, list):
rule_input = [get(g) for g in res]
else:
return res
except StopIteration as e:
if e.args:
return e.value
@contextmanager
def stdin_context(content: bytes | str | None = None):
if content is None:
yield open("/dev/null")
else:
with temporary_file(binary_mode=isinstance(content, bytes)) as stdin_file:
stdin_file.write(content)
stdin_file.close()
yield open(stdin_file.name)
@contextmanager
def mock_console(
options_bootstrapper: OptionsBootstrapper,
*,
stdin_content: bytes | str | None = None,
) -> Iterator[tuple[Console, StdioReader]]:
global_bootstrap_options = options_bootstrapper.bootstrap_options.for_global_scope()
colors = (
options_bootstrapper.full_options_for_scopes(
[GlobalOptions.get_scope_info()], allow_unknown_options=True
)
.for_global_scope()
.colors
)
with initialize_stdio(global_bootstrap_options), stdin_context(
stdin_content
) as stdin, temporary_file(binary_mode=False) as stdout, temporary_file(
binary_mode=False
) as stderr, stdio_destination(
stdin_fileno=stdin.fileno(),
stdout_fileno=stdout.fileno(),
stderr_fileno=stderr.fileno(),
):
# NB: We yield a Console without overriding the destination argument, because we have
# already done a sys.std* level replacement. The replacement is necessary in order for
# InteractiveProcess to have native file handles to interact with.
yield Console(use_colors=colors), StdioReader(
_stdout=Path(stdout.name), _stderr=Path(stderr.name)
)
@dataclass
class StdioReader:
_stdout: Path
_stderr: Path
def get_stdout(self) -> str:
"""Return all data that has been flushed to stdout so far."""
return self._stdout.read_text()
def get_stderr(self) -> str:
"""Return all data that has been flushed to stderr so far."""
return self._stderr.read_text()
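# Illustrative usage of `mock_console` (a sketch; assumes `Console.print_stdout` and an options
# bootstrapper built with `create_options_bootstrapper()`):
#
#     with mock_console(create_options_bootstrapper()) as (console, stdio_reader):
#         console.print_stdout("hello")
#         assert "hello" in stdio_reader.get_stdout()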
|
|
# oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2016, Gluu
#
# Author: Yuriy Movchan
#
from org.gluu.oxauth.security import Identity
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.oxauth.service import UserService, AuthenticationService
from org.gluu.oxauth.service.net import HttpService
from org.gluu.service import XmlService
from org.gluu.oxauth.service import EncryptionService
from org.gluu.util import StringHelper
from org.gluu.util import ArrayHelper
from java.lang import Boolean
import java
import sys
import json
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
self.client = None
def init(self, configurationAttributes):
print "InWebo. Initialization"
iw_cert_store_type = configurationAttributes.get("iw_cert_store_type").getValue2()
iw_cert_path = configurationAttributes.get("iw_cert_path").getValue2()
iw_creds_file = configurationAttributes.get("iw_creds_file").getValue2()
# Load credentials from file
f = open(iw_creds_file, 'r')
try:
creds = json.loads(f.read())
except:
return False
finally:
f.close()
iw_cert_password = creds["CERT_PASSWORD"]
try:
encryptionService = CdiUtil.bean(EncryptionService)
iw_cert_password = encryptionService.decrypt(iw_cert_password)
except:
return False
httpService = CdiUtil.bean(HttpService)
self.client = httpService.getHttpsClient(None, None, None, iw_cert_store_type, iw_cert_path, iw_cert_password)
print "InWebo. Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "InWebo. Destroy"
print "InWebo. Destroyed successfully"
return True
def getApiVersion(self):
return 1
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
userService = CdiUtil.bean(UserService)
authenticationService = CdiUtil.bean(AuthenticationService)
identity = CdiUtil.bean(Identity)
iw_api_uri = configurationAttributes.get("iw_api_uri").getValue2()
iw_service_id = configurationAttributes.get("iw_service_id").getValue2()
iw_helium_enabled = Boolean(configurationAttributes.get("iw_helium_enabled").getValue2()).booleanValue()
if (iw_helium_enabled):
identity.setWorkingParameter("iw_count_login_steps", 1)
credentials = identity.getCredentials()
user_name = credentials.getUsername()
if (step == 1):
print "InWebo. Authenticate for step 1"
print "InWebo. Authenticate for step 1. iw_helium_enabled:", iw_helium_enabled
user_password = credentials.getPassword()
if (iw_helium_enabled):
login_array = requestParameters.get("login")
if ArrayHelper.isEmpty(login_array):
print "InWebo. Authenticate for step 1. login is empty"
return False
user_name = login_array[0]
password_array = requestParameters.get("password")
if ArrayHelper.isEmpty(password_array):
print "InWebo. Authenticate for step 1. password is empty"
return False
user_password = password_array[0]
response_validation = self.validateInweboToken(iw_api_uri, iw_service_id, user_name, user_password)
if (not response_validation):
return False
logged_in = False
if (StringHelper.isNotEmptyString(user_name)):
userService = CdiUtil.bean(UserService)
logged_in = authenticationService.authenticate(user_name)
return logged_in
else:
logged_in = False
if (StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password)):
userService = CdiUtil.bean(UserService)
logged_in = authenticationService.authenticate(user_name, user_password)
return logged_in
return True
elif (step == 2):
print "InWebo. Authenticate for step 2"
            passed_step1 = self.isPassedDefaultAuthentication()
if (not passed_step1):
return False
iw_token_array = requestParameters.get("iw_token")
if ArrayHelper.isEmpty(iw_token_array):
print "InWebo. Authenticate for step 2. iw_token is empty"
return False
iw_token = iw_token_array[0]
response_validation = self.validateInweboToken(iw_api_uri, iw_service_id, user_name, iw_token)
return response_validation
else:
return False
def prepareForStep(self, configurationAttributes, requestParameters, step):
if (step == 1):
print "InWebo. Prepare for step 1"
identity = CdiUtil.bean(Identity)
iw_helium_enabled = Boolean(configurationAttributes.get("iw_helium_enabled").getValue2()).booleanValue()
identity.setWorkingParameter("helium_enabled", iw_helium_enabled)
iw_helium_alias = None
if (iw_helium_enabled):
iw_helium_alias = configurationAttributes.get("iw_helium_alias").getValue2()
identity.setWorkingParameter("helium_alias", iw_helium_alias)
print "InWebo. Prepare for step 1. Helium status:", iw_helium_enabled
return True
def getExtraParametersForStep(self, configurationAttributes, step):
return None
def getCountAuthenticationSteps(self, configurationAttributes):
identity = CdiUtil.bean(Identity)
if (identity.isSetWorkingParameter("iw_count_login_steps")):
return identity.getWorkingParameter("iw_count_login_steps")
return 2
def getPageForStep(self, configurationAttributes, step):
if (step == 1):
return "/auth/inwebo/iwlogin.xhtml"
if (step == 2):
return "/auth/inwebo/iwauthenticate.xhtml"
else:
return ""
def isPassedDefaultAuthentication(self):
identity = CdiUtil.bean(Identity)
credentials = identity.getCredentials()
user_name = credentials.getUsername()
passed_step1 = StringHelper.isNotEmptyString(user_name)
return passed_step1
def validateInweboToken(self, iw_api_uri, iw_service_id, user_name, iw_token):
httpService = CdiUtil.bean(HttpService)
xmlService = CdiUtil.bean(XmlService)
if StringHelper.isEmpty(iw_token):
print "InWebo. Token verification. iw_token is empty"
return False
request_uri = iw_api_uri + "?action=authenticate" + "&serviceId=" + httpService.encodeUrl(iw_service_id) + "&userId=" + httpService.encodeUrl(user_name) + "&token=" + httpService.encodeUrl(iw_token)
print "InWebo. Token verification. Attempting to send authentication request:", request_uri
# Execute request
http_response = httpService.executeGet(self.client, request_uri)
# Validate response code
response_validation = httpService.isResponseStastusCodeOk(http_response)
if response_validation == False:
print "InWebo. Token verification. Get unsuccessful response code"
return False
authentication_response_bytes = httpService.getResponseContent(http_response)
print "InWebo. Token verification. Get response:", httpService.convertEntityToString(authentication_response_bytes)
# Validate authentication response
response_validation = httpService.isContentTypeXml(http_response)
if response_validation == False:
print "InWebo. Token verification. Get invalid response"
return False
# Parse XML response
try:
xmlDocument = xmlService.getXmlDocument(authentication_response_bytes)
except Exception, err:
print "InWebo. Token verification. Failed to parse XML response:", err
return False
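        # The response is assumed to be an XML document whose root node is <authenticate>,
        # e.g. (illustrative only): <authenticate>OK</authenticate>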
result_code = xmlService.getNodeValue(xmlDocument, "/authenticate", None)
print "InWebo. Token verification. Result after parsing XML response:", result_code
response_validation = StringHelper.equals(result_code, "OK")
print "InWebo. Token verification. Result validation:", response_validation
return response_validation
def logout(self, configurationAttributes, requestParameters):
return True
|
|
# ===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
__author__ = "Simon Oldfield"
import math
import logging
import numpy
import gdal
import os
from gdalconst import *
from enum import Enum
from datacube.api.model import Pq25Bands, Ls57Arg25Bands, Satellite, DatasetType, Ls8Arg25Bands, Wofs25Bands, NdviBands
from datacube.api.model import get_bands, EviBands, NbrBands, TciBands
from datetime import datetime
_log = logging.getLogger(__name__)
# Define PQ mask
# This represents bits 0-13 set which means:
# - 0 = band 10 not saturated
# - 1 = band 20 not saturated
# - 2 = band 30 not saturated
# - 3 = band 40 not saturated
# - 4 = band 50 not saturated
# - 5 = band 61 not saturated
# - 6 = band 62 not saturated
# - 7 = band 70 not saturated
# - 8 = contiguity ok (i.e. all bands present)
# - 9 = land (not sea)
# - 10 = not cloud (ACCA test)
# - 11 = not cloud (FMASK test)
# - 12 = not cloud shadow (ACCA test)
# - 13 = not cloud shadow (FMASK test)
class PqaMask(Enum):
PQ_MASK_CLEAR = 16383 # bits 0 - 13 set
PQ_MASK_SATURATION = 255 # bits 0 - 7 set
PQ_MASK_SATURATION_OPTICAL = 159 # bits 0-4 and 7 set
PQ_MASK_SATURATION_THERMAL = 96 # bits 5,6 set
PQ_MASK_CONTIGUITY = 256 # bit 8 set
PQ_MASK_LAND = 512 # bit 9 set
PQ_MASK_CLOUD = 15360 # bits 10-13
PQ_MASK_CLOUD_ACCA = 1024 # bit 10 set
PQ_MASK_CLOUD_FMASK = 2048 # bit 11 set
PQ_MASK_CLOUD_SHADOW_ACCA = 4096 # bit 12 set
PQ_MASK_CLOUD_SHADOW_FMASK = 8192 # bit 13 set
class WofsMask(Enum):
DRY = 0
NO_DATA = 1
SATURATION_CONTIGUITY = 2
SEA_WATER = 4
TERRAIN_SHADOW = 8
HIGH_SLOPE = 16
CLOUD_SHADOW = 32
CLOUD = 64
WET = 128
class OutputFormat(Enum):
__order__ = "GEOTIFF ENVI"
GEOTIFF = "GTiff"
ENVI = "ENVI"
# Standard no data value
NDV = -999
INT16_MIN = numpy.iinfo(numpy.int16).min
INT16_MAX = numpy.iinfo(numpy.int16).max
UINT16_MIN = numpy.iinfo(numpy.uint16).min
UINT16_MAX = numpy.iinfo(numpy.uint16).max
BYTE_MIN = numpy.iinfo(numpy.ubyte).min
BYTE_MAX = numpy.iinfo(numpy.ubyte).max
NAN = numpy.nan
def empty_array(shape, dtype=numpy.int16, ndv=-999):
"""
Return an empty (i.e. filled with the no data value) array of the given shape and data type
:param shape: shape of the array
    :param dtype: data type of the array (defaults to int16)
:param ndv: no data value (defaults to -999)
:return: array
"""
a = None
if ndv == 0:
a = numpy.zeros(shape=shape, dtype=dtype)
else:
a = numpy.empty(shape=shape, dtype=dtype)
a.fill(ndv)
return a
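# Illustrative example:
#
#     >>> empty_array((2, 2), dtype=numpy.int16, ndv=-999)
#     array([[-999, -999],
#            [-999, -999]], dtype=int16)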
class DatasetBandMetaData:
no_data_value = None
data_type = None
def __init__(self, no_data_value, data_type):
self.no_data_value=no_data_value
self.data_type=data_type
class DatasetMetaData:
shape = None
transform = None
projection = None
bands = None
ul = None
lr = None
pixel_size_x = None
pixel_size_y = None
def __init__(self, shape, transform, projection, bands):
self.shape = shape
self.transform = transform
self.projection = projection
self.bands = bands
self.pixel_size_x = self.transform[1]
self.pixel_size_y = self.transform[5]
self.ul = (self.transform[0], self.transform[3])
self.lr = (self.ul[0] + self.pixel_size_x * self.shape[0], self.ul[1] + self.pixel_size_y * self.shape[1])
def get_dataset_metadata(dataset):
raster = gdal.Open(dataset.path, GA_ReadOnly)
assert raster
band_metadata = dict()
for band in dataset.bands:
raster_band = raster.GetRasterBand(band.value)
assert raster_band
band_metadata[band] = DatasetBandMetaData(raster_band.GetNoDataValue(), raster_band.DataType)
raster_band.FlushCache()
del raster_band
dataset_metadata = DatasetMetaData((raster.RasterXSize, raster.RasterYSize), raster.GetGeoTransform(), raster.GetProjection(), band_metadata)
raster.FlushCache()
del raster
return dataset_metadata
def get_dataset_data(dataset, bands=None, x=0, y=0, x_size=None, y_size=None):
# dataset_types_physical = [
# DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25,
# DatasetType.WATER,
# DatasetType.DSM, DatasetType.DEM, DatasetType.DEM_HYDROLOGICALLY_ENFORCED, DatasetType.DEM_SMOOTHED]
#
# dataset_types_virtual_nbar = [
# DatasetType.NDVI,
# DatasetType.EVI,
# DatasetType.NBR,
# DatasetType.TCI
# ]
# NDVI calculated using RED and NIR from ARG25
if dataset.dataset_type == DatasetType.NDVI:
bands = get_bands(DatasetType.ARG25, dataset.satellite)
band_red = bands[Ls57Arg25Bands.RED.name]
band_nir = bands[Ls57Arg25Bands.NEAR_INFRARED.name]
data = read_dataset_data(dataset, bands=[band_red, band_nir], x=x, y=y, x_size=x_size, y_size=y_size)
data = calculate_ndvi(data[band_red], data[band_nir])
return {NdviBands.NDVI: data}
# EVI calculated using RED, BLUE and NIR from ARG25
elif dataset.dataset_type == DatasetType.EVI:
bands = get_bands(DatasetType.ARG25, dataset.satellite)
band_red = bands[Ls57Arg25Bands.RED.name]
band_blue = bands[Ls57Arg25Bands.BLUE.name]
band_nir = bands[Ls57Arg25Bands.NEAR_INFRARED.name]
data = read_dataset_data(dataset, bands=[band_red, band_blue, band_nir], x=x, y=y, x_size=x_size, y_size=y_size)
data = calculate_evi(data[band_red], data[band_blue], data[band_nir])
return {EviBands.EVI: data}
# NBR calculated using NIR and SWIR-2 from ARG25
elif dataset.dataset_type == DatasetType.NBR:
bands = get_bands(DatasetType.ARG25, dataset.satellite)
band_nir = bands[Ls57Arg25Bands.NEAR_INFRARED.name]
band_swir = bands[Ls57Arg25Bands.SHORT_WAVE_INFRARED_2.name]
data = read_dataset_data(dataset, bands=[band_nir, band_swir], x=x, y=y, x_size=x_size, y_size=y_size)
data = calculate_nbr(data[band_nir], data[band_swir])
return {NbrBands.NBR: data}
# TCI calculated from ARG25
elif dataset.dataset_type == DatasetType.TCI:
bands = get_bands(DatasetType.ARG25, dataset.satellite)
data = read_dataset_data(dataset, bands=bands, x=x, y=y, x_size=x_size, y_size=y_size)
out = dict()
for index in TasselCapIndex:
out[TciBands[index.name]] = calculate_tassel_cap_index(data, TCI_COEFFICIENTS[dataset.satellite][index])
return out
# It is a "physical" dataset so just read it
else:
return read_dataset_data(dataset, bands, x, y, x_size, y_size)
def read_dataset_data(dataset, bands=None, x=0, y=0, x_size=None, y_size=None):
"""
Return one or more bands from a dataset
:param dataset: The dataset from which to read the band
:param bands: A list of bands to read from the dataset
:param x:
:param y:
:param x_size:
:param y_size:
:return: dictionary of band/data as numpy array
"""
out = dict()
raster = gdal.Open(dataset.path, GA_ReadOnly)
assert raster
if not x_size:
x_size = raster.RasterXSize
if not y_size:
y_size = raster.RasterYSize
if not bands:
bands = dataset.bands
for b in bands:
band = raster.GetRasterBand(b.value)
assert band
data = band.ReadAsArray(x, y, x_size, y_size)
out[b] = data
band.FlushCache()
del band
raster.FlushCache()
del raster
return out
DEFAULT_MASK_PQA = [PqaMask.PQ_MASK_CLEAR]
def get_dataset_data_masked(dataset, bands=None, x=0, y=0, x_size=None, y_size=None, ndv=NDV, mask=None):
"""
Return one or more bands from the dataset with pixel quality applied
:type dataset: datacube.api.model.Dataset
:type bands: list[Band]
:type x: int
:type y: int
:type x_size: int
:type y_size: int
:type ndv: int
:type mask: numpy.array
:rtype: dict[numpy.array]
"""
if not bands:
bands = dataset.bands
out = get_dataset_data(dataset, bands, x=x, y=y, x_size=x_size, y_size=y_size)
if mask is not None:
for band in bands:
out[band] = apply_mask(out[band], mask=mask, ndv=ndv)
return out
def get_dataset_data_with_pq(dataset, dataset_pqa, bands=None, x=0, y=0, x_size=None, y_size=None, masks_pqa=DEFAULT_MASK_PQA, ndv=NDV):
"""
Return one or more bands from the dataset with pixel quality applied
:type dataset: datacube.api.model.Dataset
:type dataset_pqa: datacube.api.model.Dataset
:type bands: list[Band]
:type x: int
:type y: int
:type x_size: int
:type y_size: int
:type masks_pqa: list[datacube.api.util.PqaMask]
:rtype: dict[numpy.array]
"""
if not bands:
bands = dataset.bands
mask_pqa = get_mask_pqa(dataset_pqa, x=x, y=y, x_size=x_size, y_size=y_size, pqa_masks=masks_pqa)
out = get_dataset_data_masked(dataset, bands, x=x, y=y, x_size=x_size, y_size=y_size, mask=mask_pqa, ndv=ndv)
return out
def apply_mask(data, mask, ndv=NDV):
return numpy.ma.array(data, mask=mask).filled(ndv)
def get_mask_pqa(pqa, pqa_masks=DEFAULT_MASK_PQA, x=0, y=0, x_size=None, y_size=None, mask=None):
"""
Return a pixel quality mask
:param pqa: Pixel Quality dataset
:param pqa_masks: which PQ flags to use
:param mask: an optional existing mask to update
:return: the mask
"""
# Consolidate the list of (bit) masks into a single (bit) mask
pqa_mask = consolidate_masks(pqa_masks)
# Read the PQA dataset
data = get_dataset_data(pqa, [Pq25Bands.PQ], x=x, y=y, x_size=x_size, y_size=y_size)[Pq25Bands.PQ]
# Create an empty mask if none provided - just to avoid an if below :)
if mask is None:
mask = numpy.ma.make_mask_none(numpy.shape(data))
# Mask out values where the requested bits in the PQ value are not set
mask = numpy.ma.mask_or(mask, numpy.ma.masked_where(data & pqa_mask != pqa_mask, data).mask)
return mask
def consolidate_masks(masks):
mask = 0x0000
for m in masks:
mask |= m.value
return mask
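# Illustrative example: OR-ing the four cloud/cloud-shadow bits reproduces PQ_MASK_CLOUD (15360).
#
#     >>> consolidate_masks([PqaMask.PQ_MASK_CLOUD_ACCA, PqaMask.PQ_MASK_CLOUD_FMASK,
#     ...                    PqaMask.PQ_MASK_CLOUD_SHADOW_ACCA, PqaMask.PQ_MASK_CLOUD_SHADOW_FMASK])
#     15360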
DEFAULT_MASK_WOFS = [WofsMask.WET]
def get_mask_wofs(wofs, wofs_masks=DEFAULT_MASK_WOFS, x=0, y=0, x_size=None, y_size=None, mask=None):
"""
Return a WOFS mask
:param wofs: WOFS dataset
:param wofs_masks: which WOFS values to mask
:param mask: an optional existing mask to update
:return: the mask
"""
# Read the WOFS dataset
data = get_dataset_data(wofs, bands=[Wofs25Bands.WATER], x=x, y=y, x_size=x_size, y_size=y_size)[Wofs25Bands.WATER]
if mask is None:
mask = numpy.ma.make_mask_none(numpy.shape(data))
# Mask out values where the WOFS value is one of the requested mask values
for wofs_mask in wofs_masks:
mask = numpy.ma.mask_or(mask, numpy.ma.masked_equal(data, wofs_mask.value).mask)
return mask
def get_mask_vector_for_cell(x, y, vector_file, vector_layer, vector_feature, width=4000, height=4000,
pixel_size_x=0.00025, pixel_size_y=-0.00025):
"""
Return a mask for the given cell based on the specified feature in the vector file
:param x: X cell index
:type x: int
    :param y: Y cell index
:type y: int
:param vector_file: Vector file containing the mask polygon
:type vector_file: str
:param vector_layer: Layer name within the vector file
:type vector_layer: str
:param vector_feature: Feature id (index starts at 0) within the layer
:type vector_feature: int
:param width: Width of the mask
:type width: int
:param height: Height of the mask
:type height: int
:param pixel_size_x: X pixel size
:type pixel_size_x: float
:param pixel_size_y: Y pixel size
:type pixel_size_y: float
:return: The mask
:rtype: numpy.ma.MaskedArray.mask (array of boolean)
"""
import gdal
import osr
driver = gdal.GetDriverByName("MEM")
assert driver
raster = driver.Create("", width, height, 1, gdal.GDT_Byte)
assert raster
raster.SetGeoTransform((x, pixel_size_x, 0.0, y+1, 0.0, pixel_size_y))
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
raster.SetProjection(srs.ExportToWkt())
_log.debug("Reading feature [%d] from layer [%d] of file [%s]", vector_feature, vector_layer, vector_file)
import ogr
from gdalconst import GA_ReadOnly
vector = ogr.Open(vector_file, GA_ReadOnly)
assert vector
# layer = vector.GetLayer()
# assert layer
# layer = vector.GetLayerByName(vector_layer)
# assert layer
layer = vector.GetLayerByIndex(vector_layer)
assert layer
layer.SetAttributeFilter("FID={fid}".format(fid=vector_feature))
gdal.RasterizeLayer(raster, [1], layer, burn_values=[1])
del layer
band = raster.GetRasterBand(1)
assert band
data = band.ReadAsArray()
import numpy
_log.debug("Read [%s] from memory AOI mask dataset", numpy.shape(data))
return numpy.ma.masked_not_equal(data, 1, copy=False).mask
# TODO generalise/refactor this!!!
def raster_create(path, data, transform, projection, no_data_value, data_type,
# options=["INTERLEAVE=PIXEL", "COMPRESS=DEFLATE", "PREDICTOR=2", "ZLEVEL=9"]):
# options=["INTERLEAVE=PIXEL", "COMPRESS=DEFLATE", "PREDICTOR=1", "ZLEVEL=6"]):
options=["INTERLEAVE=PIXEL"],
# options=["INTERLEAVE=PIXEL", "COMPRESS=LZW"],
# options=["INTERLEAVE=PIXEL", "COMPRESS=LZW", "TILED=YES"],
width=None, height=None, dataset_metadata=None, band_ids=None):
raster_create_geotiff(path, data, transform, projection, no_data_value, data_type, options, width, height,
dataset_metadata, band_ids)
# TODO I've dodgied this to get band names in. Should redo it properly so you pass in a list of band
# data structures that have a name, the data, the NDV, etc.
def raster_create_geotiff(path, data, transform, projection, no_data_value, data_type,
# options=["INTERLEAVE=PIXEL", "COMPRESS=DEFLATE", "PREDICTOR=2", "ZLEVEL=9"]):
# options=["INTERLEAVE=PIXEL", "COMPRESS=DEFLATE", "PREDICTOR=1", "ZLEVEL=6"]):
options=["INTERLEAVE=PIXEL"],
# options=["INTERLEAVE=PIXEL", "COMPRESS=LZW"],
# options=["INTERLEAVE=PIXEL", "COMPRESS=LZW", "TILED=YES"],
width=None, height=None, dataset_metadata=None, band_ids=None):
"""
Create a raster from a list of numpy arrays
:param path: path to the output raster
:param data: list of numpy arrays
:param transform: geo transform
:param projection: projection
:param no_data_value: no data value
:param data_type: data type
:param options: raster creation options
"""
_log.debug("creating output raster %s", path)
_log.debug("filename=%s | shape = %s | bands = %d | data type = %s", path, (numpy.shape(data[0])[0], numpy.shape(data[0])[1]),
len(data), data_type)
driver = gdal.GetDriverByName("GTiff")
assert driver
width = width or numpy.shape(data[0])[1]
height = height or numpy.shape(data[0])[0]
raster = driver.Create(path, width, height, len(data), data_type, options)
assert raster
raster.SetGeoTransform(transform)
raster.SetProjection(projection)
if dataset_metadata:
raster.SetMetadata(dataset_metadata)
for i in range(0, len(data)):
_log.debug("Writing band %d", i + 1)
band = raster.GetRasterBand(i + 1)
if band_ids and len(band_ids) - 1 >= i:
band.SetDescription(band_ids[i])
band.SetNoDataValue(no_data_value)
band.WriteArray(data[i])
band.ComputeStatistics(True)
band.FlushCache()
del band
raster.FlushCache()
del raster
def raster_create_envi(path, data, transform, projection, no_data_value, data_type,
# options=["INTERLEAVE=PIXEL", "COMPRESS=DEFLATE", "PREDICTOR=2", "ZLEVEL=9"]):
# options=["INTERLEAVE=PIXEL", "COMPRESS=DEFLATE", "PREDICTOR=1", "ZLEVEL=6"]):
options=["INTERLEAVE=BSQ"],
# options=["INTERLEAVE=PIXEL", "COMPRESS=LZW"],
# options=["INTERLEAVE=PIXEL", "COMPRESS=LZW", "TILED=YES"],
width=None, height=None, dataset_metadata=None, band_ids=None):
"""
Create a raster from a list of numpy arrays
:param path: path to the output raster
:param data: list of numpy arrays
:param transform: geo transform
:param projection: projection
:param no_data_value: no data value
:param data_type: data type
:param options: raster creation options
"""
_log.debug("creating output raster %s", path)
_log.debug("filename=%s | shape = %s | bands = %d | data type = %s", path, (numpy.shape(data[0])[0], numpy.shape(data[0])[1]),
len(data), data_type)
driver = gdal.GetDriverByName("ENVI")
assert driver
width = width or numpy.shape(data[0])[1]
height = height or numpy.shape(data[0])[0]
raster = driver.Create(path, width, height, len(data), data_type, options)
assert raster
raster.SetGeoTransform(transform)
raster.SetProjection(projection)
if dataset_metadata:
raster.SetMetadata(dataset_metadata)
for i in range(0, len(data)):
_log.debug("Writing band %d", i + 1)
band = raster.GetRasterBand(i + 1)
if band_ids and len(band_ids) - 1 >= i:
band.SetDescription(band_ids[i])
band.SetNoDataValue(no_data_value)
band.WriteArray(data[i])
band.ComputeStatistics(True)
band.FlushCache()
del band
raster.FlushCache()
del raster
def propagate_using_selected_pixel(a, b, c, d, ndv=NDV):
return numpy.where((a == b) & (a != ndv), c, d)
# def calculate_ndvi(red, nir, input_ndv=NDV, output_ndv=INT16_MAX):
# m_red = numpy.ma.masked_equal(red, input_ndv) #.astype(numpy.float32)
# m_nir = numpy.ma.masked_equal(nir, input_ndv) #.astype(numpy.float32)
#
# ndvi = numpy.true_divide(m_nir - m_red, m_nir + m_red)
# ndvi = (ndvi * 10000).astype(numpy.int16)
# ndvi = ndvi.filled(output_ndv)
#
# return ndvi
def calculate_ndvi(red, nir, input_ndv=NDV, output_ndv=NDV):
"""
Calculate the Normalised Difference Vegetation Index (NDVI) from a Landsat dataset
NDVI is defined as (NIR - RED) / (NIR + RED)
"""
red = numpy.ma.masked_equal(red, input_ndv)
nir = numpy.ma.masked_equal(nir, input_ndv)
ndvi = numpy.true_divide(nir - red, nir + red)
ndvi = ndvi.filled(output_ndv)
return ndvi
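# Illustrative example (no no-data values present, so nothing is masked):
#
#     >>> calculate_ndvi(numpy.array([1000.0]), numpy.array([3000.0]))
#     array([0.5])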
def calculate_evi(red, blue, nir, l=1, c1=6, c2=7.5, input_ndv=NDV, output_ndv=NDV):
"""
    Calculate the Enhanced Vegetation Index (EVI) from a Landsat dataset
EVI is defined as 2.5 * (NIR - RED) / (NIR + C1 * RED - C2 * BLUE + L)
Defaults to the standard MODIS EVI of L=1 C1=6 C2=7.5
"""
red = numpy.ma.masked_equal(red, input_ndv)
blue = numpy.ma.masked_equal(blue, input_ndv)
nir = numpy.ma.masked_equal(nir, input_ndv)
    evi = 2.5 * numpy.true_divide(nir - red, nir + c1 * red - c2 * blue + l)
evi = evi.filled(output_ndv)
return evi
def calculate_nbr(nir, swir, input_ndv=NDV, output_ndv=NDV):
"""
Calculate the Normalised Burn Ratio (NBR) from a Landsat dataset
NBR is defined as (NIR - SWIR 2) / (NIR + SWIR 2)
"""
nir = numpy.ma.masked_equal(nir, input_ndv)
swir = numpy.ma.masked_equal(swir, input_ndv)
nbr = numpy.true_divide(nir - swir, nir + swir)
nbr = nbr.filled(output_ndv)
return nbr
class TasselCapIndex(Enum):
__order__ = "BRIGHTNESS GREENNESS WETNESS FOURTH FIFTH SIXTH"
BRIGHTNESS = 1
GREENNESS = 2
WETNESS = 3
FOURTH = 4
FIFTH = 5
SIXTH = 6
TCI_COEFFICIENTS = {
Satellite.LS5:
{
TasselCapIndex.BRIGHTNESS: {
Ls57Arg25Bands.BLUE: 0.3037,
Ls57Arg25Bands.GREEN: 0.2793,
Ls57Arg25Bands.RED: 0.4743,
Ls57Arg25Bands.NEAR_INFRARED: 0.5585,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_1: 0.5082,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_2: 0.1863},
TasselCapIndex.GREENNESS: {
Ls57Arg25Bands.BLUE: -0.2848,
Ls57Arg25Bands.GREEN: -0.2435,
Ls57Arg25Bands.RED: -0.5436,
Ls57Arg25Bands.NEAR_INFRARED: 0.7243,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_1: 0.0840,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_2: -0.1800},
TasselCapIndex.WETNESS: {
Ls57Arg25Bands.BLUE: 0.1509,
Ls57Arg25Bands.GREEN: 0.1973,
Ls57Arg25Bands.RED: 0.3279,
Ls57Arg25Bands.NEAR_INFRARED: 0.3406,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_1: -0.7112,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_2: -0.4572},
TasselCapIndex.FOURTH: {
Ls57Arg25Bands.BLUE: -0.8242,
Ls57Arg25Bands.GREEN: 0.0849,
Ls57Arg25Bands.RED: 0.4392,
Ls57Arg25Bands.NEAR_INFRARED: -0.0580,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_1: 0.2012,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_2: -0.2768},
TasselCapIndex.FIFTH: {
Ls57Arg25Bands.BLUE: -0.3280,
Ls57Arg25Bands.GREEN: 0.0549,
Ls57Arg25Bands.RED: 0.1075,
Ls57Arg25Bands.NEAR_INFRARED: 0.1855,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_1: -0.4357,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_2: 0.8085},
TasselCapIndex.SIXTH: {
Ls57Arg25Bands.BLUE: 0.1084,
Ls57Arg25Bands.GREEN: -0.9022,
Ls57Arg25Bands.RED: 0.4120,
Ls57Arg25Bands.NEAR_INFRARED: 0.0573,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_1: -0.0251,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_2: 0.0238}
},
Satellite.LS7:
{
TasselCapIndex.BRIGHTNESS: {
Ls57Arg25Bands.BLUE: 0.3561,
Ls57Arg25Bands.GREEN: 0.3972,
Ls57Arg25Bands.RED: 0.3904,
Ls57Arg25Bands.NEAR_INFRARED: 0.6966,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_1: 0.2286,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_2: 0.1596},
TasselCapIndex.GREENNESS: {
Ls57Arg25Bands.BLUE: -0.3344,
Ls57Arg25Bands.GREEN: -0.3544,
Ls57Arg25Bands.RED: -0.4556,
Ls57Arg25Bands.NEAR_INFRARED: 0.6966,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_1: -0.0242,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_2: -0.2630},
TasselCapIndex.WETNESS: {
Ls57Arg25Bands.BLUE: 0.2626,
Ls57Arg25Bands.GREEN: 0.2141,
Ls57Arg25Bands.RED: 0.0926,
Ls57Arg25Bands.NEAR_INFRARED: 0.0656,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_1: -0.7629,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_2: -0.5388},
TasselCapIndex.FOURTH: {
Ls57Arg25Bands.BLUE: 0.0805,
Ls57Arg25Bands.GREEN: -0.0498,
Ls57Arg25Bands.RED: 0.1950,
Ls57Arg25Bands.NEAR_INFRARED: -0.1327,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_1: 0.5752,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_2: -0.7775},
TasselCapIndex.FIFTH: {
Ls57Arg25Bands.BLUE: -0.7252,
Ls57Arg25Bands.GREEN: -0.0202,
Ls57Arg25Bands.RED: 0.6683,
Ls57Arg25Bands.NEAR_INFRARED: 0.0631,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_1: -0.1494,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_2: -0.0274},
TasselCapIndex.SIXTH: {
Ls57Arg25Bands.BLUE: 0.4000,
Ls57Arg25Bands.GREEN: -0.8172,
Ls57Arg25Bands.RED: 0.3832,
Ls57Arg25Bands.NEAR_INFRARED: 0.0602,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_1: -0.1095,
Ls57Arg25Bands.SHORT_WAVE_INFRARED_2: 0.0985}
},
Satellite.LS8:
{
TasselCapIndex.BRIGHTNESS: {
Ls8Arg25Bands.BLUE: 0.3029,
Ls8Arg25Bands.GREEN: 0.2786,
Ls8Arg25Bands.RED: 0.4733,
Ls8Arg25Bands.NEAR_INFRARED: 0.5599,
Ls8Arg25Bands.SHORT_WAVE_INFRARED_1: 0.508,
Ls8Arg25Bands.SHORT_WAVE_INFRARED_2: 0.1872},
TasselCapIndex.GREENNESS: {
Ls8Arg25Bands.BLUE: -0.2941,
Ls8Arg25Bands.GREEN: -0.2430,
Ls8Arg25Bands.RED: -0.5424,
Ls8Arg25Bands.NEAR_INFRARED: 0.7276,
Ls8Arg25Bands.SHORT_WAVE_INFRARED_1: 0.0713,
Ls8Arg25Bands.SHORT_WAVE_INFRARED_2: -0.1608},
TasselCapIndex.WETNESS: {
Ls8Arg25Bands.BLUE: 0.1511,
Ls8Arg25Bands.GREEN: 0.1973,
Ls8Arg25Bands.RED: 0.3283,
Ls8Arg25Bands.NEAR_INFRARED: 0.3407,
Ls8Arg25Bands.SHORT_WAVE_INFRARED_1: -0.7117,
Ls8Arg25Bands.SHORT_WAVE_INFRARED_2: -0.4559},
TasselCapIndex.FOURTH: {
Ls8Arg25Bands.BLUE: -0.8239,
Ls8Arg25Bands.GREEN: 0.0849,
Ls8Arg25Bands.RED: 0.4396,
Ls8Arg25Bands.NEAR_INFRARED: -0.058,
Ls8Arg25Bands.SHORT_WAVE_INFRARED_1: 0.2013,
Ls8Arg25Bands.SHORT_WAVE_INFRARED_2: -0.2773},
TasselCapIndex.FIFTH: {
Ls8Arg25Bands.BLUE: -0.3294,
Ls8Arg25Bands.GREEN: 0.0557,
Ls8Arg25Bands.RED: 0.1056,
Ls8Arg25Bands.NEAR_INFRARED: 0.1855,
Ls8Arg25Bands.SHORT_WAVE_INFRARED_1: -0.4349,
Ls8Arg25Bands.SHORT_WAVE_INFRARED_2: 0.8085},
TasselCapIndex.SIXTH: {
Ls8Arg25Bands.BLUE: 0.1079,
Ls8Arg25Bands.GREEN: -0.9023,
Ls8Arg25Bands.RED: 0.4119,
Ls8Arg25Bands.NEAR_INFRARED: 0.0575,
Ls8Arg25Bands.SHORT_WAVE_INFRARED_1: -0.0259,
Ls8Arg25Bands.SHORT_WAVE_INFRARED_2: 0.0252}
}
}
def calculate_tassel_cap_index(bands, coefficients, input_ndv=NDV, output_ndv=numpy.nan):
"""
:param bands:
:param coefficients:
:param input_ndv:
:param output_ndv:
:return:
"""
bands_masked = dict()
# Drop out no data values - do I need this???
for b in bands.iterkeys():
bands_masked[b] = numpy.ma.masked_equal(bands[b], input_ndv).astype(numpy.float32) / 10000
tci = 0
for b in bands:
if b in coefficients:
tci += bands_masked[b] * coefficients[b]
tci = tci.filled(output_ndv)
return tci
def calculate_medoid(X, dist=None):
_log.debug("X is \n%s", X)
_log.debug("X.ndim is %d", X.ndim)
if dist is None:
dist = lambda x, y: numpy.sqrt(numpy.square(x-y).sum())
if X.ndim == 1:
return X
_, n = X.shape
d = numpy.empty(n)
for i in range(n):
d[i] = numpy.sum([dist(X[:, i], X[:, j]) for j in range(n) if j != i])
return X[:, numpy.argmin(d)]
# def calculate_medoid_simon(X):
#
# _log.debug("shape of X is %s", numpy.shape(X))
#
# files, bands, rows, cols = numpy.shape(X)
# _log.debug("files=%d bands=%d rows=%d cols=%d", files, bands, rows, cols)
#
# d = numpy.empty(rows, cols)
# def calculate_medoid_flood(X):
#
# ndx = vectormedian(imagestack)
# medianimg = selectmedianimage(imagestack, ndx)
def latlon_to_xy(lat, lon, transform):
"""
Convert lat/lon to x/y for raster
NOTE: No projection done - assumes raster has native lat/lon projection
:param lat: latitude
:param lon: longitude
:param transform: GDAL GeoTransform
:return: x, y pair
"""
# Get the reverse direction GeoTransform
_, transform = gdal.InvGeoTransform(transform)
ulx, uly = transform[0], transform[3]
psx, psy = transform[1], transform[5]
x = int(math.floor(ulx + psx * lon))
y = int(math.floor(uly + psy * lat))
return x, y
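# Illustrative example (relies on the GDAL 1.x InvGeoTransform return convention used above):
# a 0.00025 degree pixel grid anchored at lon 120, lat -20
#
#     >>> transform = (120.0, 0.00025, 0.0, -20.0, 0.0, -0.00025)
#     >>> latlon_to_xy(-20.5, 120.5, transform)
#     (2000, 2000)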
def latlon_to_cell(lat, lon):
"""
Return the cell that contains the given lat/lon pair
NOTE: x of cell represents min (contained) lon value but y of cell represents max (not contained) lat value
that is, 120/-20 contains lon values 120->120.99999 but lat values -19->-19.99999
    that is, 120/-20 does NOT contain lat value of -20
:param lat: latitude
:param lon: longitude
:return: cell as x, y pair
"""
x = int(lon)
y = int(lat) - 1
return x, y
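# Illustrative example:
#
#     >>> latlon_to_cell(-20.5, 120.5)
#     (120, -21)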
# TODO this is a bit of dodginess until the WOFS tiles are ingested
# DO NOT USE THIS IT WON'T STAY!!!!
def extract_fields_from_filename(filename):
"""
:param filename:
:return:
"""
# At the moment I only need this to work for the WOFS WATER extent tile files....
# LS5_TM_WATER_120_-021_2004-09-20T01-40-14.409038.tif
# LS7_ETM_WATER_120_-021_2006-06-30T01-45-48.187525.tif
if filename.endswith(".tif"):
filename = filename[:-len(".tif")]
elif filename.endswith(".tiff"):
filename = filename[:-len(".tiff")]
elif filename.endswith(".vrt"):
filename = filename[:-len(".vrt")]
fields = filename.split("_")
# Satellite
satellite = Satellite[fields[0]]
# Dataset Type
dataset_type = DatasetType[fields[2]]
# Cell
x, y = int(fields[3]), int(fields[4])
# Acquisition Date/Time
acq_str = fields[5].split(".")[0] # go from "2006-06-30T01-45-48.187525" to "2006-06-30T01-45-48"
acq_dt = datetime.strptime(acq_str, "%Y-%m-%dT%H-%M-%S")
return satellite, dataset_type, x, y, acq_dt
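# Illustrative example (using the sample WOFS filename from the comment above; enum reprs elided):
#
#     >>> extract_fields_from_filename("LS5_TM_WATER_120_-021_2004-09-20T01-40-14.409038.tif")
#     (Satellite.LS5, DatasetType.WATER, 120, -21, datetime(2004, 9, 20, 1, 40, 14))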
def intersection(a, b):
return list(set(a) & set(b))
def union(a, b):
return list(set(a) | set(b))
def subset(a, b):
return set(a) <= set(b)
def get_satellite_string(satellites):
# TODO this assumes everything is Landsat!!!!
return "LS" + "".join([s.value.replace("LS", "") for s in satellites])
def check_overwrite_remove_or_fail(path, overwrite):
if os.path.exists(path):
if overwrite:
_log.info("Removing existing output file [%s]", path)
os.remove(path)
else:
_log.error("Output file [%s] exists", path)
raise Exception("File [%s] exists" % path)
def log_mem(s=None):
if s and len(s) > 0:
_log.info(s)
import psutil
_log.info("Current memory usage is [%s]", psutil.Process().memory_info())
_log.info("Current memory usage is [%d] MB", psutil.Process().memory_info().rss / 1024 / 1024)
import resource
_log.info("Current MAX RSS usage is [%d] MB", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024)
def date_to_integer(d):
# Return an integer representing the YYYYMMDD value
return d.year * 10000 + d.month * 100 + d.day
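# Illustrative example: date_to_integer(datetime(2004, 9, 20)) == 20040920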
def get_dataset_filename(dataset, output_format=OutputFormat.GEOTIFF,
mask_pqa_apply=False, mask_wofs_apply=False, mask_vector_apply=False):
filename = dataset.path
filename = os.path.basename(filename)
dataset_type_from_string = {
DatasetType.ARG25: "_NBAR_",
DatasetType.PQ25: "_PQA_",
DatasetType.FC25: "_FC_",
DatasetType.WATER: "_WATER_",
DatasetType.NDVI: "_NBAR_",
DatasetType.EVI: "_NBAR_",
DatasetType.NBR: "_NBAR_",
DatasetType.TCI: "_NBAR_",
DatasetType.DSM: "DSM_"
}[dataset.dataset_type]
dataset_type_to_string = {
DatasetType.ARG25: "_NBAR_",
DatasetType.PQ25: "_PQA_",
DatasetType.FC25: "_FC_",
DatasetType.WATER: "_WATER_",
DatasetType.NDVI: "_NDVI_",
DatasetType.EVI: "_EVI_",
DatasetType.NBR: "_NBR_",
DatasetType.TCI: "_TCI_",
DatasetType.DSM: "DSM_"
}[dataset.dataset_type]
dataset_type_to_string += ((mask_pqa_apply or mask_wofs_apply or mask_vector_apply) and "WITH_" or "") + \
(mask_pqa_apply and "PQA_" or "") + \
(mask_wofs_apply and "WATER_" or "") + \
(mask_vector_apply and "VECTOR_" or "")
filename = filename.replace(dataset_type_from_string, dataset_type_to_string)
ext = {OutputFormat.GEOTIFF: ".tif", OutputFormat.ENVI: ".dat"}[output_format]
filename = filename.replace(".vrt", ext)
filename = filename.replace(".tiff", ext)
filename = filename.replace(".tif", ext)
return filename
def get_dataset_band_stack_filename(dataset, band, output_format=OutputFormat.GEOTIFF,
mask_pqa_apply=False, mask_wofs_apply=False, mask_vector_apply=False):
filename = dataset.path
filename = os.path.basename(filename)
dataset_type_from_string = {
DatasetType.ARG25: "_NBAR_",
DatasetType.PQ25: "_PQA_",
DatasetType.FC25: "_FC_",
DatasetType.WATER: "_WATER_",
DatasetType.NDVI: "_NBAR_",
DatasetType.EVI: "_NBAR_",
DatasetType.NBR: "_NBAR_",
DatasetType.TCI: "_NBAR_",
DatasetType.DSM: "DSM_"
}[dataset.dataset_type]
dataset_type_to_string = {
DatasetType.ARG25: "_NBAR_",
DatasetType.PQ25: "_PQA_",
DatasetType.FC25: "_FC_",
DatasetType.WATER: "_WATER_",
DatasetType.NDVI: "_NDVI_",
DatasetType.EVI: "_EVI_",
DatasetType.NBR: "_NBR_",
DatasetType.TCI: "_TCI_",
DatasetType.DSM: "DSM_"
}[dataset.dataset_type]
dataset_type_to_string += ((mask_pqa_apply or mask_wofs_apply or mask_vector_apply) and "WITH_" or "") + \
(mask_pqa_apply and "PQA_" or "") + \
(mask_wofs_apply and "WATER_" or "") + \
(mask_vector_apply and "VECTOR_" or "")
dataset_type_to_string += "STACK_" + band.name + "_"
filename = filename.replace(dataset_type_from_string, dataset_type_to_string)
ext = {OutputFormat.GEOTIFF: ".tif", OutputFormat.ENVI: ".dat"}[output_format]
filename = filename.replace(".vrt", ext)
filename = filename.replace(".tiff", ext)
filename = filename.replace(".tif", ext)
return filename
def get_dataset_datatype(dataset):
return {
DatasetType.ARG25: GDT_Int16,
DatasetType.PQ25: GDT_Int16,
DatasetType.FC25: GDT_Int16,
DatasetType.WATER: GDT_Byte,
DatasetType.NDVI: GDT_Float32,
DatasetType.EVI: GDT_Float32,
DatasetType.NBR: GDT_Float32,
DatasetType.TCI: GDT_Float32,
DatasetType.DSM: GDT_Int16
}[dataset.dataset_type]
def get_dataset_ndv(dataset):
return {
DatasetType.ARG25: NDV,
DatasetType.PQ25: UINT16_MAX,
DatasetType.FC25: NDV,
DatasetType.WATER: BYTE_MAX,
DatasetType.NDVI: NAN,
DatasetType.EVI: NAN,
DatasetType.NBR: NAN,
DatasetType.TCI: NAN,
DatasetType.DSM: NDV
}[dataset.dataset_type]
def get_band_name_union(dataset_type, satellites):
bands = [b.name for b in get_bands(dataset_type, satellites[0])]
for satellite in satellites[1:]:
for b in get_bands(dataset_type, satellite):
if b.name not in bands:
bands.append(b.name)
return bands
def get_band_name_intersection(dataset_type, satellites):
    bands = [b.name for b in get_bands(dataset_type, satellites[0])]
    for satellite in satellites[1:]:
        # Removing items from a list while iterating over it skips elements,
        # so filter into a new list instead.
        available = {b.name for b in get_bands(dataset_type, satellite)}
        bands = [band for band in bands if band in available]
    return bands
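# Worked example with hypothetical band sets: if the first satellite provides
# ['BLUE', 'GREEN', 'RED'] and the second ['GREEN', 'RED', 'NIR'], then
# get_band_name_union() returns ['BLUE', 'GREEN', 'RED', 'NIR'] and
# get_band_name_intersection() returns ['GREEN', 'RED'].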
def format_date(d):
from datetime import datetime
if d:
return datetime.strftime(d, "%Y_%m_%d")
return None
def format_date_time(d):
from datetime import datetime
if d:
return datetime.strftime(d, "%Y_%m_%d_%H_%M_%S")
return None
def extract_feature_geometry_wkb(vector_file, vector_layer=0, vector_feature=0, epsg=4326):
import ogr
import osr
from gdalconst import GA_ReadOnly
vector = ogr.Open(vector_file, GA_ReadOnly)
assert vector
layer = vector.GetLayer(vector_layer)
assert layer
feature = layer.GetFeature(vector_feature)
assert feature
projection = osr.SpatialReference()
projection.ImportFromEPSG(epsg)
geom = feature.GetGeometryRef()
# Transform if required
if not projection.IsSame(geom.GetSpatialReference()):
geom.TransformTo(projection)
return geom.ExportToWkb()
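# Usage sketch (assumption, not from the original source): extract the first
# feature of a mask shapefile as WKB in WGS84 for later use as a vector mask.
#
#   wkb = extract_feature_geometry_wkb("/path/to/mask.shp", vector_layer=0,
#                                      vector_feature=0, epsg=4326)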
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import datetime
import pickle
from helpers import unittest
import luigi
import luigi.notifications
from luigi.interface import ArgParseInterface
from luigi.mock import MockFile
from luigi.parameter import MissingParameterException
from luigi.util import common_params, copies, delegates, inherits, requires
luigi.notifications.DEBUG = True
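# The tests below exercise the luigi.util task-composition decorators
# (summary comment added for readability; see luigi.util for authoritative docs):
#   @inherits(T) - copy T's parameters onto the decorated task
#   @requires(T) - @inherits(T) plus a generated requires() returning T
#   @copies(T)   - @requires(T) plus a default run() that copies T's output
#   @delegates   - the decorated task must define subtasks(); they become part
#                  of its requirements so run() can call methods on them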
class A(luigi.Task):
param1 = luigi.Parameter("class A-specific default")
@inherits(A)
class B(luigi.Task):
param2 = luigi.Parameter("class B-specific default")
@inherits(B)
class C(luigi.Task):
param3 = luigi.Parameter("class C-specific default")
@inherits(B)
class D(luigi.Task):
param1 = luigi.Parameter("class D overwriting class A's default")
@inherits(B)
class D_null(luigi.Task):
param1 = None
@inherits(A)
@inherits(B)
class E(luigi.Task):
param4 = luigi.Parameter("class E-specific default")
class InheritTest(unittest.TestCase):
def setUp(self):
self.a = A()
self.a_changed = A(param1=34)
self.b = B()
self.c = C()
self.d = D()
self.d_null = D_null()
self.e = E()
def test_has_param(self):
b_params = dict(self.b.get_params()).keys()
self.assertTrue("param1" in b_params)
def test_default_param(self):
self.assertEqual(self.b.param1, self.a.param1)
def test_change_of_defaults_not_equal(self):
self.assertNotEqual(self.b.param1, self.a_changed.param1)
    def test_chained_inheritance(self):
self.assertEqual(self.c.param2, self.b.param2)
self.assertEqual(self.c.param1, self.a.param1)
self.assertEqual(self.c.param1, self.b.param1)
def test_overwriting_defaults(self):
self.assertEqual(self.d.param2, self.b.param2)
self.assertNotEqual(self.d.param1, self.b.param1)
self.assertNotEqual(self.d.param1, self.a.param1)
self.assertEqual(self.d.param1, "class D overwriting class A's default")
def test_stacked_inheritance(self):
self.assertEqual(self.e.param1, self.a.param1)
self.assertEqual(self.e.param1, self.b.param1)
self.assertEqual(self.e.param2, self.b.param2)
def test_removing_parameter(self):
self.assertFalse("param1" in dict(self.d_null.get_params()).keys())
def test_wrapper_preserve_attributes(self):
self.assertEqual(B.__name__, 'B')
class F(luigi.Task):
param1 = luigi.Parameter("A parameter on a base task, that will be required later.")
@inherits(F)
class G(luigi.Task):
param2 = luigi.Parameter("A separate parameter that doesn't affect 'F'")
def requires(self):
return F(**common_params(self, F))
@inherits(G)
class H(luigi.Task):
param2 = luigi.Parameter("OVERWRITING")
def requires(self):
return G(**common_params(self, G))
@inherits(G)
class H_null(luigi.Task):
param2 = None
def requires(self):
special_param2 = str(datetime.datetime.now())
return G(param2=special_param2, **common_params(self, G))
@inherits(G)
class I(luigi.Task):
def requires(self):
return F(**common_params(self, F))
class J(luigi.Task):
param1 = luigi.Parameter() # something required, with no default
@inherits(J)
class K_shouldnotinstantiate(luigi.Task):
param2 = luigi.Parameter("A K-specific parameter")
@inherits(J)
class K_shouldfail(luigi.Task):
param1 = None
param2 = luigi.Parameter("A K-specific parameter")
def requires(self):
return J(**common_params(self, J))
@inherits(J)
class K_shouldsucceed(luigi.Task):
param1 = None
param2 = luigi.Parameter("A K-specific parameter")
def requires(self):
return J(param1="Required parameter", **common_params(self, J))
@inherits(J)
class K_wrongparamsorder(luigi.Task):
param1 = None
param2 = luigi.Parameter("A K-specific parameter")
def requires(self):
return J(param1="Required parameter", **common_params(J, self))
class RequiresTest(unittest.TestCase):
def setUp(self):
self.f = F()
self.g = G()
self.g_changed = G(param1="changing the default")
self.h = H()
self.h_null = H_null()
self.i = I()
self.k_shouldfail = K_shouldfail()
self.k_shouldsucceed = K_shouldsucceed()
self.k_wrongparamsorder = K_wrongparamsorder()
def test_inherits(self):
self.assertEqual(self.f.param1, self.g.param1)
self.assertEqual(self.f.param1, self.g.requires().param1)
def test_change_of_defaults(self):
self.assertNotEqual(self.f.param1, self.g_changed.param1)
self.assertNotEqual(self.g.param1, self.g_changed.param1)
self.assertNotEqual(self.f.param1, self.g_changed.requires().param1)
def test_overwriting_parameter(self):
self.h.requires()
self.assertNotEqual(self.h.param2, self.g.param2)
self.assertEqual(self.h.param2, self.h.requires().param2)
self.assertEqual(self.h.param2, "OVERWRITING")
def test_skipping_one_inheritance(self):
self.assertEqual(self.i.requires().param1, self.f.param1)
def test_removing_parameter(self):
self.assertNotEqual(self.h_null.requires().param2, self.g.param2)
def test_not_setting_required_parameter(self):
self.assertRaises(MissingParameterException, self.k_shouldfail.requires)
def test_setting_required_parameters(self):
self.k_shouldsucceed.requires()
def test_should_not_instantiate(self):
self.assertRaises(MissingParameterException, K_shouldnotinstantiate)
def test_resuscitation(self):
k = K_shouldnotinstantiate(param1='hello')
k.requires()
def test_wrong_common_params_order(self):
self.assertRaises(TypeError, self.k_wrongparamsorder.requires)
class X(luigi.Task):
n = luigi.IntParameter(default=42)
@inherits(X)
class Y(luigi.Task):
def requires(self):
return self.clone_parent()
@requires(X)
class Y2(luigi.Task):
pass
@inherits(X)
class Z(luigi.Task):
n = None
def requires(self):
return self.clone_parent()
@requires(X)
class Y3(luigi.Task):
n = luigi.IntParameter(default=43)
class CloneParentTest(unittest.TestCase):
def test_clone_parent(self):
y = Y()
x = X()
self.assertEqual(y.requires(), x)
self.assertEqual(y.n, 42)
z = Z()
self.assertEqual(z.requires(), x)
def test_requires(self):
y2 = Y2()
x = X()
self.assertEqual(y2.requires(), x)
self.assertEqual(y2.n, 42)
def test_requires_override_default(self):
y3 = Y3()
x = X()
self.assertNotEqual(y3.requires(), x)
self.assertEqual(y3.n, 43)
self.assertEqual(y3.requires().n, 43)
def test_names(self):
# Just make sure the decorators retain the original class names
x = X()
self.assertEqual(str(x), 'X(n=42)')
self.assertEqual(x.__class__.__name__, 'X')
class P(luigi.Task):
date = luigi.DateParameter()
def output(self):
return MockFile(self.date.strftime('/tmp/data-%Y-%m-%d.txt'))
def run(self):
f = self.output().open('w')
print('hello, world', file=f)
f.close()
@copies(P)
class PCopy(luigi.Task):
def output(self):
return MockFile(self.date.strftime('/tmp/copy-data-%Y-%m-%d.txt'))
class CopyTest(unittest.TestCase):
def test_copy(self):
luigi.build([PCopy(date=datetime.date(2012, 1, 1))], local_scheduler=True)
self.assertEqual(MockFile.fs.get_data('/tmp/data-2012-01-01.txt'), b'hello, world\n')
self.assertEqual(MockFile.fs.get_data('/tmp/copy-data-2012-01-01.txt'), b'hello, world\n')
class PickleTest(unittest.TestCase):
def test_pickle(self):
# similar to CopyTest.test_copy
p = PCopy(date=datetime.date(2013, 1, 1))
p_pickled = pickle.dumps(p)
p = pickle.loads(p_pickled)
luigi.build([p], local_scheduler=True)
self.assertEqual(MockFile.fs.get_data('/tmp/data-2013-01-01.txt'), b'hello, world\n')
self.assertEqual(MockFile.fs.get_data('/tmp/copy-data-2013-01-01.txt'), b'hello, world\n')
class Subtask(luigi.Task):
k = luigi.IntParameter()
def f(self, x):
return x ** self.k
@delegates
class SubtaskDelegator(luigi.Task):
def subtasks(self):
return [Subtask(1), Subtask(2)]
def run(self):
self.s = 0
for t in self.subtasks():
self.s += t.f(42)
class SubtaskTest(unittest.TestCase):
def test_subtasks(self):
sd = SubtaskDelegator()
luigi.build([sd], local_scheduler=True)
self.assertEqual(sd.s, 42 * (1 + 42))
def test_forgot_subtasks(self):
def trigger_failure():
@delegates
class SubtaskDelegatorBroken(luigi.Task):
pass
self.assertRaises(AttributeError, trigger_failure)
def test_cmdline(self):
# Exposes issue where wrapped tasks are registered twice under
# the same name
from luigi.task import Register
self.assertEqual(Register.get_reg().get('SubtaskDelegator', None), SubtaskDelegator)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.ec2.securitygroup import SecurityGroup
from boto.rds import RDSConnection
from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership
from boto.rds.parametergroup import ParameterGroup
from boto.rds.logfile import LogFile, LogFileObject
import xml.sax.saxutils as saxutils
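# These tests follow the AWSMockServiceTestCase pattern: default_body() supplies
# a canned XML response, set_http_response() stubs the next HTTP call with it,
# and assert_request_parameters() checks the query parameters boto serialized.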
class TestRDSConnection(AWSMockServiceTestCase):
connection_class = RDSConnection
def setUp(self):
super(TestRDSConnection, self).setUp()
def default_body(self):
return """
<DescribeDBInstancesResponse>
<DescribeDBInstancesResult>
<DBInstances>
<DBInstance>
<Iops>2000</Iops>
<BackupRetentionPeriod>1</BackupRetentionPeriod>
<MultiAZ>false</MultiAZ>
<DBInstanceStatus>backing-up</DBInstanceStatus>
<DBInstanceIdentifier>mydbinstance2</DBInstanceIdentifier>
<PreferredBackupWindow>10:30-11:00</PreferredBackupWindow>
<PreferredMaintenanceWindow>wed:06:30-wed:07:00</PreferredMaintenanceWindow>
<OptionGroupMembership>
<OptionGroupName>default:mysql-5-5</OptionGroupName>
<Status>in-sync</Status>
</OptionGroupMembership>
<AvailabilityZone>us-west-2b</AvailabilityZone>
<ReadReplicaDBInstanceIdentifiers/>
<Engine>mysql</Engine>
<PendingModifiedValues/>
<LicenseModel>general-public-license</LicenseModel>
<DBParameterGroups>
<DBParameterGroup>
<ParameterApplyStatus>in-sync</ParameterApplyStatus>
<DBParameterGroupName>default.mysql5.5</DBParameterGroupName>
</DBParameterGroup>
</DBParameterGroups>
<Endpoint>
<Port>3306</Port>
<Address>mydbinstance2.c0hjqouvn9mf.us-west-2.rds.amazonaws.com</Address>
</Endpoint>
<EngineVersion>5.5.27</EngineVersion>
<DBSecurityGroups>
<DBSecurityGroup>
<Status>active</Status>
<DBSecurityGroupName>default</DBSecurityGroupName>
</DBSecurityGroup>
</DBSecurityGroups>
<VpcSecurityGroups>
<VpcSecurityGroupMembership>
<VpcSecurityGroupId>sg-1</VpcSecurityGroupId>
<Status>active</Status>
</VpcSecurityGroupMembership>
</VpcSecurityGroups>
<DBName>mydb2</DBName>
<AutoMinorVersionUpgrade>true</AutoMinorVersionUpgrade>
<InstanceCreateTime>2012-10-03T22:01:51.047Z</InstanceCreateTime>
<AllocatedStorage>200</AllocatedStorage>
<DBInstanceClass>db.m1.large</DBInstanceClass>
<MainUsername>awsuser</MainUsername>
<StatusInfos>
<DBInstanceStatusInfo>
<Message></Message>
<Normal>true</Normal>
<Status>replicating</Status>
<StatusType>read replication</StatusType>
</DBInstanceStatusInfo>
</StatusInfos>
<DBSubnetGroup>
<VpcId>990524496922</VpcId>
<SubnetGroupStatus>Complete</SubnetGroupStatus>
<DBSubnetGroupDescription>My modified DBSubnetGroup</DBSubnetGroupDescription>
<DBSubnetGroupName>mydbsubnetgroup</DBSubnetGroupName>
<Subnets>
<Subnet>
<SubnetStatus>Active</SubnetStatus>
<SubnetIdentifier>subnet-7c5b4115</SubnetIdentifier>
<SubnetAvailabilityZone>
<Name>us-east-1c</Name>
</SubnetAvailabilityZone>
</Subnet>
<Subnet>
<SubnetStatus>Active</SubnetStatus>
<SubnetIdentifier>subnet-7b5b4112</SubnetIdentifier>
<SubnetAvailabilityZone>
<Name>us-east-1b</Name>
</SubnetAvailabilityZone>
</Subnet>
<Subnet>
<SubnetStatus>Active</SubnetStatus>
<SubnetIdentifier>subnet-3ea6bd57</SubnetIdentifier>
<SubnetAvailabilityZone>
<Name>us-east-1d</Name>
</SubnetAvailabilityZone>
</Subnet>
</Subnets>
</DBSubnetGroup>
</DBInstance>
</DBInstances>
</DescribeDBInstancesResult>
</DescribeDBInstancesResponse>
"""
def test_get_all_db_instances(self):
self.set_http_response(status_code=200)
response = self.service_connection.get_all_dbinstances('instance_id')
self.assertEqual(len(response), 1)
self.assert_request_parameters({
'Action': 'DescribeDBInstances',
'DBInstanceIdentifier': 'instance_id',
}, ignore_params_values=['Version'])
db = response[0]
self.assertEqual(db.id, 'mydbinstance2')
self.assertEqual(db.create_time, '2012-10-03T22:01:51.047Z')
self.assertEqual(db.engine, 'mysql')
self.assertEqual(db.status, 'backing-up')
self.assertEqual(db.allocated_storage, 200)
self.assertEqual(
db.endpoint,
(u'mydbinstance2.c0hjqouvn9mf.us-west-2.rds.amazonaws.com', 3306))
self.assertEqual(db.instance_class, 'db.m1.large')
self.assertEqual(db.main_username, 'awsuser')
self.assertEqual(db.availability_zone, 'us-west-2b')
self.assertEqual(db.backup_retention_period, 1)
self.assertEqual(db.preferred_backup_window, '10:30-11:00')
self.assertEqual(db.preferred_maintenance_window,
'wed:06:30-wed:07:00')
self.assertEqual(db.latest_restorable_time, None)
self.assertEqual(db.multi_az, False)
self.assertEqual(db.iops, 2000)
self.assertEqual(db.pending_modified_values, {})
self.assertEqual(db.parameter_group.name,
'default.mysql5.5')
self.assertEqual(db.parameter_group.description, None)
self.assertEqual(db.parameter_group.engine, None)
self.assertEqual(db.security_group.owner_id, None)
self.assertEqual(db.security_group.name, 'default')
self.assertEqual(db.security_group.description, None)
self.assertEqual(db.security_group.ec2_groups, [])
self.assertEqual(db.security_group.ip_ranges, [])
self.assertEqual(len(db.status_infos), 1)
self.assertEqual(db.status_infos[0].message, '')
self.assertEqual(db.status_infos[0].normal, True)
self.assertEqual(db.status_infos[0].status, 'replicating')
self.assertEqual(db.status_infos[0].status_type, 'read replication')
self.assertEqual(db.vpc_security_groups[0].status, 'active')
self.assertEqual(db.vpc_security_groups[0].vpc_group, 'sg-1')
self.assertEqual(db.license_model, 'general-public-license')
self.assertEqual(db.engine_version, '5.5.27')
self.assertEqual(db.auto_minor_version_upgrade, True)
self.assertEqual(db.subnet_group.name, 'mydbsubnetgroup')
class TestRDSCCreateDBInstance(AWSMockServiceTestCase):
connection_class = RDSConnection
def setUp(self):
super(TestRDSCCreateDBInstance, self).setUp()
def default_body(self):
return """
<CreateDBInstanceResponse xmlns="http://rds.amazonaws.com/doc/2013-05-15/">
<CreateDBInstanceResult>
<DBInstance>
<ReadReplicaDBInstanceIdentifiers/>
<Engine>mysql</Engine>
<PendingModifiedValues>
<MainUserPassword>****</MainUserPassword>
</PendingModifiedValues>
<BackupRetentionPeriod>0</BackupRetentionPeriod>
<MultiAZ>false</MultiAZ>
<LicenseModel>general-public-license</LicenseModel>
<DBSubnetGroup>
<VpcId>990524496922</VpcId>
<SubnetGroupStatus>Complete</SubnetGroupStatus>
<DBSubnetGroupDescription>description</DBSubnetGroupDescription>
<DBSubnetGroupName>subnet_grp1</DBSubnetGroupName>
<Subnets>
<Subnet>
<SubnetStatus>Active</SubnetStatus>
<SubnetIdentifier>subnet-7c5b4115</SubnetIdentifier>
<SubnetAvailabilityZone>
<Name>us-east-1c</Name>
</SubnetAvailabilityZone>
</Subnet>
<Subnet>
<SubnetStatus>Active</SubnetStatus>
<SubnetIdentifier>subnet-7b5b4112</SubnetIdentifier>
<SubnetAvailabilityZone>
<Name>us-east-1b</Name>
</SubnetAvailabilityZone>
</Subnet>
<Subnet>
<SubnetStatus>Active</SubnetStatus>
<SubnetIdentifier>subnet-3ea6bd57</SubnetIdentifier>
<SubnetAvailabilityZone>
<Name>us-east-1d</Name>
</SubnetAvailabilityZone>
</Subnet>
</Subnets>
</DBSubnetGroup>
<DBInstanceStatus>creating</DBInstanceStatus>
<EngineVersion>5.1.50</EngineVersion>
<DBInstanceIdentifier>simcoprod01</DBInstanceIdentifier>
<DBParameterGroups>
<DBParameterGroup>
<ParameterApplyStatus>in-sync</ParameterApplyStatus>
<DBParameterGroupName>default.mysql5.1</DBParameterGroupName>
</DBParameterGroup>
</DBParameterGroups>
<DBSecurityGroups>
<DBSecurityGroup>
<Status>active</Status>
<DBSecurityGroupName>default</DBSecurityGroupName>
</DBSecurityGroup>
</DBSecurityGroups>
<PreferredBackupWindow>00:00-00:30</PreferredBackupWindow>
<AutoMinorVersionUpgrade>true</AutoMinorVersionUpgrade>
<PreferredMaintenanceWindow>sat:07:30-sat:08:00</PreferredMaintenanceWindow>
<AllocatedStorage>10</AllocatedStorage>
<DBInstanceClass>db.m1.large</DBInstanceClass>
<MainUsername>main</MainUsername>
</DBInstance>
</CreateDBInstanceResult>
<ResponseMetadata>
<RequestId>2e5d4270-8501-11e0-bd9b-a7b1ece36d51</RequestId>
</ResponseMetadata>
</CreateDBInstanceResponse>
"""
def test_create_db_instance_param_group_name(self):
self.set_http_response(status_code=200)
db = self.service_connection.create_dbinstance(
'SimCoProd01',
10,
'db.m1.large',
'main',
'Password01',
param_group='default.mysql5.1',
db_subnet_group_name='dbSubnetgroup01',
backup_retention_period=0)
self.assert_request_parameters({
'Action': 'CreateDBInstance',
'AllocatedStorage': 10,
'AutoMinorVersionUpgrade': 'true',
'BackupRetentionPeriod': 0,
'DBInstanceClass': 'db.m1.large',
'DBInstanceIdentifier': 'SimCoProd01',
'DBParameterGroupName': 'default.mysql5.1',
'DBSubnetGroupName': 'dbSubnetgroup01',
'Engine': 'MySQL5.1',
'MainUsername': 'main',
'MainUserPassword': 'Password01',
'Port': 3306
}, ignore_params_values=['Version'])
self.assertEqual(db.id, 'simcoprod01')
self.assertEqual(db.engine, 'mysql')
self.assertEqual(db.status, 'creating')
self.assertEqual(db.allocated_storage, 10)
self.assertEqual(db.instance_class, 'db.m1.large')
self.assertEqual(db.main_username, 'main')
self.assertEqual(db.multi_az, False)
self.assertEqual(db.pending_modified_values,
{'MainUserPassword': '****'})
self.assertEqual(db.parameter_group.name,
'default.mysql5.1')
self.assertEqual(db.parameter_group.description, None)
self.assertEqual(db.parameter_group.engine, None)
self.assertEqual(db.backup_retention_period, 0)
def test_create_db_instance_param_group_instance(self):
self.set_http_response(status_code=200)
param_group = ParameterGroup()
param_group.name = 'default.mysql5.1'
db = self.service_connection.create_dbinstance(
'SimCoProd01',
10,
'db.m1.large',
'main',
'Password01',
param_group=param_group,
db_subnet_group_name='dbSubnetgroup01')
self.assert_request_parameters({
'Action': 'CreateDBInstance',
'AllocatedStorage': 10,
'AutoMinorVersionUpgrade': 'true',
'DBInstanceClass': 'db.m1.large',
'DBInstanceIdentifier': 'SimCoProd01',
'DBParameterGroupName': 'default.mysql5.1',
'DBSubnetGroupName': 'dbSubnetgroup01',
'Engine': 'MySQL5.1',
'MainUsername': 'main',
'MainUserPassword': 'Password01',
'Port': 3306,
}, ignore_params_values=['Version'])
self.assertEqual(db.id, 'simcoprod01')
self.assertEqual(db.engine, 'mysql')
self.assertEqual(db.status, 'creating')
self.assertEqual(db.allocated_storage, 10)
self.assertEqual(db.instance_class, 'db.m1.large')
self.assertEqual(db.main_username, 'main')
self.assertEqual(db.multi_az, False)
self.assertEqual(db.pending_modified_values,
{'MainUserPassword': '****'})
self.assertEqual(db.parameter_group.name,
'default.mysql5.1')
self.assertEqual(db.parameter_group.description, None)
self.assertEqual(db.parameter_group.engine, None)
class TestRDSConnectionRestoreDBInstanceFromPointInTime(AWSMockServiceTestCase):
connection_class = RDSConnection
def setUp(self):
super(TestRDSConnectionRestoreDBInstanceFromPointInTime, self).setUp()
def default_body(self):
return """
<RestoreDBInstanceToPointInTimeResponse xmlns="http://rds.amazonaws.com/doc/2013-05-15/">
<RestoreDBInstanceToPointInTimeResult>
<DBInstance>
<ReadReplicaDBInstanceIdentifiers/>
<Engine>mysql</Engine>
<PendingModifiedValues/>
<BackupRetentionPeriod>1</BackupRetentionPeriod>
<MultiAZ>false</MultiAZ>
<LicenseModel>general-public-license</LicenseModel>
<DBInstanceStatus>creating</DBInstanceStatus>
<EngineVersion>5.1.50</EngineVersion>
<DBInstanceIdentifier>restored-db</DBInstanceIdentifier>
<DBParameterGroups>
<DBParameterGroup>
<ParameterApplyStatus>in-sync</ParameterApplyStatus>
<DBParameterGroupName>default.mysql5.1</DBParameterGroupName>
</DBParameterGroup>
</DBParameterGroups>
<DBSecurityGroups>
<DBSecurityGroup>
<Status>active</Status>
<DBSecurityGroupName>default</DBSecurityGroupName>
</DBSecurityGroup>
</DBSecurityGroups>
<PreferredBackupWindow>00:00-00:30</PreferredBackupWindow>
<AutoMinorVersionUpgrade>true</AutoMinorVersionUpgrade>
<PreferredMaintenanceWindow>sat:07:30-sat:08:00</PreferredMaintenanceWindow>
<AllocatedStorage>10</AllocatedStorage>
<DBInstanceClass>db.m1.large</DBInstanceClass>
<MainUsername>main</MainUsername>
</DBInstance>
</RestoreDBInstanceToPointInTimeResult>
<ResponseMetadata>
<RequestId>1ef546bc-850b-11e0-90aa-eb648410240d</RequestId>
</ResponseMetadata>
</RestoreDBInstanceToPointInTimeResponse>
"""
def test_restore_dbinstance_from_point_in_time(self):
self.set_http_response(status_code=200)
db = self.service_connection.restore_dbinstance_from_point_in_time(
'simcoprod01',
'restored-db',
True)
self.assert_request_parameters({
'Action': 'RestoreDBInstanceToPointInTime',
'SourceDBInstanceIdentifier': 'simcoprod01',
'TargetDBInstanceIdentifier': 'restored-db',
'UseLatestRestorableTime': 'true',
}, ignore_params_values=['Version'])
self.assertEqual(db.id, 'restored-db')
self.assertEqual(db.engine, 'mysql')
self.assertEqual(db.status, 'creating')
self.assertEqual(db.allocated_storage, 10)
self.assertEqual(db.instance_class, 'db.m1.large')
self.assertEqual(db.main_username, 'main')
self.assertEqual(db.multi_az, False)
self.assertEqual(db.parameter_group.name,
'default.mysql5.1')
self.assertEqual(db.parameter_group.description, None)
self.assertEqual(db.parameter_group.engine, None)
def test_restore_dbinstance_from_point_in_time__db_subnet_group_name(self):
self.set_http_response(status_code=200)
db = self.service_connection.restore_dbinstance_from_point_in_time(
'simcoprod01',
'restored-db',
True,
db_subnet_group_name='dbsubnetgroup')
self.assert_request_parameters({
'Action': 'RestoreDBInstanceToPointInTime',
'SourceDBInstanceIdentifier': 'simcoprod01',
'TargetDBInstanceIdentifier': 'restored-db',
'UseLatestRestorableTime': 'true',
'DBSubnetGroupName': 'dbsubnetgroup',
}, ignore_params_values=['Version'])
def test_create_db_instance_vpc_sg_str(self):
self.set_http_response(status_code=200)
vpc_security_groups = [
VPCSecurityGroupMembership(self.service_connection, 'active', 'sg-1'),
VPCSecurityGroupMembership(self.service_connection, None, 'sg-2')]
db = self.service_connection.create_dbinstance(
'SimCoProd01',
10,
'db.m1.large',
'main',
'Password01',
param_group='default.mysql5.1',
db_subnet_group_name='dbSubnetgroup01',
vpc_security_groups=vpc_security_groups)
self.assert_request_parameters({
'Action': 'CreateDBInstance',
'AllocatedStorage': 10,
'AutoMinorVersionUpgrade': 'true',
'DBInstanceClass': 'db.m1.large',
'DBInstanceIdentifier': 'SimCoProd01',
'DBParameterGroupName': 'default.mysql5.1',
'DBSubnetGroupName': 'dbSubnetgroup01',
'Engine': 'MySQL5.1',
'MainUsername': 'main',
'MainUserPassword': 'Password01',
'Port': 3306,
'VpcSecurityGroupIds.member.1': 'sg-1',
'VpcSecurityGroupIds.member.2': 'sg-2'
}, ignore_params_values=['Version'])
def test_create_db_instance_vpc_sg_obj(self):
self.set_http_response(status_code=200)
sg1 = SecurityGroup(name='sg-1')
sg2 = SecurityGroup(name='sg-2')
vpc_security_groups = [
VPCSecurityGroupMembership(self.service_connection, 'active', sg1.name),
VPCSecurityGroupMembership(self.service_connection, None, sg2.name)]
db = self.service_connection.create_dbinstance(
'SimCoProd01',
10,
'db.m1.large',
'main',
'Password01',
param_group='default.mysql5.1',
db_subnet_group_name='dbSubnetgroup01',
vpc_security_groups=vpc_security_groups)
self.assert_request_parameters({
'Action': 'CreateDBInstance',
'AllocatedStorage': 10,
'AutoMinorVersionUpgrade': 'true',
'DBInstanceClass': 'db.m1.large',
'DBInstanceIdentifier': 'SimCoProd01',
'DBParameterGroupName': 'default.mysql5.1',
'DBSubnetGroupName': 'dbSubnetgroup01',
'Engine': 'MySQL5.1',
'MainUsername': 'main',
'MainUserPassword': 'Password01',
'Port': 3306,
'VpcSecurityGroupIds.member.1': 'sg-1',
'VpcSecurityGroupIds.member.2': 'sg-2'
}, ignore_params_values=['Version'])
class TestRDSOptionGroups(AWSMockServiceTestCase):
connection_class = RDSConnection
def setUp(self):
super(TestRDSOptionGroups, self).setUp()
def default_body(self):
return """
<DescribeOptionGroupsResponse xmlns="http://rds.amazonaws.com/doc/2013-05-15/">
<DescribeOptionGroupsResult>
<OptionGroupsList>
<OptionGroup>
<MajorEngineVersion>11.2</MajorEngineVersion>
<OptionGroupName>myoptiongroup</OptionGroupName>
<EngineName>oracle-se1</EngineName>
<OptionGroupDescription>Test option group</OptionGroupDescription>
<Options/>
</OptionGroup>
<OptionGroup>
<MajorEngineVersion>11.2</MajorEngineVersion>
<OptionGroupName>default:oracle-se1-11-2</OptionGroupName>
<EngineName>oracle-se1</EngineName>
<OptionGroupDescription>Default Option Group.</OptionGroupDescription>
<Options/>
</OptionGroup>
</OptionGroupsList>
</DescribeOptionGroupsResult>
<ResponseMetadata>
<RequestId>e4b234d9-84d5-11e1-87a6-71059839a52b</RequestId>
</ResponseMetadata>
</DescribeOptionGroupsResponse>
"""
def test_describe_option_groups(self):
self.set_http_response(status_code=200)
response = self.service_connection.describe_option_groups()
self.assertEqual(len(response), 2)
options = response[0]
self.assertEqual(options.name, 'myoptiongroup')
self.assertEqual(options.description, 'Test option group')
self.assertEqual(options.engine_name, 'oracle-se1')
self.assertEqual(options.major_engine_version, '11.2')
options = response[1]
self.assertEqual(options.name, 'default:oracle-se1-11-2')
self.assertEqual(options.description, 'Default Option Group.')
self.assertEqual(options.engine_name, 'oracle-se1')
self.assertEqual(options.major_engine_version, '11.2')
class TestRDSLogFile(AWSMockServiceTestCase):
connection_class = RDSConnection
def setUp(self):
super(TestRDSLogFile, self).setUp()
def default_body(self):
return """
<DescribeDBLogFilesResponse xmlns="http://rds.amazonaws.com/doc/2013-02-12/">
<DescribeDBLogFilesResult>
<DescribeDBLogFiles>
<DescribeDBLogFilesDetails>
<LastWritten>1364403600000</LastWritten>
<LogFileName>error/mysql-error-running.log</LogFileName>
<Size>0</Size>
</DescribeDBLogFilesDetails>
<DescribeDBLogFilesDetails>
<LastWritten>1364338800000</LastWritten>
<LogFileName>error/mysql-error-running.log.0</LogFileName>
<Size>0</Size>
</DescribeDBLogFilesDetails>
<DescribeDBLogFilesDetails>
<LastWritten>1364342400000</LastWritten>
<LogFileName>error/mysql-error-running.log.1</LogFileName>
<Size>0</Size>
</DescribeDBLogFilesDetails>
<DescribeDBLogFilesDetails>
<LastWritten>1364346000000</LastWritten>
<LogFileName>error/mysql-error-running.log.2</LogFileName>
<Size>0</Size>
</DescribeDBLogFilesDetails>
<DescribeDBLogFilesDetails>
<LastWritten>1364349600000</LastWritten>
<LogFileName>error/mysql-error-running.log.3</LogFileName>
<Size>0</Size>
</DescribeDBLogFilesDetails>
<DescribeDBLogFilesDetails>
<LastWritten>1364405700000</LastWritten>
<LogFileName>error/mysql-error.log</LogFileName>
<Size>0</Size>
</DescribeDBLogFilesDetails>
</DescribeDBLogFiles>
</DescribeDBLogFilesResult>
<ResponseMetadata>
<RequestId>d70fb3b3-9704-11e2-a0db-871552e0ef19</RequestId>
</ResponseMetadata>
</DescribeDBLogFilesResponse>
"""
def test_get_all_logs_simple(self):
self.set_http_response(status_code=200)
response = self.service_connection.get_all_logs('db1')
self.assert_request_parameters({
'Action': 'DescribeDBLogFiles',
'DBInstanceIdentifier': 'db1',
}, ignore_params_values=['Version'])
self.assertEqual(len(response), 6)
self.assertTrue(isinstance(response[0], LogFile))
self.assertEqual(response[0].log_filename, 'error/mysql-error-running.log')
self.assertEqual(response[0].last_written, '1364403600000')
self.assertEqual(response[0].size, '0')
def test_get_all_logs_filtered(self):
self.set_http_response(status_code=200)
response = self.service_connection.get_all_logs('db_instance_1', max_records=100, marker='error/mysql-error.log', file_size=2000000, filename_contains='error', file_last_written=12345678)
self.assert_request_parameters({
'Action': 'DescribeDBLogFiles',
'DBInstanceIdentifier': 'db_instance_1',
'MaxRecords': 100,
'Marker': 'error/mysql-error.log',
'FileSize': 2000000,
'FilenameContains': 'error',
'FileLastWritten': 12345678,
}, ignore_params_values=['Version'])
self.assertEqual(len(response), 6)
self.assertTrue(isinstance(response[0], LogFile))
self.assertEqual(response[0].log_filename, 'error/mysql-error-running.log')
self.assertEqual(response[0].last_written, '1364403600000')
self.assertEqual(response[0].size, '0')
class TestRDSLogFileDownload(AWSMockServiceTestCase):
connection_class = RDSConnection
logfile_sample = """
??2014-01-26 23:59:00.01 spid54 Microsoft SQL Server 2012 - 11.0.2100.60 (X64)
Feb 10 2012 19:39:15
Copyright (c) Microsoft Corporation
Web Edition (64-bit) on Windows NT 6.1 <X64> (Build 7601: Service Pack 1) (Hypervisor)
2014-01-26 23:59:00.01 spid54 (c) Microsoft Corporation.
2014-01-26 23:59:00.01 spid54 All rights reserved.
2014-01-26 23:59:00.01 spid54 Server process ID is 2976.
2014-01-26 23:59:00.01 spid54 System Manufacturer: 'Xen', System Model: 'HVM domU'.
2014-01-26 23:59:00.01 spid54 Authentication mode is MIXED.
2014-01-26 23:59:00.01 spid54 Logging SQL Server messages in file 'D:\RDSDBDATA\Log\ERROR'.
2014-01-26 23:59:00.01 spid54 The service account is 'WORKGROUP\AMAZONA-NUQUUMV$'. This is an informational message; no user action is required.
2014-01-26 23:59:00.01 spid54 The error log has been reinitialized. See the previous log for older entries.
2014-01-27 00:00:56.42 spid25s This instance of SQL Server has been using a process ID of 2976 since 10/21/2013 2:16:50 AM (local) 10/21/2013 2:16:50 AM (UTC). This is an informational message only; no user action is required.
2014-01-27 09:35:15.43 spid71 I/O is frozen on database model. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup.
2014-01-27 09:35:15.44 spid72 I/O is frozen on database msdb. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup.
2014-01-27 09:35:15.44 spid74 I/O is frozen on database rdsadmin. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup.
2014-01-27 09:35:15.44 spid73 I/O is frozen on database main. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup.
2014-01-27 09:35:25.57 spid73 I/O was resumed on database main. No user action is required.
2014-01-27 09:35:25.57 spid74 I/O was resumed on database rdsadmin. No user action is required.
2014-01-27 09:35:25.57 spid71 I/O was resumed on database model. No user action is required.
2014-01-27 09:35:25.57 spid72 I/O was resumed on database msdb. No user action is required.
"""
def setUp(self):
super(TestRDSLogFileDownload, self).setUp()
def default_body(self):
return """
<DownloadDBLogFilePortionResponse xmlns="http://rds.amazonaws.com/doc/2013-09-09/">
<DownloadDBLogFilePortionResult>
<Marker>0:4485</Marker>
<LogFileData>%s</LogFileData>
<AdditionalDataPending>false</AdditionalDataPending>
</DownloadDBLogFilePortionResult>
<ResponseMetadata>
<RequestId>27143615-87ae-11e3-acc9-fb64b157268e</RequestId>
</ResponseMetadata>
</DownloadDBLogFilePortionResponse>
""" % self.logfile_sample
def test_single_download(self):
self.set_http_response(status_code=200)
response = self.service_connection.get_log_file('db1', 'foo.log')
self.assertTrue(isinstance(response, LogFileObject))
self.assertEqual(response.marker, '0:4485')
self.assertEqual(response.dbinstance_id, 'db1')
self.assertEqual(response.log_filename, 'foo.log')
self.assertEqual(response.data, saxutils.unescape(self.logfile_sample))
self.assert_request_parameters({
'Action': 'DownloadDBLogFilePortion',
'DBInstanceIdentifier': 'db1',
'LogFileName': 'foo.log',
}, ignore_params_values=['Version'])
def test_multi_args(self):
self.set_http_response(status_code=200)
response = self.service_connection.get_log_file('db1', 'foo.log', marker='0:4485', number_of_lines=10)
self.assertTrue(isinstance(response, LogFileObject))
self.assert_request_parameters({
'Action': 'DownloadDBLogFilePortion',
'DBInstanceIdentifier': 'db1',
'Marker': '0:4485',
'NumberOfLines': 10,
'LogFileName': 'foo.log',
}, ignore_params_values=['Version'])
class TestRDSOptionGroupOptions(AWSMockServiceTestCase):
connection_class = RDSConnection
def setUp(self):
super(TestRDSOptionGroupOptions, self).setUp()
def default_body(self):
return """
<DescribeOptionGroupOptionsResponse xmlns="http://rds.amazonaws.com/doc/2013-05-15/">
<DescribeOptionGroupOptionsResult>
<OptionGroupOptions>
<OptionGroupOption>
<MajorEngineVersion>11.2</MajorEngineVersion>
<PortRequired>true</PortRequired>
<OptionsDependedOn/>
<Description>Oracle Enterprise Manager</Description>
<DefaultPort>1158</DefaultPort>
<Name>OEM</Name>
<EngineName>oracle-se1</EngineName>
<MinimumRequiredMinorEngineVersion>0.2.v3</MinimumRequiredMinorEngineVersion>
<Persistent>false</Persistent>
<Permanent>false</Permanent>
</OptionGroupOption>
</OptionGroupOptions>
</DescribeOptionGroupOptionsResult>
<ResponseMetadata>
<RequestId>d9c8f6a1-84c7-11e1-a264-0b23c28bc344</RequestId>
</ResponseMetadata>
</DescribeOptionGroupOptionsResponse>
"""
def test_describe_option_group_options(self):
self.set_http_response(status_code=200)
response = self.service_connection.describe_option_group_options()
self.assertEqual(len(response), 1)
options = response[0]
self.assertEqual(options.name, 'OEM')
self.assertEqual(options.description, 'Oracle Enterprise Manager')
self.assertEqual(options.engine_name, 'oracle-se1')
self.assertEqual(options.major_engine_version, '11.2')
self.assertEqual(options.min_minor_engine_version, '0.2.v3')
self.assertEqual(options.port_required, True)
self.assertEqual(options.default_port, 1158)
self.assertEqual(options.permanent, False)
self.assertEqual(options.persistent, False)
self.assertEqual(options.depends_on, [])
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from six.moves import http_client
import webtest
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import database
class RestfulTestCase(unit.TestCase):
"""Performs restful tests against the WSGI app over HTTP.
This class launches public & admin WSGI servers for every test, which can
be accessed by calling ``public_request()`` or ``admin_request()``,
    respectively.
``restful_request()`` and ``request()`` methods are also exposed if you
need to bypass restful conventions or access HTTP details in your test
implementation.
Three new asserts are provided:
* ``assertResponseSuccessful``: called automatically for every request
unless an ``expected_status`` is provided
* ``assertResponseStatus``: called instead of ``assertResponseSuccessful``,
if an ``expected_status`` is provided
* ``assertValidResponseHeaders``: validates that the response headers
appear as expected
    Requests are automatically serialized according to the defined
    ``content_type``. Responses are automatically deserialized as well; the
    decoded result is available in the ``response.result`` attribute, while the
    original body content remains available in the ``response.body`` attribute.
"""
# default content type to test
content_type = 'json'
def setUp(self, app_conf='keystone'):
super(RestfulTestCase, self).setUp()
self.auth_plugin_config_override()
self.useFixture(database.Database())
self.load_backends()
self.load_fixtures(default_fixtures)
self.public_app = webtest.TestApp(
self.loadapp(app_conf, name='main'))
self.addCleanup(delattr, self, 'public_app')
self.admin_app = webtest.TestApp(
self.loadapp(app_conf, name='admin'))
self.addCleanup(delattr, self, 'admin_app')
def auth_plugin_config_override(self, methods=None, **method_classes):
self.useFixture(
ksfixtures.ConfigAuthPlugins(self.config_fixture,
methods,
**method_classes))
def request(self, app, path, body=None, headers=None, token=None,
expected_status=None, **kwargs):
if headers:
headers = {str(k): str(v) for k, v in headers.items()}
else:
headers = {}
if token:
headers['X-Auth-Token'] = str(token)
# sets environ['REMOTE_ADDR']
kwargs.setdefault('remote_addr', 'localhost')
response = app.request(path, headers=headers,
status=expected_status, body=body,
**kwargs)
return response
def assertResponseSuccessful(self, response):
"""Assert that a status code lies inside the 2xx range.
:param response: :py:class:`httplib.HTTPResponse` to be
verified to have a status code between 200 and 299.
example::
self.assertResponseSuccessful(response)
"""
self.assertTrue(
200 <= response.status_code <= 299,
'Status code %d is outside of the expected range (2xx)\n\n%s' %
            (response.status_code, response.body))
def assertResponseStatus(self, response, expected_status):
"""Assert a specific status code on the response.
:param response: :py:class:`httplib.HTTPResponse`
:param expected_status: The specific ``status`` result expected
example::
self.assertResponseStatus(response, http_client.NO_CONTENT)
"""
self.assertEqual(
expected_status, response.status_code,
'Status code %s is not %s, as expected\n\n%s' %
(response.status_code, expected_status, response.body))
def assertValidResponseHeaders(self, response):
"""Ensure that response headers appear as expected."""
self.assertIn('X-Auth-Token', response.headers.get('Vary'))
def assertValidErrorResponse(self, response,
expected_status=http_client.BAD_REQUEST):
"""Verify that the error response is valid.
Subclasses can override this function based on the expected response.
"""
self.assertEqual(expected_status, response.status_code)
error = response.result['error']
self.assertEqual(response.status_code, error['code'])
self.assertIsNotNone(error.get('title'))
def _to_content_type(self, body, headers, content_type=None):
"""Attempt to encode JSON and XML automatically."""
content_type = content_type or self.content_type
if content_type == 'json':
headers['Accept'] = 'application/json'
if body:
headers['Content-Type'] = 'application/json'
# NOTE(davechen):dump the body to bytes since WSGI requires
# the body of the response to be `Bytestrings`.
# see pep-3333:
# https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types
return jsonutils.dump_as_bytes(body)
def _from_content_type(self, response, content_type=None):
"""Attempt to decode JSON and XML automatically, if detected."""
content_type = content_type or self.content_type
if response.body is not None and response.body.strip():
# if a body is provided, a Content-Type is also expected
header = response.headers.get('Content-Type')
self.assertIn(content_type, header)
if content_type == 'json':
response.result = jsonutils.loads(response.body)
else:
response.result = response.body
def restful_request(self, method='GET', headers=None, body=None,
content_type=None, response_content_type=None,
**kwargs):
"""Serialize/deserialize json as request/response body.
.. WARNING::
* Existing Accept header will be overwritten.
* Existing Content-Type header will be overwritten.
"""
# Initialize headers dictionary
headers = {} if not headers else headers
body = self._to_content_type(body, headers, content_type)
# Perform the HTTP request/response
response = self.request(method=method, headers=headers, body=body,
**kwargs)
response_content_type = response_content_type or content_type
self._from_content_type(response, content_type=response_content_type)
# we can save some code & improve coverage by always doing this
if (method != 'HEAD' and
response.status_code >= http_client.BAD_REQUEST):
self.assertValidErrorResponse(response)
        # The returned response carries the decoded body in response.result
return response
def _request(self, convert=True, **kwargs):
if convert:
response = self.restful_request(**kwargs)
else:
response = self.request(**kwargs)
self.assertValidResponseHeaders(response)
return response
def public_request(self, **kwargs):
return self._request(app=self.public_app, **kwargs)
def admin_request(self, **kwargs):
return self._request(app=self.admin_app, **kwargs)
def _get_token(self, body):
"""Convenience method so that we can test authenticated requests."""
r = self.public_request(method='POST', path='/v2.0/tokens', body=body)
return self._get_token_id(r)
def get_admin_token(self):
return self._get_token({
'auth': {
'passwordCredentials': {
'username': self.user_reqadmin['name'],
'password': self.user_reqadmin['password']
},
'tenantId': default_fixtures.SERVICE_TENANT_ID
}
})
def get_unscoped_token(self):
"""Convenience method so that we can test authenticated requests."""
return self._get_token({
'auth': {
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password'],
},
},
})
def get_scoped_token(self, tenant_id=None):
"""Convenience method so that we can test authenticated requests."""
if not tenant_id:
tenant_id = self.tenant_bar['id']
return self._get_token({
'auth': {
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password'],
},
'tenantId': tenant_id,
},
})
def _get_token_id(self, r):
"""Helper method to return a token ID from a response.
        This needs to be overridden by child classes based on their content type.
"""
raise NotImplementedError()
|
|
#!/usr/bin/env python
# Copyright (c) 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks for copyright notices in all the files that need them under the
current directory. Optionally insert them. When inserting, replaces
an MIT or Khronos free use license with Apache 2.
"""
import argparse
import fileinput
import fnmatch
import inspect
import os
import re
import sys
# List of designated copyright owners.
AUTHORS = ['The Khronos Group Inc.',
'LunarG Inc.',
'Google Inc.',
'Google LLC',
'Pierre Moreau',
'Samsung Inc']
CURRENT_YEAR='2019'
YEARS = '(2014-2016|2015-2016|2016|2016-2017|2017|2018|2019)'
COPYRIGHT_RE = re.compile(
'Copyright \(c\) {} ({})'.format(YEARS, '|'.join(AUTHORS)))
MIT_BEGIN_RE = re.compile('Permission is hereby granted, '
'free of charge, to any person obtaining a')
MIT_END_RE = re.compile('MATERIALS OR THE USE OR OTHER DEALINGS IN '
'THE MATERIALS.')
APACHE2_BEGIN_RE = re.compile('Licensed under the Apache License, '
'Version 2.0 \(the "License"\);')
APACHE2_END_RE = re.compile('limitations under the License.')
LICENSED = """Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
LICENSED_LEN = 10 # Number of lines in LICENSED
def find(top, filename_glob, skip_glob_dir_list, skip_glob_files_list):
"""Returns files in the tree rooted at top matching filename_glob but not
in directories matching skip_glob_dir_list nor files matching
    skip_glob_files_list."""
file_list = []
for path, dirs, files in os.walk(top):
for glob in skip_glob_dir_list:
for match in fnmatch.filter(dirs, glob):
dirs.remove(match)
for filename in fnmatch.filter(files, filename_glob):
full_file = os.path.join(path, filename)
if full_file not in skip_glob_files_list:
file_list.append(full_file)
return file_list
def filtered_descendants(glob):
"""Returns glob-matching filenames under the current directory, but skips
some irrelevant paths."""
return find('.', glob, ['third_party', 'external', 'CompilerIdCXX',
'build*', 'out*'], ['./utils/clang-format-diff.py'])
def skip(line):
"""Returns true if line is all whitespace or shebang."""
stripped = line.lstrip()
return stripped == '' or stripped.startswith('#!')
def comment(text, prefix):
"""Returns commented-out text.
Each line of text will be prefixed by prefix and a space character. Any
trailing whitespace will be trimmed.
"""
accum = ['{} {}'.format(prefix, line).rstrip() for line in text.split('\n')]
return '\n'.join(accum)
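# For example, comment('Copyright (c) 2019 Google LLC', '//') returns
# '// Copyright (c) 2019 Google LLC'; blank input lines become just the prefix.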
def insert_copyright(author, glob, comment_prefix):
"""Finds all glob-matching files under the current directory and inserts the
copyright message, and license notice. An MIT license or Khronos free
use license (modified MIT) is replaced with an Apache 2 license.
The copyright message goes into the first non-whitespace, non-shebang line
in a file. The license notice follows it. Both are prefixed on each line
by comment_prefix and a space.
"""
copyright = comment('Copyright (c) {} {}'.format(CURRENT_YEAR, author),
comment_prefix) + '\n\n'
licensed = comment(LICENSED, comment_prefix) + '\n\n'
for file in filtered_descendants(glob):
# Parsing states are:
# 0 Initial: Have not seen a copyright declaration.
# 1 Seen a copyright line and no other interesting lines
# 2 In the middle of an MIT or Khronos free use license
# 9 Exited any of the above
state = 0
update_file = False
for line in fileinput.input(file, inplace=1):
emit = True
            if state == 0:
if COPYRIGHT_RE.search(line):
state = 1
elif skip(line):
pass
else:
# Didn't see a copyright. Inject copyright and license.
sys.stdout.write(copyright)
sys.stdout.write(licensed)
# Assume there isn't a previous license notice.
state = 1
            elif state == 1:
if MIT_BEGIN_RE.search(line):
state = 2
emit = False
elif APACHE2_BEGIN_RE.search(line):
# Assume an Apache license is preceded by a copyright
# notice. So just emit it like the rest of the file.
state = 9
            elif state == 2:
# Replace the MIT license with Apache 2
emit = False
if MIT_END_RE.search(line):
state = 9
sys.stdout.write(licensed)
if emit:
sys.stdout.write(line)
def alert_if_no_copyright(glob, comment_prefix):
"""Prints names of all files missing either a copyright or Apache 2 license.
Finds all glob-matching files under the current directory and checks if they
contain the copyright message and license notice. Prints the names of all the
files that don't meet both criteria.
Returns the total number of file names printed.
"""
printed_count = 0
for file in filtered_descendants(glob):
has_copyright = False
has_apache2 = False
line_num = 0
apache_expected_end = 0
with open(file) as contents:
for line in contents:
line_num += 1
if COPYRIGHT_RE.search(line):
has_copyright = True
if APACHE2_BEGIN_RE.search(line):
apache_expected_end = line_num + LICENSED_LEN
                if (line_num == apache_expected_end) and APACHE2_END_RE.search(line):
has_apache2 = True
if not (has_copyright and has_apache2):
message = file
if not has_copyright:
message += ' has no copyright'
if not has_apache2:
message += ' has no Apache 2 license notice'
print(message)
printed_count += 1
return printed_count
class ArgParser(argparse.ArgumentParser):
def __init__(self):
super(ArgParser, self).__init__(
description=inspect.getdoc(sys.modules[__name__]))
self.add_argument('--update', dest='author', action='store',
help='For files missing a copyright notice, insert '
'one for the given author, and add a license '
'notice. The author must be in the AUTHORS '
'list in the script.')
def main():
glob_comment_pairs = [('*.h', '//'), ('*.hpp', '//'), ('*.sh', '#'),
('*.py', '#'), ('*.cpp', '//'),
('CMakeLists.txt', '#')]
argparser = ArgParser()
args = argparser.parse_args()
if args.author:
if args.author not in AUTHORS:
print('error: --update argument must be in the AUTHORS list in '
'check_copyright.py: {}'.format(AUTHORS))
sys.exit(1)
for pair in glob_comment_pairs:
insert_copyright(args.author, *pair)
sys.exit(0)
else:
count = sum([alert_if_no_copyright(*p) for p in glob_comment_pairs])
sys.exit(count > 0)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# Copyright (c) 2013. Mark E. Madsen <mark@madsenlab.org>
#
# This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details.
import ming
import csv
import logging as log
import argparse
import ctmixtures.data as data
import pytransmission.popgen as pg
import math as m
############################################################################
def setup():
global args, config, simconfig
parser = argparse.ArgumentParser()
parser.add_argument("--experiment", help="provide name for experiment, to be used as prefix for database collections")
parser.add_argument("--debug", help="turn on debugging output")
parser.add_argument("--dbhost", help="database hostname, defaults to localhost", default="localhost")
parser.add_argument("--dbport", help="database port, defaults to 27017", default="27017")
parser.add_argument("--configuration", help="Path to configuration file")
parser.add_argument("--filename", help="path and base filename for exports (DO NOT include *.csv extension)", required=True)
args = parser.parse_args()
    # argparse returns option values as strings, so compare with the string form
    if args.debug == '1':
log.basicConfig(level=log.DEBUG, format='%(asctime)s %(levelname)s: %(message)s')
else:
log.basicConfig(level=log.INFO, format='%(asctime)s %(levelname)s: %(message)s')
#### main program ####
log.info("EXPORT DATA TO CSV - Experiment: %s", args.experiment)
data.set_experiment_name(args.experiment)
data.set_database_hostname(args.dbhost)
data.set_database_port(args.dbport)
config = data.getMingConfiguration(data.modules)
ming.configure(**config)
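# Illustrative invocation (script and experiment names are hypothetical):
#
#   python export_richness.py --experiment neutral_run1 --dbhost localhost \
#       --dbport 27017 --filename /tmp/neutral_run1
#
# which writes /tmp/neutral_run1-pop-sampled-richness.csv, since only
# export_sampled_stats() is called from __main__.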
############################################################################
def export_simulation_record():
# ## Export a simulation record file, with all params and classes used, random
### seed, whatever is needed to replicate the simulations
full_filename = ''
full_filename += args.filename
full_filename += "-simulation-data.csv"
sim_fields = data.mixture_model_stats.sim_record_columns_to_export()
ofile = open(full_filename, "wb")
writer = csv.DictWriter(ofile, fieldnames=sim_fields, quotechar='"', quoting=csv.QUOTE_ALL)
headers = dict((n, n) for n in sim_fields)
writer.writerow(headers)
cursor = data.MixtureModelStats.m.find(dict(), dict(timeout=False))
for sample in cursor:
row = dict()
for field in sim_fields:
row[field] = sample[field]
# correct kandler_interval from timesteps to generations
row['kandler_interval'] = int(row['kandler_interval']) / int(row['population_size'])
#log.info("sim data row: %s", row)
writer.writerow(row)
ofile.close()
############################################################################
# # whole population statistics
# slatkin_exact = Field([float])
# shannon_entropy = Field([float])
# iqv_diversity = Field([float])
# num_trait_configurations = Field(int)
# trait_configuration_counts = Field([])
# configuration_slatkin = Field(float)
# unlabeled_frequencies = Field([])
# unlabeled_counts = Field([])
# pop_richness = Field([int])
def export_population_stats():
# ## Export a full population census statistics file ###
full_filename = ''
full_filename += args.filename
full_filename += "-richness-data.csv"
pop_fields = data.mixture_model_stats.pop_columns_to_export()
# adjust the fields for the new summary statistics
pop_fields.append('innovation_rate')
pop_fields.append('locus-richness')
pop_fields.append('locus-slatkin')
ofile = open(full_filename, "wb")
writer = csv.DictWriter(ofile, fieldnames=pop_fields, quotechar='"', quoting=csv.QUOTE_ALL)
headers = dict((n, n) for n in pop_fields)
writer.writerow(headers)
cursor = data.MixtureModelStats.m.find(dict(), dict(timeout=False))
for sample in cursor:
richness_list = sample['pop_richness']
slatkin_list = sample['slatkin_exact']
numloci = int(sample['num_features'])
for i in xrange(0, numloci):
row = dict()
row['simulation_run_id'] = sample['simulation_run_id']
row['model_class_label'] = sample['model_class_label']
row['innovation_rate'] = sample['innovation_rate']
row['locus-richness'] = richness_list[i]
row['locus-slatkin'] = slatkin_list[i]
#log.info("sim data row: %s", row)
writer.writerow(row)
ofile.close()
############################################################################
# # results by sample size
# unlabeled_freq_ssize = Field(schema.Anything)
# unlabeled_counts_ssize = Field(schema.Anything)
# unlabeled_config_counts_ssize = Field(schema.Anything)
# num_configurations_ssize = Field(schema.Anything)
# config_slatkin_ssize = Field(schema.Anything)
# entropy_ssize = Field(schema.Anything)
# iqv_ssize = Field(schema.Anything)
# richness_ssize = Field(schema.Anything)
# slatkin_ssize = Field(schema.Anything)
# kandler_remaining_count = Field([int])
def export_sampled_stats():
## export a file with sampled statistics
full_filename = ''
full_filename += args.filename
full_filename += "-pop-sampled-richness.csv"
ssize_fields = data.mixture_model_stats.ssize_columns_to_export()
# adjust the fields for the new summary statistics
ssize_fields.append('innovation_rate')
ssize_fields.append('pop_richness')
ssize_fields.append('ssize_25')
ssize_fields.append('expected_25')
ssize_fields.append('sd_25')
ssize_fields.append('ssize_50')
ssize_fields.append('expected_50')
ssize_fields.append('sd_50')
ofile = open(full_filename, "wb")
writer = csv.DictWriter(ofile, fieldnames=ssize_fields, quotechar='"', quoting=csv.QUOTE_ALL)
headers = dict((n, n) for n in ssize_fields)
writer.writerow(headers)
cursor = data.MixtureModelStats.m.find(dict(), dict(timeout=False))
for sample in cursor:
richness_map = sample['richness_ssize']
pop_richness_list = sample['pop_richness']
slatkin_list = sample['slatkin_exact']
numloci = int(sample['num_features'])
rich25 = richness_map['25']
rich50 = richness_map['50']
rich100 = richness_map['100']
for i in xrange(0, numloci):
row = dict()
row['simulation_run_id'] = sample['simulation_run_id']
row['model_class_label'] = sample['model_class_label']
row['innovation_rate'] = sample['innovation_rate']
row['pop_richness'] = pop_richness_list[i]
res25 = pg.moran_expected_traits_at_locus(float(sample['innovation_rate']), 25)
res50 = pg.moran_expected_traits_at_locus(float(sample['innovation_rate']), 50)
row['ssize_25'] = rich25[i]
row['expected_25'] = res25[0]
row['sd_25'] = m.sqrt(res25[1])
row['ssize_50'] = rich50[i]
row['expected_50'] = res50[0]
row['sd_50'] = m.sqrt(res50[1])
#log.info("sim data row: %s", row)
writer.writerow(row)
ofile.close()
############################################################################
# # results for TA intervals over all sample sizes
# unlabeled_freq_ta_ssize = Field(schema.Anything)
# richness_ta_ssize = Field(schema.Anything)
# slatkin_ta_ssize = Field(schema.Anything)
# entropy_ta_ssize = Field(schema.Anything)
# iqv_ta_ssize = Field(schema.Anything)
# unlabeled_config_counts_ta_ssize = Field(schema.Anything)
# num_configurations_ta_ssize = Field(schema.Anything)
# config_slatkin_ta_ssize = Field(schema.Anything)
# config_entropy_ta_ssize = Field(schema.Anything)
# config_iqv_ta_ssize = Field(schema.Anything)
# kandler_remaining_tassize = Field(schema.Anything)
def export_ta_sampled_stats():
## export a file with sampled statistics
full_filename = ''
full_filename += args.filename
full_filename += "-tasampled-data.csv"
sim_fields = data.mixture_model_stats.tassize_columns_to_export()
ofile = open(full_filename, "wb")
writer = csv.DictWriter(ofile, fieldnames=sim_fields, quotechar='"', quoting=csv.QUOTE_ALL)
headers = dict((n, n) for n in sim_fields)
writer.writerow(headers)
cursor = data.MixtureModelStats.m.find(dict(), dict(timeout=False))
for sample in cursor:
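        # NOTE: per-sample row construction for the TA-sampled statistics is
        # not implemented here; only the CSV header row is written.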
pass
ofile.close()
############################################################################
if __name__ == "__main__":
setup()
export_sampled_stats()
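# A hedged post-processing sketch (comments only, not part of this script):
# compare the observed sampled richness against the Moran-model expectation
# exported by export_sampled_stats(). Column names come from that function;
# the z-score summary is only an illustration.
#
#     import pandas as pd
#     df = pd.read_csv(args.filename + "-pop-sampled-richness.csv")
#     df["z_25"] = (df["ssize_25"] - df["expected_25"]) / df["sd_25"]
#     print(df.groupby("innovation_rate")["z_25"].describe())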
|
|
# Copyright 2016,2017 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""
Base functionality for ThingFlow. All the core abstractions
are defined here. Everything else is just subclassing or using
these abstractions.
The key abstractions are:
* Thing - a unit of computation in the data flow graph. Things can be
Filters (with inputs and outputs) or Adapters (with only inputs
or only outputs).
* OutputThing - Base class and interface for things that emit event streams on
output ports.
* Sensor - an object that is (indirectly) connected to the physical world.
It can provide its current value through a sample() method.
Sensors can be turned into Things by wrapping them with
the SensorAsInputThing class.
* InputThing - interface for things that receive a stream of events on one or
more input ports.
* Filter - a thing that is both an InputThing and an OutputThing, with one
input and one output. Filters transform data streams.
* Scheduler - The scheduler wraps an event loop. It provides periodic and
one-time scheduling of OutputThings that originate events.
* event - ThingFlow largely does not care about the particulars of the
events it processes. However, we define a generic SensorEvent
datatype that can be used when the details of the event matter
to a thing.
See the README.rst file for more details.
"""
from collections import namedtuple
import threading
import time
import queue
import traceback as tb
import logging
logger = logging.getLogger(__name__)
from thingflow.internal import noop
class InputThing:
"""This is the interface for the default input
port of a Thing. Other (named) input ports will
define similar methods with the names as
on_PORT_next(), on_PORT_error(), and
on_PORT_completed().
"""
def on_next(self, x):
pass
def on_error(self, e):
pass
def on_completed(self):
pass
def _on_next_name(port):
if port==None or port=='default':
return 'on_next'
else:
return 'on_%s_next' % port
def _on_error_name(port):
if port==None or port=='default':
return 'on_error'
else:
return 'on_%s_error' % port
def _on_completed_name(port):
if port==None or port=='default':
return 'on_completed'
else:
return 'on_%s_completed' % port
class CallableAsInputThing:
"""Wrap any callable with the InputThing interface.
We only pass it the on_next() calls. on_error and on_completed
can be passed in or default to noops.
"""
def __init__(self, on_next=None, on_error=None, on_completed=None,
port=None):
setattr(self, _on_next_name(port), on_next or noop)
if on_error:
setattr(self, _on_error_name(port), on_error)
else:
def default_error(err):
if isinstance(err, FatalError):
raise err.with_traceback(err.__traceback__)
else:
logger.error("%s: Received on_error(%s)" %
(self, err))
setattr(self, _on_error_name(port), default_error)
setattr(self, _on_completed_name(port), on_completed or noop)
def __str__(self):
return 'CallableAsInputThing(%s)' % str(self.on_next)
def __repr__(self):
return 'CallableAsInputThing(on_next=%s, on_error=%s, on_completed=%s)' % \
(repr(self.on_next), repr(self.on_error), repr(self.on_completed))
class FatalError(Exception):
"""This is the base class for exceptions that should terminate the event
loop. This should be for out-of-bound errors, not for normal errors in
the data stream. Examples of out-of-bound errors include an exception
in the infrastructure or an error in configuring or dispatching an event
    stream (e.g. publishing to a non-existent port).
"""
pass
class InvalidPortError(FatalError):
pass
class UnknownPortError(FatalError):
pass
class PortAlreadyClosed(FatalError):
pass
class ExcInDispatch(FatalError):
"""Dispatching an event should not raise an error, other than a
fatal error.
"""
pass
# Internal representation of a connection. The first three fields
# are functions which dispatch to the InputThing. The InputThing and input_port
# fields are not needed at runtime, but helpful in debugging.
# We use a class with slots instead of a named tuple because we want to
# change the values of the on_next, etc. functions when tracing (tuples
# are read-only). Attribute access on a named tuple by name is no
# faster than slots. If we need to speed this up at some point, use a
# named tuple but access via the index values (at a cost to readability
# of the code).
class _Connection:
__slots__ = ('on_next', 'on_completed', 'on_error', 'input_thing',
'input_port')
def __init__(self, on_next, on_completed, on_error, input_thing,
input_port):
self.on_next = on_next
self.on_completed = on_completed
self.on_error = on_error
self.input_thing = input_thing
self.input_port = input_port
def __repr__(self):
return '_Connection(%s,%s,%s,%s,%s)' % \
(repr(self.on_next), repr(self.on_completed), repr(self.on_error),
repr(self.input_thing), repr(self.input_port))
def __str__(self):
return '_Connection(%s,%s)' % \
(str(self.input_thing), str(self.input_port))
class OutputThing:
"""Base class for event generators (output things). The non-underscore
methods are the public end-user interface. The methods starting with
underscores are for interactions with the scheduler.
"""
def __init__(self, ports=None):
self.__connections__ = {} # map from port to InputThing set
if ports is None:
self.__ports__ = set(['default',])
else:
self.__ports__ = set(ports)
for port in self.__ports__:
self.__connections__[port] = []
self.__enqueue_fn__ = None
self.__closed_ports__ = []
def connect(self, input_thing, port_mapping=None):
"""Connect the InputThing to events on a specific port. The port
mapping is a tuple of the OutputThing's port name and InputThing's port
name. It defaults to (default, default).
        This returns a function that can be called to remove the connection.
"""
if port_mapping==None:
output_port = 'default'
input_port = 'default'
else:
(output_port, input_port) = port_mapping
if output_port not in self.__ports__:
raise InvalidPortError("Invalid publish port '%s', valid ports are %s" %
(output_port,
', '.join([str(s) for s in self.__ports__])))
if not hasattr(input_thing, _on_next_name(input_port)) and callable(input_thing):
input_thing = CallableAsInputThing(input_thing, port=input_port)
try:
connection = \
_Connection(on_next=getattr(input_thing, _on_next_name(input_port)),
on_completed=getattr(input_thing, _on_completed_name(input_port)),
on_error=getattr(input_thing, _on_error_name(input_port)),
input_thing=input_thing,
input_port=input_port)
except AttributeError:
raise InvalidPortError("Invalid input port '%s', missing method(s) on InputThing %s" %
(input_port, input_thing))
new_connections = self.__connections__[output_port].copy()
new_connections.append(connection)
self.__connections__[output_port] = new_connections
def disconnect():
# To remove the connection, we replace the entire list with a copy
# that is missing the connection. This allows disconnect() to be
# called within a _dispatch method. Otherwise, we get an error if
# we attempt to change the list of connections while iterating over
# it.
new_connections = self.__connections__[output_port].copy()
#new_connections.remove(connection)
# we look for a connection to the same port and thing rather than
# the same object - the object may have changed due to tracing
found = False
for c in self.__connections__[output_port]:
if c.input_thing==input_thing and c.input_port==input_port:
new_connections.remove(c)
found = True
break
assert found
self.__connections__[output_port] = new_connections
return disconnect
def _has_connections(self):
"""Used by the scheduler to see the thing has any more outgoing connections.
If a scheduled thing no longer has output connections, it is descheduled.
"""
for (port, conns) in self.__connections__.items():
if len(conns)>0:
return True
return False
def _schedule(self, enqueue_fn):
"""This method is used by the scheduler to specify an enqueue function
to be called
when dispatching events to the connections. This is used when the
OutputThing runs in a separate thread from the main event loop. If
that is not the case, the enqueue function should be None.
"""
self.__enqueue_fn__ = enqueue_fn
def _close_port(self, port):
"""Port will receive no more messages. Remove the port from
this OutputThing.
"""
#print("Closing port %s on %s" % (port, self)) # XXX
del self.__connections__[port]
self.__ports__.remove(port)
self.__closed_ports__.append(port)
def _dispatch_next(self, x, port=None):
#print("Dispatch next called on %s, port %s, msg %s" % (self, port, str(x)))
if port==None:
port = 'default'
try:
connections = self.__connections__[port]
except KeyError as e:
if port in self.__closed_ports__:
raise PortAlreadyClosed("Port '%s' on OutputThing %s already had an on_completed or on_error_event" %
(port, self))
else:
raise UnknownPortError("Unknown port '%s' in OutputThing %s" %
(port, self)) from e
if len(connections) == 0:
return
enq = self.__enqueue_fn__
if enq:
for s in connections:
enq(s.on_next, x)
else:
try:
for s in connections:
s.on_next(x)
except FatalError:
raise
except Exception as e:
raise ExcInDispatch("Unexpected exception when dispatching event '%s' to InputThing %s from OutputThing %s" %
(repr(x), s.input_thing, self)) from e
def _dispatch_completed(self, port=None):
if port==None:
port = 'default'
try:
connections = self.__connections__[port]
except KeyError as e:
if port in self.__closed_ports__:
raise PortAlreadyClosed("Port '%s' on OutputThing %s already had an on_completed or on_error_event" %
(port, self))
else:
raise UnknownPortError("Unknown port '%s' in OutputThing %s" % (port, self)) from e
enq = self.__enqueue_fn__
if enq:
for s in connections:
enq(s.on_completed)
else:
try:
for s in connections:
s.on_completed()
except FatalError:
raise
except Exception as e:
raise ExcInDispatch("Unexpected exception when dispatching completed to InputThing %s from OutputThing %s" %
(s.input_thing, self)) from e
self._close_port(port)
def _dispatch_error(self, e, port=None):
if port==None:
port = 'default'
try:
connections = self.__connections__[port]
except KeyError as e:
if port in self.__closed_ports__:
raise PortAlreadyClosed("Port '%s' on OutputThing %s already had an on_completed or on_error_event" %
(port, self))
else:
raise UnknownPortError("Unknown port '%s' in OutputThing %s" % (port, self)) from e
enq = self.__enqueue_fn__
if enq:
for s in connections:
enq(s.on_error, e)
else:
try:
for s in connections:
s.on_error(e)
except FatalError:
raise
except Exception as e:
raise ExcInDispatch("Unexpected exception when dispatching error '%s' to InputThing %s from OutputThing %s" %
(repr(e), s.input_thing, self)) from e
self._close_port(port)
def print_downstream(self):
"""Recursively print all the downstream paths. This is for debugging.
"""
def has_connections(thing):
if not hasattr(thing, '__connections__'):
return False
return thing._has_connections()
def print_from(current_seq, thing):
if has_connections(thing):
for (port, connections) in thing.__connections__.items():
for connection in connections:
if port=='default' and \
connection.input_port=='default':
next_seq = " => %s" % connection.input_thing
else:
next_seq = " [%s]=>[%s] %s" % \
(port, connection.input_port,
connection.input_thing)
print_from(current_seq + next_seq,
connection.input_thing)
else:
print(current_seq)
print("***** Dump of all paths from %s *****" % self.__str__())
print_from(" " + self.__str__(), self)
print("*"*(12+len(self.__str__())))
def trace_downstream(self):
"""Install wrappers that print a trace message for each
        event on this thing and all downstream things.
"""
def has_connections(thing):
if not hasattr(thing, '__connections__'):
return False
return thing._has_connections()
def fmt(thing, port):
return '%s.%s' % (str(thing), port) if port!='default' \
else str(thing)
def trace_on_next(thing, output_port, connection, x):
print(" %s => (%s) => %s" %
(fmt(thing, output_port), str(x),
fmt(connection.input_thing,
connection.input_port)))
connection.on_next(x)
def trace_on_error(thing, output_port, connection, error):
print(" %s => on_error(%s) => %s" %
(fmt(thing, output_port), str(error),
fmt(connection.input_thing,
connection.input_port)))
connection.on_error(error)
def trace_on_completed(thing, output_port, connection):
print(" %s => on_completed => %s" %
(fmt(thing, output_port),
fmt(connection.input_thing,
connection.input_port)))
connection.on_completed()
def make_trace_connection(src_thing, output_port, old_connection):
return _Connection(
on_next=lambda x: trace_on_next(src_thing, output_port,
old_connection, x),
on_error=lambda e: trace_on_error(src_thing, output_port,
old_connection, e),
on_completed=lambda : trace_on_completed(src_thing,
output_port,
old_connection),
input_thing=old_connection.input_thing,
input_port=old_connection.input_port)
def trace_from(thing):
if has_connections(thing):
new_connections = {}
for (port, connections) in thing.__connections__.items():
connections_for_port = []
for connection in connections:
trace_from(connection.input_thing)
connections_for_port.append(make_trace_connection(thing,
port,
connection))
new_connections[port] = connections_for_port
thing.__connections__ = new_connections
trace_from(self)
print("***** installed tracing in all paths starting from %s" %
str(self))
def pp_connections(self):
"""pretty print the set of connections"""
h1 = "***** InputThings for %s *****" % self
print(h1)
for port in sorted(self.__connections__.keys()):
print(" Port %s" % port)
for s in self.__connections__[port]:
print(" [%s] => %s" % (s.input_port, s.input_thing))
print(" on_next: %s" % s.on_next)
print(" on_completed: %s" % s.on_completed)
print(" on_error: %s" % s.on_error)
print("*"*len(h1))
def __str__(self):
return self.__class__.__name__ + '()'
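# A minimal connect() sketch (comments only; from_list is defined later in
# this module). Any bare callable connected to the default port is wrapped
# in a CallableAsInputThing, and connect() returns a disconnect thunk:
#
#     things = from_list([1, 2, 3])
#     disconnect = things.connect(lambda x: print("got", x))
#     disconnect()   # remove the connection again
#
# For named ports, pass a (output_port, input_port) tuple:
#
#     multi = OutputThing(ports=['ok', 'err'])
#     multi.connect(error_handler, port_mapping=('err', 'default'))
#
# Here error_handler stands for any InputThing or callable of one argument.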
class Filter(OutputThing, InputThing):
"""A filter has a default input port and a default output port. It is
used for data transformations. The default implementations of on_next(),
on_completed(), and on_error() just pass the event on to the downstream
connection.
"""
def __init__(self, previous_in_chain):
super().__init__()
# connect to the previous filter
self.disconnect_from_upstream = previous_in_chain.connect(self)
def on_next(self, x):
self._dispatch_next(x)
def on_error(self, e):
self._dispatch_error(e)
def on_completed(self):
self._dispatch_completed()
def __str__(self):
return self.__class__.__name__ + '()'
class XformOrDropFilter(Filter):
"""Implements a slightly more complex filter protocol where events may be
transformed or dropped. Subclasses just need to implement the _filter() and
_complete() methods.
"""
def __init__(self, previous_in_chain):
super().__init__(previous_in_chain)
def on_next(self, x):
"""Calls _filter(x) to process
        the event. If _filter() returns None, nothing further is done. Otherwise,
the return value is passed to the downstream connection. This allows you
to both transform as well as send only selected events.
Errors other than FatalError are handled gracefully by calling
        self.on_error() and then disconnecting from the upstream OutputThing.
"""
try:
x_prime = self._filter(x)
except FatalError:
raise
except Exception as e:
logger.exception("Got an exception on %s._filter(%s)" %
(self, x))
self.on_error(e)
self.disconnect_from_upstream()
else:
if x_prime is not None:
self._dispatch_next(x_prime)
def _filter(self, x):
"""Filtering method to be implemented by subclasses.
"""
return x
def _complete(self):
"""Method to be overridden by subclasses. It is called as a part of
on_error() and on_completed() to give a chance to pass down a held-back
event. Return None if there is no such event.
You can also clean up any state in this method (e.g. close connections).
        Should not throw any exceptions other than FatalError.
"""
return None
def on_error(self, e):
"""Passes on any final event and then passes the notification to the
next Thing.
If you need to clean up any state, do it in _complete().
"""
x = self._complete()
if x is not None:
self._dispatch_next(x)
self._dispatch_error(e)
def on_completed(self):
"""Passes on any final event and then passes the notification to the
next Thing.
If you need to clean up any state, do it in _complete().
"""
x = self._complete()
if x is not None:
self._dispatch_next(x)
self._dispatch_completed()
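# A minimal XformOrDropFilter sketch (comments only): returning None from
# _filter() drops the event, anything else is forwarded downstream.
#
#     class DropNones(XformOrDropFilter):
#         def _filter(self, x):
#             return float(x) if x is not None else None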
class FunctionFilter(Filter):
"""Implement a filter by providing functions that implement the
    on_next, on_completed, and on_error logic. This is useful
when the logic is really simple or when a more functional programming
style is more convenient.
Each function takes a "self" parameter, so it works almost like it was
defined as a bound method. The signatures are then::
on_next(self, x)
on_completed(self)
on_error(self, e)
If a function is not provided to __init__, we just dispatch the call downstream.
"""
def __init__(self, previous_in_chain,
on_next=None, on_completed=None,
on_error=None, name=None):
"""name is an option name to be used in __str__() calls.
"""
super().__init__(previous_in_chain)
self._on_next = on_next
self._on_error = on_error
self._on_completed = on_completed
if name:
self.name = name
def on_next(self, x):
try:
if self._on_next:
# we pass in an extra "self" since this is a function, not a method
self._on_next(self, x)
else:
self._dispatch_next(x)
except FatalError:
raise
except Exception as e:
logger.exception("Got an exception on %s.on_next(%s)" %
(self, x))
self.on_error(e)
self.disconnect_from_upstream() # stop from getting upstream events
def on_error(self, e):
if self._on_error:
self._on_error(self, e)
else:
self._dispatch_error(e)
def on_completed(self):
if self._on_completed:
self._on_completed(self)
else:
self._dispatch_completed()
def __str__(self):
if hasattr(self, 'name'):
return self.name
else:
return self.__class__.__name__ + '()'
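# A minimal FunctionFilter sketch (comments only): the same kind of logic as a
# subclass, expressed with plain functions. Each function receives the filter
# instance as its first argument, so it can call the _dispatch_* methods.
#
#     def keep_positive(self, x):
#         if x > 0:
#             self._dispatch_next(x)
#
#     positives = FunctionFilter(upstream, on_next=keep_positive,
#                                name='positives')
#
# 'upstream' stands for any OutputThing earlier in the chain.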
def _is_thunk(t):
return hasattr(t, '__thunk__')
def _make_thunk(t):
setattr(t, '__thunk__', True)
class _ThunkBuilder:
"""This is used to create a thunk from a linq-style
method.
"""
def __init__(self, func):
self.func = func
self.__name__ = func.__name__
def __call__(self, *args, **kwargs):
if len(args)==0 and len(kwargs)==0:
_make_thunk(self.func)
return self.func
def apply(this):
return self.func(this, *args, **kwargs)
apply.__name__ = self.__name__
_make_thunk(apply)
return apply
def __repr__(self):
return "_ThunkBuilder(%s)" % self.__name__
def _connect_thunk(prev, thunk):
"""Connect the thunk to the previous in the chain. Handles
all the cases where we might be given a filter, a thunk,
a thunk builder (unevaluated linq function), or a bare callable."""
if callable(thunk):
if _is_thunk(thunk):
return thunk(prev)
elif isinstance(thunk, _ThunkBuilder):
real_thunk = thunk()
assert _is_thunk(real_thunk)
return real_thunk(prev)
else: # bare callable, will be wrapped by the connect() method
prev.connect(thunk)
return None
else:
return prev.connect(thunk) # assumed to be a filter
def filtermethod(base, alias=None):
"""Function decorator that creates a linq-style filter out of the
specified function. As described in the thingflow.linq documentation,
    it should take an OutputThing as its first argument (the source of events)
    and return an OutputThing (representing the end of the filter sequence once
    the filter is included). The returned OutputThing is typically an instance
of thingflow.base.Filter.
The specified function is used in two places:
1. A method with the specified name is added to the specified class
(usually the OutputThing base class). This is for the fluent (method
chaining) API.
2. A function is created in the local namespace for use in the functional API.
This function does not take the OutputThing as an argument. Instead,
it takes the remaining arguments and then returns a function which,
when passed a OutputThing, connects to it and returns a filter.
Decorator arguments:
* param T base: Base class to extend with method
(usually thingflow.base.OutputThing)
* param string alias: an alias for this function or list of aliases
(e.g. map for select, etc.).
* returns: A function that takes the class to be decorated.
* rtype: func -> func
This was adapted from the RxPy extensionmethod decorator.
"""
def inner(func):
"""This function is returned by the outer filtermethod()
:param types.FunctionType func: Function to be decorated
"""
func_names = [func.__name__,]
if alias:
aliases = alias if isinstance(alias, list) else [alias]
func_names += aliases
_thunk = _ThunkBuilder(func)
# For the primary name and all aliases, set the name on the
# base class as well as in the local namespace.
for func_name in func_names:
setattr(base, func_name, func)
func.__globals__[func_name] = _thunk
return _thunk
return inner
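# A minimal filtermethod sketch (comments only), assuming OutputThing is used
# as the base class (as the thingflow.linq filters do):
#
#     @filtermethod(OutputThing, alias="map")
#     def select(this, mapfn):
#         return FunctionFilter(
#             this,
#             on_next=lambda self, x: self._dispatch_next(mapfn(x)),
#             name='select')
#
#     source.select(lambda x: x * 2)    # fluent (method-chaining) style
#     thunk = select(lambda x: x * 2)   # functional style: returns a thunk
#     thunk(source)                     # that is applied to an OutputThing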
class DirectOutputThingMixin:
"""This is the interface for OutputThings that should be directly
scheduled by the scheduler (e.g. through schedule_recurring(),
schedule_periodic(), or schedule_periodic_on_separate_thread).
"""
def _observe(self):
"""Get an event and call the appropriate dispatch function.
"""
        raise NotImplementedError
class EventLoopOutputThingMixin:
"""OutputThing that gets messages from an event loop, either the same
loop as the scheduler or a separate one.
"""
def _observe_event_loop(self):
"""Call the event OutputThing's event loop. When
an event occurs, the appropriate _dispatch method should
be called.
"""
        raise NotImplementedError
def _stop_loop(self):
"""When this method is called, the OutputThing should exit the
event loop as soon as possible.
"""
        raise NotImplementedError
class IterableAsOutputThing(OutputThing, DirectOutputThingMixin):
"""Convert any interable to an OutputThing. This can be
used with the schedule_recurring() and schedule_periodic()
methods of the scheduler.
"""
def __init__(self, iterable, name=None):
super().__init__()
self.iterable = iterable
self.name = name
def _observe(self):
try:
event = self.iterable.__next__()
except StopIteration:
self._close()
self._dispatch_completed()
except FatalError:
self._close()
raise
except Exception as e:
# If the iterable throws an exception, we treat it as non-fatal.
# The error is dispatched downstream and the connection closed.
# If other sensors are running, things will continue.
tb.print_exc()
self._close()
self._dispatch_error(e)
else:
self._dispatch_next(event)
def _close(self):
"""This method is called when we stop the iteration, either due to
reaching the end of the sequence or an error. It can be overridden by
subclasses to clean up any state and release resources (e.g. closing
open files/connections).
"""
pass
def __str__(self):
if hasattr(self, 'name') and self.name:
return self.name
else:
return super().__str__()
def from_iterable(i):
return IterableAsOutputThing(i)
def from_list(l):
return IterableAsOutputThing(iter(l))
# XXX Move this out of base.py
class FunctionIteratorAsOutputThing(OutputThing, DirectOutputThingMixin):
"""Generates an OutputThing sequence by running a state-driven loop
producing the sequence's elements. Example::
res = GenerateOutputThing(0,
lambda x: x < 10,
lambda x: x + 1,
lambda x: x)
initial_state: Initial state.
condition: Condition to terminate generation (upon returning False).
iterate: Iteration step function.
result_selector: Selector function for results produced in the sequence.
Returns the generated sequence.
"""
def __init__(self, initial_state, condition, iterate, result_selector):
super().__init__()
self.value = initial_state
self.condition = condition
self.iterate = iterate
self.result_selector = result_selector
self.first = True
def _observe(self):
try:
if self.first: # first time: just send the value
self.first = False
if self.condition(self.value):
r = self.result_selector(self.value)
self._dispatch_next(r)
else:
self._dispatch_completed()
else:
if self.condition(self.value):
self.value = self.iterate(self.value)
r = self.result_selector(self.value)
self._dispatch_next(r)
else:
self._dispatch_completed()
except Exception as e:
self._dispatch_error(e)
def from_func(init, cond, iter, selector):
return FunctionIteratorAsOutputThing(init, cond, iter, selector)
# Define a default sensor event as a tuple of sensor id, timestamp, and value.
SensorEvent = namedtuple('SensorEvent', ['sensor_id', 'ts', 'val'])
def make_sensor_event(sensor, sample):
"""Given a sensor object and a sample taken from that sensor,
return a SensorEvent tuple."""
return SensorEvent(sensor_id=sensor.sensor_id, ts=time.time(),
val=sample)
class SensorAsOutputThing(OutputThing, DirectOutputThingMixin):
"""OutputThing that samples a sensor upon its observe call, creates
an event from the sample, and dispatches it forward. A sensor is just
an object that has a sensor_id property and a sample() method. If the
sensor wants to complete the stream, it should throw a StopIteration
exception.
By default, it generates SensorEvent instances. This behavior can be
changed by passing in a different function for make_event_fn.
"""
def __init__(self, sensor, make_event_fn=make_sensor_event):
super().__init__()
self.sensor = sensor
self.make_event_fn = make_event_fn
def _observe(self):
try:
self._dispatch_next(self.make_event_fn(self.sensor,
self.sensor.sample()))
except FatalError:
raise
except StopIteration:
self._dispatch_completed()
except Exception as e:
self._dispatch_error(e)
def __repr__(self):
return 'SensorAsOutputThing(%s)' % repr(self.sensor)
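# A minimal sensor sketch (comments only): anything with a sensor_id attribute
# and a sample() method can be wrapped; raise StopIteration from sample() to
# end the stream.
#
#     import random
#     class RandomSensor:
#         def __init__(self, sensor_id):
#             self.sensor_id = sensor_id
#         def sample(self):
#             return random.random()
#
#     thing = SensorAsOutputThing(RandomSensor("sensor-1"))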
class BlockingInputThing:
"""This implements a InputThing which may potential block when sending an
event outside the system. The InputThing is run on a separate thread. We
create proxy methods for each port that can be called directly - these
methods just queue up the call to run in the worker thread.
The actual implementation of the InputThing goes in the _on_next,
_on_completed, and _on_error methods. Note that we don't dispatch to separate
methods for each port. This is because the port is likely to end up as
just a message field rather than as a separate destination in the lower
layers.
"""
def __init__(self, scheduler, ports=None):
if ports==None:
self.ports = ['default',]
else:
self.ports = ports
self.num_closed_ports = 0
# create local proxy methods for each port
for port in self.ports:
            # Bind the current port as a default argument so that each proxy
            # method refers to its own port rather than the loop's final value.
            setattr(self, _on_next_name(port),
                    lambda x, port=port: self.__queue__.put(
                        (self._on_next, False, [port, x]),))
            setattr(self, _on_completed_name(port),
                    lambda port=port: self.__queue__.put(
                        (self._on_completed, True, [port]),))
            setattr(self, _on_error_name(port),
                    lambda e, port=port: self.__queue__.put(
                        (self._on_error, True, [port, e]),))
self.__queue__ = queue.Queue()
self.scheduler = scheduler
self.thread = _ThreadForBlockingInputThing(self, scheduler)
self.scheduler.active_schedules[self] = self.request_stop
def start():
self.thread.start()
self.scheduler.event_loop.call_soon(start)
def request_stop(self):
"""This can be called to stop the thread before it is automatically
stopped when all ports are closed. The close() method will be
called and the InputThing cannot be restarted later.
"""
if self.thread==None:
return # no thread to stop
self.__queue__.put(None) # special stop token
def _wait_and_dispatch(self):
"""Called by main loop of blocking thread to block for a request
and then dispatch it. Returns True if it processed a normal request
and False if it got a stop message or there is no more events possible.
"""
action = self.__queue__.get()
if action is not None:
(method, closing_port, args) = action
method(*args)
if closing_port:
self.num_closed_ports += 1
if self.num_closed_ports==len(self.ports):
# no more ports can receive events, treat this
# as a stop.
print("Stopping blocking InputThing %s" % self)
return False
return True # more work possible
else:
return False # stop requested
def _on_next(self, port, x):
"""Process the on_next event. Called in blocking thread."""
pass
def _on_completed(self, port):
"""Process the on_completed event. Called in blocking thread."""
pass
def _on_error(self, port, e):
"""Process the on_error event. Called in blocking thread."""
pass
def _close(self):
"""This is called when all ports have been closed. This can be used
to close any connections, etc.
"""
pass
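# A minimal BlockingInputThing sketch (comments only): a writer that blocks on
# file I/O, so it runs on the worker thread. Only the _on_* hooks need to be
# implemented; the per-port proxy methods are generated in __init__.
#
#     class FileWriter(BlockingInputThing):
#         def __init__(self, scheduler, path):
#             super().__init__(scheduler)
#             self.f = open(path, 'w')
#         def _on_next(self, port, x):
#             self.f.write("%s\n" % (x,))
#         def _close(self):
#             self.f.close()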
class _ThreadForBlockingInputThing(threading.Thread):
"""Background thread for a InputThing that passes events to the
external world and might block.
"""
def __init__(self, input_thing, scheduler):
self.input_thing = input_thing
self.scheduler= scheduler
self.stop_requested = False
super().__init__()
def run(self):
try:
more = True
while more:
more = self.input_thing._wait_and_dispatch()
except Exception as e:
msg = "_wait_and_dispatch for %s exited with error: %s" % \
(self.input_thing, e)
logger.exception(msg)
self.input_thing._close()
self.input_thing.thread = None # disassociate this thread
def die(): # need to stop the scheduler in the main loop
del self.scheduler.active_schedules[self.input_thing]
raise ScheduleError(msg) from e
self.scheduler.event_loop.call_soon_threadsafe(die)
else:
self.input_thing._close()
self.input_thing.thread = None # disassociate this thread
def done():
self.scheduler._remove_from_active_schedules(self.input_thing)
self.scheduler.event_loop.call_soon_threadsafe(done)
class _ThreadForBlockingOutputThing(threading.Thread):
"""Background thread for OutputThings that might block.
"""
def __init__(self, output_thing, interval, scheduler):
self.output_thing = output_thing
self.interval = interval
self.scheduler = scheduler
self.stop_requested = False
super().__init__()
def _stop_loop(self):
self.stop_requested = True
def run(self):
def enqueue_fn(fn, *args):
self.scheduler.event_loop.call_soon_threadsafe(fn, *args)
self.output_thing._schedule(enqueue_fn=enqueue_fn)
try:
while True:
if self.stop_requested:
break
start = time.time()
self.output_thing._observe()
                if not self.output_thing._has_connections():
break
time_left = self.interval - (time.time() - start)
if time_left > 0 and (not self.stop_requested):
time.sleep(time_left)
except Exception as e:
msg = "_observe for %s exited with error" % self.output_thing
logger.exception(msg)
def die(): # need to stop the scheduler in the main loop
del self.scheduler.active_schedules[self.output_thing]
raise ScheduleError(msg) from e
self.scheduler.event_loop.call_soon_threadsafe(die)
else:
def done():
self.scheduler._remove_from_active_schedules(self.output_thing)
self.scheduler.event_loop.call_soon_threadsafe(done)
class ScheduleError(FatalError):
pass
class Scheduler:
"""Wrap an asyncio event loop and provide methods for various kinds of
periodic scheduling.
"""
def __init__(self, event_loop):
self.event_loop = event_loop
self.active_schedules = {} # mapping from task to schedule handle
self.pending_futures = {}
self.next_future_id = 1
# Set the following to an exception if we are exiting the loop due to
# an exception. We will then raise a SchedulerError when the event loop
# exits.
self.fatal_error = None
# we set the exception handler to stop all active schedules and
# break out of the event loop if we get an unexpected error.
def exception_handler(loop, context):
assert loop==self.event_loop
self.fatal_error = context['exception']
self.stop()
self.event_loop.set_exception_handler(exception_handler)
def _remove_from_active_schedules(self, output_thing):
"""Remove the specified OutputThing from the active_schedules map.
If there are no more active schedules, we will request exiting of
the event loop. This method must be run from the main thread.
"""
del self.active_schedules[output_thing]
if len(self.active_schedules)==0:
print("No more active schedules, will exit event loop")
self.stop()
def schedule_periodic(self, output_thing, interval):
"""Returns a callable that can be used to remove the OutputThing from the
scheduler.
"""
def cancel():
try:
handle = self.active_schedules[output_thing]
except KeyError:
raise ScheduleError("Attempt to de-schedule OutputThing %s, which does not have an active schedule" %
output_thing)
handle.cancel()
self._remove_from_active_schedules(output_thing)
def run():
assert output_thing in self.active_schedules
output_thing._observe()
more = output_thing._has_connections()
if not more and output_thing in self.active_schedules:
self._remove_from_active_schedules(output_thing)
elif output_thing in self.active_schedules:
handle = self.event_loop.call_later(interval, run)
self.active_schedules[output_thing] = handle
output_thing._schedule(enqueue_fn=None)
handle = self.event_loop.call_later(interval, run)
self.active_schedules[output_thing] = handle
output_thing._schedule(enqueue_fn=None)
return cancel
def schedule_sensor(self, sensor, interval, *input_thing_sequence,
make_event_fn=make_sensor_event,
print_downstream=False):
"""Create a OutputThing wrapper for the sensor and schedule it at the
specified interval. Compose the specified connections (and/or thunks)
into a sequence and connect the sequence to the sensor's OutputThing.
Returns a thunk that can be used to remove the OutputThing from the
scheduler.
"""
output_thing = SensorAsOutputThing(sensor, make_event_fn=make_event_fn)
prev = output_thing
for s in input_thing_sequence:
assert prev,\
"attempted to compose a terminal InputThing/thunk in a non-final position"
prev = _connect_thunk(prev, s)
if print_downstream:
output_thing.print_downstream() # just for debugging
return self.schedule_periodic(output_thing, interval)
def schedule_recurring(self, output_thing):
"""Takes a DirectOutputThingMixin and calls _observe() to get events. If,
after the call, there are no downstream connections, the scheduler will
deschedule the output thing.
This variant is useful for something like an iterable. If the call to get
        the next event would block, don't use this! Instead, use one of the calls
        that run in a separate thread (e.g. schedule_on_private_event_loop()
        or schedule_periodic_on_separate_thread()).
Returns a callable that can be used to remove the OutputThing from the
scheduler.
"""
def cancel():
print("canceling schedule of %s" % output_thing)
try:
handle = self.active_schedules[output_thing]
except KeyError:
raise ScheduleError("Attempt to de-schedule OutputThing %s, which does not have an active schedule" %
output_thing)
handle.cancel()
self._remove_from_active_schedules(output_thing)
def run():
assert output_thing in self.active_schedules
output_thing._observe()
more = output_thing._has_connections()
if not more and output_thing in self.active_schedules:
self._remove_from_active_schedules(output_thing)
elif output_thing in self.active_schedules:
handle = self.event_loop.call_soon(run)
self.active_schedules[output_thing] = handle
output_thing._schedule(enqueue_fn=None)
handle = self.event_loop.call_soon(run)
self.active_schedules[output_thing] = handle
output_thing._schedule(enqueue_fn=None)
return cancel
def schedule_on_main_event_loop(self, output_thing):
"""Schedule an OutputThing that runs on the main event loop.
The OutputThing is assumed to implement EventLoopOutputThingMixin.
Returns a callable that can be used to unschedule the OutputThing.
"""
def stop():
# tell the OutputThing to stop. When the OutputThing has finished
# processing any messages, it MUST call
# _remove_from_active_schedules() on the scheduler.
output_thing._stop_loop()
self.active_schedules[output_thing] = stop
self.event_loop.call_soon(output_thing._observe_event_loop)
return stop
def schedule_on_private_event_loop(self, output_thing):
"""Schedule an OutputThing that has its own event loop on another thread.
The OutputThing is assumed to implement EventLoopOutputThingMixin.
Returns a callable that can be used to unschedule the OutputThing, by
requesting that the event loop stop.
"""
def enqueue_fn(fn, *args):
self.event_loop.call_soon_threadsafe(fn, *args)
def thread_main():
try:
output_thing._schedule(enqueue_fn=enqueue_fn)
# ok, lets run the event loop
output_thing._observe_event_loop()
except Exception as e:
msg = "Event loop for %s exited with error" % output_thing
logger.exception(msg)
def die(): # need to stop the scheduler in the main loop
del self.active_schedules[output_thing]
raise ScheduleError(msg) from e
self.event_loop.call_soon_threadsafe(die)
else:
def loop_done():
self._remove_from_active_schedules(output_thing)
self.event_loop.call_soon_threadsafe(loop_done)
t = threading.Thread(target=thread_main)
self.active_schedules[output_thing] = output_thing._stop_loop
self.event_loop.call_soon(t.start)
return output_thing._stop_loop
def schedule_periodic_on_separate_thread(self, output_thing, interval):
"""Schedule an OutputThing to run in a separate thread. It should
implement the DirectOutputThingMixin.
Returns a callable that can be used to unschedule the OutputThing, by
requesting that the child thread stop.
"""
t = _ThreadForBlockingOutputThing(output_thing, interval, self)
self.active_schedules[output_thing] = t._stop_loop
self.event_loop.call_soon(t.start)
return t._stop_loop
def schedule_sensor_on_separate_thread(self, sensor, interval, *input_thing_sequence,
make_event_fn=make_sensor_event):
"""Create a OutputThing wrapper for the sensor and schedule it at the
specified interval. Compose the specified connections (and/or thunks)
into a sequence and connect the sequence to the sensor's OutputThing.
Returns a thunk that can be used to remove the OutputThing from the
scheduler.
"""
output_thing = SensorAsOutputThing(sensor, make_event_fn=make_event_fn)
prev = output_thing
for s in input_thing_sequence:
assert prev,\
"attempted to compose a terminal InputThing/thunk in a non-final position"
prev = _connect_thunk(prev, s)
return self.schedule_periodic_on_separate_thread(output_thing, interval)
def schedule_later_one_time(self, output_thing, interval):
def cancel():
print("canceling schedule of %s" % output_thing)
try:
handle = self.active_schedules[output_thing]
except KeyError:
raise ScheduleError("Attempt to de-schedule OutputThing %s, which does not have an active schedule" %
output_thing)
handle.cancel()
self._remove_from_active_schedules(output_thing)
def run():
assert output_thing in self.active_schedules
# Remove from the active schedules since this was a one-time schedule.
# Note that the _observe() call could potentially reschedule the
# OutputThing through another call to the scheduler.
self._remove_from_active_schedules(output_thing)
output_thing._observe()
handle = self.event_loop.call_later(interval, run)
self.active_schedules[output_thing] = handle
output_thing._schedule(enqueue_fn=None)
return cancel
def run_forever(self):
"""Call the event loop's run_forever(). We don't really run forever:
the event loop is exited if we run out of scheduled events or if stop()
is called.
"""
try:
self.event_loop.run_forever()
except KeyboardInterrupt:
# If someone hit Control-C to break out of the loop,
            # they might be trying to diagnose a hang. Print the
# active OutputThings here before passing on the interrupt.
print("Active OutputThings: %s" %
', '.join([('%s'%o) for o in self.active_schedules.keys()]))
raise
if self.fatal_error is not None:
raise ScheduleError("Scheduler aborted due to fatal error") \
from self.fatal_error
def _schedule_coroutine(self, coro, done_callback):
"""This is for low-level components that deal directly with
        the event loop to schedule a coroutine. We
track them so we can either wait for or cancel them when stop()
is called.
"""
fid = self.next_future_id
future = self.event_loop.create_task(coro)
# the combined callback. To avoid race conditions, always
# call the provided done callback before we remove the future.
def cb(f):
done_callback(f)
del self.pending_futures[fid]
self.pending_futures[fid] = future
future.add_done_callback(cb)
self.next_future_id += 1
return future
def stop(self):
"""Stop any active schedules for output things and then call stop() on
the event loop.
"""
for (task, handle) in self.active_schedules.items():
#print("Stopping %s" % task)
# The handles are either event scheduler handles (with a cancel
# method) or just callables to be called directly.
if hasattr(handle, 'cancel'):
handle.cancel()
else:
handle()
self.active_schedules = {}
# go through the pending futures. We don't stop the
# event loop until all the pending futures have been
# completed or stopped by their callers.
for (fid, f) in self.pending_futures.items():
if f.done() == False:
# if we still have pending futures, we try the
# stop again after the first one we see has
# completed.
#print("Waiting for future %d (%s)" % (fid, repr(f)))
def recheck_stop(f):
exc = f.exception()
if exc:
raise FatalError("Exception in coroutine %s" % repr(f)) from exc
else:
self.stop()
f.add_done_callback(recheck_stop)
return
elif f.exception():
                raise FatalError("Exception in coroutine %s" % repr(f)) from f.exception()
self.event_loop.stop()
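# A minimal end-to-end sketch (comments only), reusing the hypothetical
# RandomSensor from the SensorAsOutputThing comment above: sample once per
# second and print each SensorEvent until interrupted.
#
#     import asyncio
#     scheduler = Scheduler(asyncio.get_event_loop())
#     scheduler.schedule_sensor(RandomSensor("sensor-1"), 1.0, print)
#     scheduler.run_forever()   # exits when no schedules remain or stop() is called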
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# moldynplot.dataset.ChemicalShiftDataset.py
#
# Copyright (C) 2015-2017 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Represents NMR chemical shift data
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
if __name__ == "__main__":
__package__ = str("moldynplot.dataset")
import moldynplot.dataset
from IPython import embed
import numpy as np
import pandas as pd
from .SequenceDataset import SequenceDataset
from ..myplotspec import sformat, wiprint
################################### CLASSES ###################################
class ChemicalShiftDataset(SequenceDataset):
"""
    Represents NMR chemical shift data
"""
@staticmethod
def construct_argparser(parser_or_subparsers=None, **kwargs):
"""
Adds arguments to an existing argument parser, constructs a
subparser, or constructs a new parser
Arguments:
parser_or_subparsers (ArgumentParser, _SubParsersAction,
optional): If ArgumentParser, existing parser to which
arguments will be added; if _SubParsersAction, collection of
subparsers to which a new argument parser will be added; if
None, a new argument parser will be generated
kwargs (dict): Additional keyword arguments
Returns:
ArgumentParser: Argument parser or subparser
"""
import argparse
# Process arguments
help_message = """Process NMR chemical shift data"""
if isinstance(parser_or_subparsers, argparse.ArgumentParser):
parser = parser_or_subparsers
elif isinstance(parser_or_subparsers, argparse._SubParsersAction):
parser = parser_or_subparsers.add_parser(name="chemical_shift",
description=help_message, help=help_message)
elif parser_or_subparsers is None:
parser = argparse.ArgumentParser(description=help_message)
# Defaults
if parser.get_default("cls") is None:
parser.set_defaults(cls=ChemicalShiftDataset)
# Arguments unique to this class
arg_groups = {ag.title: ag for ag in parser._action_groups}
# Input arguments
input_group = arg_groups.get("input",
parser.add_argument_group("input"))
try:
input_group.add_argument("-delays", dest="delays", metavar="DELAY",
nargs="+", type=float, help="""delays for each infile,
if infiles represent a series; number of delays must match
number of infiles""")
except argparse.ArgumentError:
pass
# Action arguments
action_group = arg_groups.get("action",
parser.add_argument_group("action"))
try:
action_group.add_argument("-relax", dest="calc_relax", type=str,
nargs="?", default=None, const="r1", help="""Calculate
relaxation rates and standard errors; may additionally
specify type of relaxation being measured (e.g. r1, r2)""")
except argparse.ArgumentError:
pass
# Arguments inherited from superclass
SequenceDataset.construct_argparser(parser)
return parser
def __init__(self, delays=None, calc_relax=False, calc_pdist=False,
outfile=None, interactive=False, **kwargs):
"""
Arguments:
infile{s} (list): Path(s) to input file(s); may contain
environment variables and wildcards
delays (list): Delays corresponding to series of infiles; used to
name columns of merged sequence DataFrame
use_indexes (list): Residue indexes to select from DataFrame,
once DataFrame has already been loaded
calc_pdist (bool): Calculate probability distribution
pdist_kw (dict): Keyword arguments used to configure
probability distribution calculation
dataset_cache (dict): Cache of previously-loaded Datasets
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
"""
# Process arguments
verbose = kwargs.get("verbose", 1)
self.dataset_cache = kwargs.get("dataset_cache", None)
# Read data
if delays is not None:
kwargs["infile_column_prefixes"] = ["{0:3.1f} ms".format(delay) for
delay in delays]
self.sequence_df = self.read(**kwargs)
# Cut data
if "use_indexes" in kwargs:
use_indexes = np.array(kwargs.pop("use_indexes"))
res_index = np.array(
[int(i.split(":")[1]) for i in self.sequence_df.index.values])
self.sequence_df = self.sequence_df[
np.in1d(res_index, use_indexes)]
# Calculate relaxation
if calc_relax:
relax_kw = kwargs.pop("relax_kw", {})
relax_kw["kind"] = calc_relax
self.sequence_df = self.calc_relax(df=self.sequence_df,
relax_kw=relax_kw, **kwargs)
# Calculate probability distribution
if calc_pdist:
self.pdist_df = self.calc_pdist(df=self.sequence_df, **kwargs)
# Output data
if verbose >= 2:
print("Processed sequence DataFrame:")
print(self.sequence_df)
if calc_pdist:
print("Processed pdist DataFrame:")
print(self.pdist_df)
# Write data
if outfile is not None:
self.write(df=self.sequence_df, outfile=outfile, **kwargs)
# Interactive prompt
if interactive:
embed()
def read(self, **kwargs):
"""
Reads sequence from one or more *infiles* into a DataFrame.
Extends :class:`Dataset<myplotspec.Dataset.Dataset>` with
option to read in residue indexes.
"""
from os import devnull
import re
from subprocess import Popen, PIPE
from ..myplotspec import multi_pop_merged
# Functions
def convert_name(name):
return "{0}:{1}".format(name[-4:-1].upper(), name[2:-4])
# Process arguments
infile_args = multi_pop_merged(["infile", "infiles"], kwargs)
infiles = self.infiles = self.process_infiles(infiles=infile_args)
if len(infiles) == 0:
raise Exception(sformat("""No infiles found matching
'{0}'""".format(infile_args)))
re_h5 = re.compile(
r"^(?P<path>(.+)\.(h5|hdf5))((:)?(/)?(?P<address>.+))?$",
flags=re.UNICODE)
infile_column_prefixes = kwargs.get("infile_column_prefixes",
range(len(infiles)))
# Load Data
dfs = []
for infile in infiles:
if re_h5.match(infile):
df = self._read_hdf5(infile, **kwargs)
else:
with open(devnull, "w") as fnull:
header = " ".join(
Popen("head -n 1 {0}".format(infile), stdout=PIPE,
stderr=fnull, shell=True).stdout.read().strip().split(
"\t"))
ccpnmr_header = sformat("""Number # Position F1 Position F2
Assign F1 Assign F2 Height Volume Line Width F1 (Hz) Line
Width F2 (Hz) Merit Details Fit Method Vol. Method""")
if (header == ccpnmr_header):
read_csv_kw = dict(index_col=None, delimiter="\t",
dtype={"Position F1": np.float32,
"Position F2": np.float32, "Assign F1": np.str,
"Height": np.float32, "Volume": np.float32},
usecols=[str("Position F1"), str("Position F2"),
str("Assign F1"), str("Height"), str("Volume")],
converters={"Assign F1": convert_name})
read_csv_kw.update(kwargs.get("read_csv_kw", {}))
kwargs["read_csv_kw"] = read_csv_kw
df = self._read_text(infile, **kwargs)
df.columns = ["1H", "15N", "residue", "height", "volume"]
df.set_index("residue", inplace=True)
else:
df = self._read_text(infile, **kwargs)
dfs.append(df)
if len(dfs) == 1:
df = dfs[0]
else:
df = dfs[0][["1H", "15N"]]
if len(dfs) != len(infile_column_prefixes):
raise Exception(sformat("""Numb of infile column prefixes
must match number of provided infiles"""))
for df_i, prefix_i in zip(dfs, infile_column_prefixes):
df["{0} height".format(prefix_i)] = df_i["height"]
df["{0} volume".format(prefix_i)] = df_i["volume"]
self.dfs = dfs
# Sort
if df.index.name == "residue":
df = df.loc[
sorted(df.index.values, key=lambda x: int(x.split(":")[1]))]
else:
df = df.loc[sorted(df.index.values)]
return df
def calc_relax(self, **kwargs):
"""
Calculates relaxation rates.
Arguments:
df (DataFrame): DataFrame; probability distribution will be
calculated for each column using rows as data points
relax_kw (dict): Keyword arguments used to configure
relaxation rate calculation
relax_kw[kind] (str): Kind of relaxation rate being
calculated; will be used to name column
relax_kw[intensity_method] (str): Metric to use for peak
            intensity; may be 'height' (default) or 'volume'
relax_kw[error_method] (str): Metric to use for error
calculation; may be 'rmse' for root-mean-square error
(default) or 'mae' for mean absolute error
relax_kw[n_synth_datasets] (int): Number of synthetic datasets
to use for error calculation
Returns:
DataFrame: Sequence DataFrame with additional columns for
relaxation rate and standard error
"""
import re
from scipy.optimize import curve_fit
# Process arguments
verbose = kwargs.get("verbose", 1)
df = kwargs.get("df")
if df is None:
if hasattr(self, "sequence_df"):
df = self.sequence_df
else:
                raise ValueError("No DataFrame available for relaxation rate calculation")
relax_kw = kwargs.get("relax_kw", {})
kind = relax_kw.get("kind", "r1")
intensity_method = relax_kw.get("intensity_method", "height")
error_method = relax_kw.get("error_method", "mae")
n_synth_datasets = relax_kw.get("n_synth_datasets", 1000)
# Calculate relaxation rates
re_column = re.compile(
"^(?P<delay>\d+\.?\d*?) ms {0}".format(intensity_method))
columns = [c for c in df.columns.values if re_column.match(c)]
delays = np.array(
[re.match(re_column, c).groupdict()["delay"] for c in columns],
np.float) / 1000
def calc_relax_rate(residue, **kwargs):
"""
"""
from .. import multiprocess_map
if verbose >= 1:
wiprint(
"""Calculating {0} relaxation rate for {1}""".format(kind,
residue.name))
def model_function(delay, intensity, relaxation):
return intensity * np.exp(-1 * delay * relaxation)
I = np.array(residue.filter(columns, np.float64))
I0, R = curve_fit(model_function, delays, I, p0=(I[0], 1.0))[0]
# Calculate error
if error_method == "rmse":
error = np.sqrt(
np.mean((I - model_function(delays, I0, R)) ** 2))
elif error_method == "mae":
error = np.mean(
np.sqrt((I - model_function(delays, I0, R)) ** 2))
# Construct synthetic relaxation profiles
synth_datasets = np.zeros((n_synth_datasets, I.size))
for i, I_mean in enumerate(model_function(delays, I0, R)):
synth_datasets[:, i] = np.random.normal(I_mean, error,
n_synth_datasets)
def synth_fit_decay(synth_intensity):
try:
synth_I0, synth_R = \
curve_fit(model_function, delays, synth_intensity,
p0=(I0, R))[0]
return synth_I0, synth_R
except RuntimeError:
if verbose >= 1:
wiprint("""Unable to calculate standard error for {0}
""".format(residue.name))
return (np.nan, np.nan)
# Calculate standard error
synth_I0_Rs = np.array(multiprocess_map(synth_fit_decay,
synth_datasets, 16))
I0_se = np.std(synth_I0_Rs[:,0])
R_se = np.std(synth_I0_Rs[:,1])
return pd.Series([I0, I0_se, R, R_se])
# Calculate relaxation rates and standard errors
fit = df.apply(calc_relax_rate, axis=1)
# Format and return
fit.columns = ["I0", "I0 se", kind, kind + " se"]
df = df.join(fit)
return df
#################################### MAIN #####################################
if __name__ == "__main__":
ChemicalShiftDataset.main()
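# A hedged command-line sketch (comments only). The -infiles and -outfile
# flags and the main() entry point are assumed to come from the
# SequenceDataset/Dataset superclasses, which are not shown in this file;
# -delays and -relax are defined above.
#
#     python ChemicalShiftDataset.py \
#         -infiles peaks_000ms.txt peaks_050ms.txt peaks_100ms.txt \
#         -delays 0 50 100 \
#         -relax r1 -outfile r1_rates.csv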
|
|
''' Provide basic Bokeh server objects that use a Tornado ``HTTPServer`` and
``BokehTornado`` Tornado Application to service Bokeh Server Applications.
There are two public classes in this module:
:class:`~bokeh.server.server.BaseServer`
This is a lightweight class to explicitly coordinate the components needed
to run a Bokeh server (A :class:`~bokeh.server.tornado.BokehTornado`
instance, and Tornado ``HTTPServer`` and a Tornado ``IOLoop``)
:class:`~bokeh.server.server.Server`
This higher-level convenience class only needs to be configured with Bokeh
:class:`~bokeh.application.application.Application` instances, and will
automatically create and coordinate the lower level Tornado components.
'''
from __future__ import absolute_import, print_function
import atexit
import logging
log = logging.getLogger(__name__)
import signal
import sys
import tornado
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from .. import __version__
from ..application import Application
from ..core.properties import Bool, Int, List, String
from ..resources import DEFAULT_SERVER_PORT
from ..util.options import Options
from .util import bind_sockets, create_hosts_whitelist
from .tornado import BokehTornado
# This class itself is intentionally undocumented (it is used to generate
# documentation elsewhere)
class _ServerOpts(Options):
num_procs = Int(default=1, help="""
The number of worker processes to start for the HTTP server. If an explicit
``io_loop`` is also configured, then ``num_procs=1`` is the only compatible
value. Use ``BaseServer`` to coordinate an explicit ``IOLoop`` with a
multi-process HTTP server.
A value of 0 will auto detect number of cores.
Note that due to limitations inherent in Tornado, Windows does not support
``num_procs`` values greater than one! In this case consider running
multiple Bokeh server instances behind a load balancer.
""")
address = String(default=None, help="""
The address the server should listen on for HTTP requests.
""")
port = Int(default=DEFAULT_SERVER_PORT, help="""
The port number the server should listen on for HTTP requests.
""")
prefix = String(default="", help="""
A URL prefix to use for all Bokeh server paths.
""")
allow_websocket_origin = List(String, default=None, help="""
A list of hosts that can connect to the websocket.
This is typically required when embedding a Bokeh server app in an external
web site using :func:`~bokeh.embed.server_document` or similar.
If None, "localhost" is used.
""")
use_xheaders = Bool(default=False, help="""
Whether to have the Bokeh server override the remote IP and URI scheme
and protocol for all requests with ``X-Real-Ip``, ``X-Forwarded-For``,
``X-Scheme``, ``X-Forwarded-Proto`` headers (if they are provided).
""")
class BaseServer(object):
    ''' Explicitly coordinate the low-level Tornado components required to run a
Bokeh server:
* A Tornado ``IOLoop`` to run the Bokeh server machinery.
* a ``BokehTornado`` Tornado application that defines the Bokeh server
machinery.
* a Tornado ``HTTPServer`` to direct HTTP requests
All three of these components must be passed to ``BaseServer``, which will
initialize the ``BokehTornado`` instance on the ``io_loop``. The
``http_server`` must have been previously created and initialized with the
``BokehTornado`` instance.
'''
def __init__(self, io_loop, tornado_app, http_server):
''' Create a ``BaseServer`` instance.
Args:
io_loop (IOLoop) :
A Tornado ``IOLoop`` to run the Bokeh Tornado application on.
tornado_app (BokehTornado) :
An instance of the Bokeh Tornado application that generates
Bokeh Documents and Sessions.
http_server (HTTPServer) :
A Tornado ``HTTPServer`` to service HTTP requests for Bokeh
                applications. Should have already been configured with the
``tornado_app`` when created.
'''
self._started = False
self._stopped = False
self._http = http_server
self._loop = io_loop
self._tornado = tornado_app
self._tornado.initialize(io_loop)
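    # A minimal wiring sketch (comments only). Constructor arguments beyond
    # those named in the docstring above are assumptions about this Bokeh
    # version; HTTPServer and IOLoop are the standard Tornado classes.
    #
    #     tornado_app = BokehTornado(applications)   # Bokeh server machinery
    #     http_server = HTTPServer(tornado_app)      # serves HTTP requests for it
    #     http_server.listen(5006)
    #     server = BaseServer(IOLoop.current(), tornado_app, http_server)
    #     server.run_until_shutdown()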
@property
def io_loop(self):
''' The Tornado ``IOLoop`` that this Bokeh Server is running on.
'''
return self._loop
def start(self):
''' Install the Bokeh Server and its background tasks on a Tornado
``IOLoop``.
This method does *not* block and does *not* affect the state of the
        Tornado ``IOLoop``. You must start and stop the loop yourself, i.e.
this method is typically useful when you are already explicitly
managing an ``IOLoop`` yourself.
To start a Bokeh server and immediately "run forever" in a blocking
manner, see :func:`~bokeh.server.server.BaseServer.run_until_shutdown`.
'''
assert not self._started, "Already started"
self._started = True
self._tornado.start()
def stop(self, wait=True):
''' Stop the Bokeh Server.
This stops and removes all Bokeh Server ``IOLoop`` callbacks, as well
as stops the ``HTTPServer`` that this instance was configured with.
Args:
wait (bool):
Whether to wait for orderly cleanup (default: True)
Returns:
None
'''
assert not self._stopped, "Already stopped"
self._stopped = True
self._tornado.stop(wait)
self._http.stop()
def unlisten(self):
''' Stop listening on ports. The server will no longer be usable after
calling this function.
Returns:
None
'''
self._http.close_all_connections()
self._http.stop()
def run_until_shutdown(self):
''' Run the Bokeh Server until shutdown is requested by the user,
either via a Keyboard interrupt (Ctrl-C) or SIGTERM.
Calling this method will start the Tornado ``IOLoop`` and block
all execution in the calling process.
Returns:
None
'''
if not self._started:
self.start()
# Install shutdown hooks
atexit.register(self._atexit)
signal.signal(signal.SIGTERM, self._sigterm)
try:
self._loop.start()
except KeyboardInterrupt:
print("\nInterrupted, shutting down")
self.stop()
def get_session(self, app_path, session_id):
''' Get an active session by application path and session ID.
Args:
app_path (str) :
The configured application path for the application to return
a session for.
session_id (str) :
The session ID of the session to retrieve.
Returns:
ServerSession
'''
return self._tornado.get_session(app_path, session_id)
def get_sessions(self, app_path=None):
''' Gets all currently active sessions for applications.
Args:
app_path (str, optional) :
The configured application path for the application to return
sessions for. If None, return active sessions for all
applications. (default: None)
Returns:
list[ServerSession]
'''
if app_path is not None:
return self._tornado.get_sessions(app_path)
all_sessions = []
for path in self._tornado.app_paths:
all_sessions += self._tornado.get_sessions(path)
return all_sessions
def show(self, app_path, browser=None, new='tab'):
''' Opens an app in a browser window or tab.
This method is useful for testing or running Bokeh server applications
on a local machine but should not be called when running a Bokeh server for
an actual deployment.
Args:
app_path (str) : the app path to open
The part of the URL after the hostname:port, with leading slash.
browser (str, optional) : browser to show with (default: None)
For systems that support it, the **browser** argument allows
specifying which browser to display in, e.g. "safari", "firefox",
"opera", "windows-default" (see the ``webbrowser`` module
documentation in the standard lib for more details).
new (str, optional) : window or tab (default: "tab")
If ``new`` is 'tab', then opens a new tab.
If ``new`` is 'window', then opens a new window.
Returns:
None
'''
if not app_path.startswith("/"):
raise ValueError("app_path must start with a /")
address_string = 'localhost'
if self.address is not None and self.address != '':
address_string = self.address
url = "http://%s:%d%s%s" % (address_string, self.port, self.prefix, app_path)
from bokeh.util.browser import view
view(url, browser=browser, new=new)
_atexit_ran = False
def _atexit(self):
if self._atexit_ran:
return
self._atexit_ran = True
log.debug("Shutdown: cleaning up")
if not self._stopped:
self.stop(wait=False)
def _sigterm(self, signum, frame):
print("Received signal %d, shutting down" % (signum,))
# Tell self._loop.start() to return.
self._loop.add_callback_from_signal(self._loop.stop)
class Server(BaseServer):
''' A high level convenience class to run a Bokeh server.
This class can automatically coordinate the three base-level
components required to run a Bokeh server:
* A Tornado ``IOLoop`` to run the Bokeh server machinery.
* a ``BokehTornado`` Tornado application that defines the Bokeh server
machinery.
* a Tornado ``HTTPServer`` to direct HTTP requests
This high level ``Server`` class has some limitations. In particular, it is
not possible to set an explicit ``io_loop`` and ``num_procs`` other than 1
at the same time. To do that, it is necessary to use ``BaseServer`` and
coordinate the three components above explicitly.
'''
def __init__(self, applications, io_loop=None, http_server_kwargs=None, **kwargs):
''' Create a ``Server`` instance.
Args:
applications (dict[str, Application] or Application or callable) :
A mapping from URL paths to Application instances, or a single
Application to put at the root URL.
The Application is a factory for Documents, with a new Document
initialized for each Session. Each application is identified
by a path that corresponds to a URL, like "/" or "/myapp"
If a single Application is provided, it is mapped to the URL
path "/" automatically.
As a convenience, a callable may also be provided, in which case an
Application will be created for it using a ``FunctionHandler``.
io_loop (IOLoop, optional) :
An explicit Tornado ``IOLoop`` to run Bokeh Server code on. If
None, ``IOLoop.current()`` will be used (default: None)
http_server_kwargs (dict, optional) :
Extra arguments passed to ``tornado.httpserver.HTTPServer``.
E.g. ``max_buffer_size`` to specify the maximum upload size.
More details can be found at:
http://www.tornadoweb.org/en/stable/httpserver.html#http-server
If None, no extra arguments are passed (default: None)
Additionally, the following options may be passed to configure the
operation of ``Server``:
.. bokeh-options:: _ServerOpts
:module: bokeh.server.server
Any remaining keyword arguments will be passed as-is to
``BokehTornado``.
'''
log.info("Starting Bokeh server version %s (running on Tornado %s)" % (__version__, tornado.version))
from bokeh.application.handlers.function import FunctionHandler
if callable(applications):
applications = Application(FunctionHandler(applications))
if isinstance(applications, Application):
applications = { '/' : applications }
for k, v in list(applications.items()):
if callable(v):
applications[k] = Application(FunctionHandler(v))
opts = _ServerOpts(kwargs)
self._port = opts.port
self._address = opts.address
self._prefix = opts.prefix
if opts.num_procs != 1:
assert all(app.safe_to_fork for app in applications.values()), (
'User application code has run before attempting to start '
'multiple processes. This is considered an unsafe operation.')
if opts.num_procs > 1 and io_loop is not None:
raise RuntimeError(
"Setting both num_procs and io_loop in Server is incompatible. Use BaseServer to coordinate an explicit IOLoop and multi-process HTTPServer"
)
if opts.num_procs > 1 and sys.platform == "win32":
raise RuntimeError("num_procs > 1 not supported on Windows")
if http_server_kwargs is None:
http_server_kwargs = {}
http_server_kwargs.setdefault('xheaders', opts.use_xheaders)
sockets, self._port = bind_sockets(self.address, self.port)
extra_websocket_origins = create_hosts_whitelist(opts.allow_websocket_origin, self.port)
try:
tornado_app = BokehTornado(applications, extra_websocket_origins=extra_websocket_origins, prefix=self.prefix, **kwargs)
http_server = HTTPServer(tornado_app, **http_server_kwargs)
http_server.start(opts.num_procs)
http_server.add_sockets(sockets)
except Exception:
for s in sockets:
s.close()
raise
# Can only refer to IOLoop after HTTPServer.start() is called, see #5524
if io_loop is None:
io_loop = IOLoop.current()
super(Server, self).__init__(io_loop, tornado_app, http_server)
@property
def prefix(self):
''' The configured URL prefix to use for all Bokeh server paths.
'''
return self._prefix
@property
def port(self):
''' The configured port number that the server listens on for HTTP
requests.
'''
return self._port
@property
def address(self):
''' The configured address that the server listens on for HTTP
requests.
'''
return self._address
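# Usage sketch (an illustrative addition, not part of the module above): run a
# trivial app at /demo with the high level ``Server`` class.  The app function
# name ``_demo_app`` is made up for this example, and the snippet assumes a
# working Bokeh installation so that ``bokeh.plotting`` imports.
if __name__ == '__main__':
    from bokeh.plotting import figure

    def _demo_app(doc):
        # Each new session gets its own Document; add a simple plot to it.
        fig = figure(title="demo")
        fig.line([1, 2, 3], [4, 6, 5])
        doc.add_root(fig)

    demo_server = Server({'/demo': _demo_app})
    demo_server.start()
    demo_server.show('/demo')          # open a browser tab at /demo
    demo_server.run_until_shutdown()   # block until Ctrl-C or SIGTERM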
|
|
# -*- coding: utf-8 -*-
"""
Modbus Payload Builders
------------------------
A collection of utilities for building and decoding
modbus message payloads.
"""
from struct import pack, unpack
from pymodbus3.interfaces import IPayloadBuilder
from pymodbus3.constants import Endian
from pymodbus3.utilities import pack_bitstring
from pymodbus3.utilities import unpack_bitstring
from pymodbus3.exceptions import ParameterException
class BinaryPayloadBuilder(IPayloadBuilder):
"""
A utility that helps build payload messages to be
written with the various modbus messages. It really is just
a simple wrapper around the struct module; however, it saves
time looking up the format strings. What follows is a simple
example::
builder = BinaryPayloadBuilder(endian=Endian.Little)
builder.add_8bit_uint(1)
builder.add_16bit_uint(2)
payload = builder.build()
"""
def __init__(self, payload=None, endian=Endian.Little):
""" Initialize a new instance of the payload builder
:param payload: Raw payload data to initialize with
:param endian: The endianness of the payload
"""
self._payload = payload or []
self._endian = endian
def to_string(self):
""" Return the payload buffer as a string
:returns: The payload buffer as a string
"""
return b''.join(self._payload)
def __str__(self):
""" Return the payload buffer as a string
:returns: The payload buffer as a string
"""
return self.to_string().decode('utf-8')
def reset(self):
""" Reset the payload buffer
"""
self._payload = []
def build(self):
""" Return the payload buffer as a list
This list is two bytes per element and can
thus be treated as a list of registers.
:returns: The payload buffer as a list
"""
string = self.to_string()
length = len(string)
string = string + (b'\x00' * (length % 2))
return [string[i:i+2] for i in range(0, length, 2)]
def add_bits(self, values):
""" Adds a collection of bits to be encoded
If these are less than a multiple of eight,
they will be left padded with 0 bits to make
it so.
:param values: The value to add to the buffer
"""
value = pack_bitstring(values)
self._payload.append(value)
def add_8bit_uint(self, value):
""" Adds a 8 bit unsigned int to the buffer
:param value: The value to add to the buffer
"""
fstring = self._endian + 'B'
self._payload.append(pack(fstring, value))
def add_16bit_uint(self, value):
""" Adds a 16 bit unsigned int to the buffer
:param value: The value to add to the buffer
"""
fstring = self._endian + 'H'
self._payload.append(pack(fstring, value))
def add_32bit_uint(self, value):
""" Adds a 32 bit unsigned int to the buffer
:param value: The value to add to the buffer
"""
fstring = self._endian + 'I'
self._payload.append(pack(fstring, value))
def add_64bit_uint(self, value):
""" Adds a 64 bit unsigned int to the buffer
:param value: The value to add to the buffer
"""
fstring = self._endian + 'Q'
self._payload.append(pack(fstring, value))
def add_8bit_int(self, value):
""" Adds a 8 bit signed int to the buffer
:param value: The value to add to the buffer
"""
fstring = self._endian + 'b'
self._payload.append(pack(fstring, value))
def add_16bit_int(self, value):
""" Adds a 16 bit signed int to the buffer
:param value: The value to add to the buffer
"""
fstring = self._endian + 'h'
self._payload.append(pack(fstring, value))
def add_32bit_int(self, value):
""" Adds a 32 bit signed int to the buffer
:param value: The value to add to the buffer
"""
fstring = self._endian + 'i'
self._payload.append(pack(fstring, value))
def add_64bit_int(self, value):
""" Adds a 64 bit signed int to the buffer
:param value: The value to add to the buffer
"""
fstring = self._endian + 'q'
self._payload.append(pack(fstring, value))
def add_32bit_float(self, value):
""" Adds a 32 bit float to the buffer
:param value: The value to add to the buffer
"""
fstring = self._endian + 'f'
self._payload.append(pack(fstring, value))
def add_64bit_float(self, value):
""" Adds a 64 bit float(double) to the buffer
:param value: The value to add to the buffer
"""
fstring = self._endian + 'd'
self._payload.append(pack(fstring, value))
def add_string(self, value):
""" Adds a string to the buffer
:param value: The value to add to the buffer
"""
fstring = self._endian + str(len(value)) + 's'
self._payload.append(pack(fstring, value))
class BinaryPayloadDecoder(object):
"""
A utility that helps decode payload messages from a modbus
response message. It really is just a simple wrapper around
the struct module; however, it saves time looking up the format
strings. What follows is a simple example::
decoder = BinaryPayloadDecoder(payload)
first = decoder.decode_8bit_uint()
second = decoder.decode_16bit_uint()
"""
def __init__(self, payload, endian=Endian.Little):
""" Initialize a new payload decoder
:param payload: The payload to decode with
:param endian: The endianness of the payload
"""
self._payload = payload
self._pointer = 0x00
self._endian = endian
@classmethod
def from_registers(cls, registers, endian=Endian.Little):
""" Initialize a payload decoder with the result of
reading a collection of registers from a modbus device.
The registers are treated as a list of 2 byte values.
We have to do this because of how the data has already
been decoded by the rest of the library.
:param registers: The register results to initialize with
:param endian: The endianness of the payload
:returns: An initialized PayloadDecoder
"""
if isinstance(registers, list): # repack into flat binary
payload = b''.join(pack('>H', x) for x in registers)
return cls(payload, endian)
raise ParameterException('Invalid collection of registers supplied')
@classmethod
def from_coils(cls, coils, endian=Endian.Little):
""" Initialize a payload decoder with the result of
reading a collection of coils from a modbus device.
The coils are treated as a list of bit(boolean) values.
:param coils: The coil results to initialize with
:param endian: The endianness of the payload
:returns: An initialized PayloadDecoder
"""
if isinstance(coils, list):
payload = pack_bitstring(coils)
return cls(payload, endian)
raise ParameterException('Invalid collection of coils supplied')
def reset(self):
""" Reset the decoder pointer back to the start
"""
self._pointer = 0x00
def decode_8bit_uint(self):
""" Decodes a 8 bit unsigned int from the buffer
"""
self._pointer += 1
fstring = self._endian + 'B'
handle = self._payload[self._pointer - 1:self._pointer]
return unpack(fstring, handle)[0]
def decode_bits(self):
""" Decodes a byte worth of bits from the buffer
"""
self._pointer += 1
handle = self._payload[self._pointer - 1:self._pointer]
return unpack_bitstring(handle)
def decode_16bit_uint(self):
""" Decodes a 16 bit unsigned int from the buffer
"""
self._pointer += 2
fstring = self._endian + 'H'
handle = self._payload[self._pointer - 2:self._pointer]
return unpack(fstring, handle)[0]
def decode_32bit_uint(self):
""" Decodes a 32 bit unsigned int from the buffer
"""
self._pointer += 4
fstring = self._endian + 'I'
handle = self._payload[self._pointer - 4:self._pointer]
return unpack(fstring, handle)[0]
def decode_64bit_uint(self):
""" Decodes a 64 bit unsigned int from the buffer
"""
self._pointer += 8
fstring = self._endian + 'Q'
handle = self._payload[self._pointer - 8:self._pointer]
return unpack(fstring, handle)[0]
def decode_8bit_int(self):
""" Decodes a 8 bit signed int from the buffer
"""
self._pointer += 1
fstring = self._endian + 'b'
handle = self._payload[self._pointer - 1:self._pointer]
return unpack(fstring, handle)[0]
def decode_16bit_int(self):
""" Decodes a 16 bit signed int from the buffer
"""
self._pointer += 2
fstring = self._endian + 'h'
handle = self._payload[self._pointer - 2:self._pointer]
return unpack(fstring, handle)[0]
def decode_32bit_int(self):
""" Decodes a 32 bit signed int from the buffer
"""
self._pointer += 4
fstring = self._endian + 'i'
handle = self._payload[self._pointer - 4:self._pointer]
return unpack(fstring, handle)[0]
def decode_64bit_int(self):
""" Decodes a 64 bit signed int from the buffer
"""
self._pointer += 8
fstring = self._endian + 'q'
handle = self._payload[self._pointer - 8:self._pointer]
return unpack(fstring, handle)[0]
def decode_32bit_float(self):
""" Decodes a 32 bit float from the buffer
"""
self._pointer += 4
fstring = self._endian + 'f'
handle = self._payload[self._pointer - 4:self._pointer]
return unpack(fstring, handle)[0]
def decode_64bit_float(self):
""" Decodes a 64 bit float(double) from the buffer
"""
self._pointer += 8
fstring = self._endian + 'd'
handle = self._payload[self._pointer - 8:self._pointer]
return unpack(fstring, handle)[0]
def decode_string(self, size=1):
""" Decodes a string from the buffer
:param size: The size of the string to decode
"""
self._pointer += size
return self._payload[self._pointer - size:self._pointer]
# Exported Identifiers
__all__ = ['BinaryPayloadBuilder', 'BinaryPayloadDecoder']
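# Usage sketch (illustrative, not part of the library): build a payload, turn
# it into 16-bit register values and decode it again.  The values are
# arbitrary, and the register conversion assumes the big-endian layout used by
# ``BinaryPayloadDecoder.from_registers``.
if __name__ == '__main__':
    builder = BinaryPayloadBuilder(endian=Endian.Big)
    builder.add_16bit_uint(1234)
    builder.add_32bit_float(2.5)
    registers = [unpack('>H', chunk)[0] for chunk in builder.build()]

    decoder = BinaryPayloadDecoder.from_registers(registers, endian=Endian.Big)
    print(decoder.decode_16bit_uint())   # -> 1234
    print(decoder.decode_32bit_float())  # -> 2.5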
|
|
# (c) Crown Owned Copyright, 2016. Dstl.
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django_webtest import WebTest
from apps.organisations.models import Organisation
from apps.teams.models import Team
class UserWebTest(WebTest):
def test_update_button_shows_on_user_profile(self):
# Create the two users
get_user_model().objects.create_user(userid='user@0001.com')
get_user_model().objects.create_user(userid='user@0002.com')
# Login as the first user
form = self.app.get(reverse('login')).form
form['userid'] = 'user@0001.com'
response = form.submit()
# Now go to the profile page for the 1st user and see if the button
# exists
response = self.app.get(reverse(
'user-detail',
kwargs={'slug': 'user0001com'}))
button = response.html.find(
'a',
attrs={'id': 'update_profile_link'})
self.assertTrue(button)
# Now visit the profile page for the user who is not logged in
response = self.app.get(reverse(
'user-detail',
kwargs={'slug': 'user0002com'}))
button = response.html.find(
'a',
attrs={'id': 'update_profile_link'})
self.assertFalse(button)
# Test that a user can join an existing team when editing their
# own profile
def test_adding_new_existing_team(self):
get_user_model().objects.create_user(userid='user@0001.com')
o = Organisation(name='org0001')
o.save()
t = Team(name='team0001', organisation=o)
t.save()
# Log in as user
form = self.app.get(reverse('login')).form
form['userid'] = 'user@0001.com'
form.submit()
# Go to the user's profile page and assert that the team is NOT
# showing up in the list of teams they are a member of.
response = self.app.get(reverse(
'user-detail',
kwargs={'slug': 'user0001com'}))
self.assertFalse(response.html.find('a', text='team0001'))
# Now go to the update profile page and check the first team
# in the list of teams.
form = self.app.get(reverse(
'user-update-teams',
kwargs={'slug': 'user0001com'})).form
form.get('team', index=0).checked = True
form.submit()
# Go back to the user's profile page to see if the team is now
# on the list of teams
response = self.app.get(reverse(
'user-detail',
kwargs={'slug': 'user0001com'}))
self.assertTrue(response.html.find('a', text='team0001'))
# Test that the user can join a new team, connecting it to an existing
# organisation
def test_adding_new_team_existing_organisation(self):
get_user_model().objects.create_user(userid='user@0001.com')
o = Organisation(name='org0001')
o.save()
# Log in as user
form = self.app.get(reverse('login')).form
form['userid'] = 'user@0001.com'
form.submit()
# Now go to the update profile page and check the first team
# in the list of teams.
form = self.app.get(reverse(
'user-update-teams',
kwargs={'slug': 'user0001com'})).form
form['teamname'] = 'team0001'
form['organisation'].value = o.pk
form.submit()
# Go back to the user's profile page to see if the team is now
# on the list of teams
response = self.app.get(reverse(
'user-detail',
kwargs={'slug': 'user0001com'}))
self.assertTrue(response.html.find('a', text='team0001'))
# Test that the user can join a new team, connecting it to a new
# organisation
def test_adding_new_team_new_organisation(self):
get_user_model().objects.create_user(userid='user@0001.com')
# Log in as user
form = self.app.get(reverse('login')).form
form['userid'] = 'user@0001.com'
form.submit()
# Now go to the update profile "teams" page and add a new team
form = self.app.get(reverse(
'user-update-teams',
kwargs={'slug': 'user0001com'})).form
form['teamname'] = 'team0001'
form['new_organisation'] = 'org0001'
form.submit()
# Go back to the user's profile page to see if the team and
# organisation is now on the list of teams
response = self.app.get(reverse(
'user-detail',
kwargs={'slug': 'user0001com'}))
self.assertTrue(response.html.find('a', text='team0001'))
self.assertTrue(response.html.find('a', text='org0001'))
def test_alert_for_missing_name(self):
# This user doesn't have a name
get_user_model().objects.create_user(userid='user@0001.com')
# Log in as user
form = self.app.get(reverse('login')).form
form['userid'] = 'user@0001.com'
response = form.submit()
# Now go to the update user information page for this user-detail
response = self.app.get(reverse(
'user-updateprofile',
kwargs={'slug': 'user0001com'}))
# Check that we have an error summary at the top
self.assertTrue(
response.html.find(
'h3',
attrs={'class': 'error-summary-heading'}
)
)
def test_alert_for_missing_other_information(self):
update_page = reverse(
'user-updateprofile',
kwargs={'slug': 'user0001com'})
check_str = 'Please add additional information'
def find_alert(response):
return response.html.find(
'h3',
attrs={'class': 'alert-summary-heading'}
)
# create the user and log them in
u = get_user_model().objects.create_user(
userid='user@0001.com',
name='User 0001',
)
form = self.app.get(reverse('login')).form
form['userid'] = 'user@0001.com'
response = form.submit()
# go to the update page and check for the alert
response = self.app.get(update_page)
self.assertTrue(find_alert(response), check_str)
u.best_way_to_find = 'In the kitchen'
u.best_way_to_contact = 'By phone'
u.phone = '01777777'
u.email = ''
u.save()
# Check that we have an alert summary at the top
response = self.app.get(update_page)
self.assertTrue(find_alert(response), check_str)
u.best_way_to_find = 'In the kitchen'
u.best_way_to_contact = 'By phone'
u.phone = ''
u.email = 'test@test.com'
u.save()
# Check that we have an alert summary at the top
response = self.app.get(update_page)
self.assertTrue(find_alert(response), check_str)
u.best_way_to_find = 'In the kitchen'
u.best_way_to_contact = ''
u.phone = '01777777'
u.email = 'test@test.com'
u.save()
# Check that we have an alert summary at the top
response = self.app.get(update_page)
self.assertTrue(find_alert(response), check_str)
u.best_way_to_find = ''
u.best_way_to_contact = 'By phone'
u.phone = '01777777'
u.email = 'test@test.com'
u.save()
# Check that we have an alert summary at the top
response = self.app.get(update_page)
self.assertTrue(find_alert(response), check_str)
def test_no_error_alert_for_all_information(self):
# This user has all the information
get_user_model().objects.create_user(
userid='user@0001.com',
name='User 0001',
best_way_to_find='In the kitchen',
best_way_to_contact='By phone',
phone='01777777',
email='test@test.com',
)
# Log in as user
form = self.app.get(reverse('login')).form
form['userid'] = 'user@0001.com'
response = form.submit()
# Now go to the update user information page for this user-detail
response = self.app.get(reverse(
'user-updateprofile',
kwargs={'slug': 'user0001com'}))
# Check that we don't have an error or alert summary
self.assertFalse(
response.html.find(
'h1',
attrs={'class': 'error-summary-heading'}
)
)
self.assertFalse(
response.html.find(
'h1',
attrs={'class': 'alert-summary-heading'}
)
)
def test_can_see_own_update_profile_page(self):
# Create the user
get_user_model().objects.create_user(userid='user@0001.com')
# Log in as user
form = self.app.get(reverse('login')).form
form['userid'] = 'user@0001.com'
form.submit()
# Go view the update profile page, it should be a form
response = self.app.get(reverse(
'user-updateprofile',
kwargs={'slug': 'user0001com'}))
self.assertEquals(
response.html.find(
'h1',
attrs={'class': 'form-title'}
).get_text(strip=True),
'Update profileYour personal details'
)
def test_cant_see_other_update_profile_page(self):
# Create the users
get_user_model().objects.create_user(userid='user@0001.com')
get_user_model().objects.create_user(userid='user@0002.com')
# Log in as 1st user.
form = self.app.get(reverse('login')).form
form['userid'] = 'user@0001.com'
form.submit()
# Try to go to the update profile page for the 2nd user.
response = self.app.get(reverse(
'user-updateprofile',
kwargs={'slug': 'user0002com'}))
# We should be getting a redirect :)
self.assertEquals(response.status_int, 302)
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Stardust Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import StardustTestFramework
from test_framework.util import *
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
class MaxUploadTest(StardustTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("STARDUSTD", "stardustd"),
help="stardustd binary to test")
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
self.utxo = []
self.txouts = gen_return_txouts()
def setup_network(self):
# Start a node with maxuploadtarget of 800 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=800", "-blockmaxsize=999000"]))
def mine_full_block(self, node, address):
# Want to create a full block
# We'll generate a 66k transaction below, and 14 of them are close to the 1MB block limit
for j in range(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
# Create a basic transaction that will send change back to ourselves after accounting for a fee,
# and then insert the 128 generated transaction outs in the middle; rawtx[92] is where the number
# of txouts is stored and is the only thing we overwrite from the original transaction
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in range(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
# test_nodes[0] will test what happens if we just keep requesting
# the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 800*1024*1024
daily_buffer = 144 * 4000000
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 576MB will be reserved for relaying new blocks, so expect this to
# succeed for ~235 tries.
for i in range(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in range(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print("Peer 0 disconnected after downloading old block too many times")
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 800 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in range(800):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print("Peer 1 able to repeatedly download new block")
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print("Peer 1 disconnected after trying to download old block")
print("Advancing system time on node to clear counters...")
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print("Peer 2 able to download old block")
[c.disconnect_node() for c in connections]
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
print("Restarting nodes with -whitelist=127.0.0.1")
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in range(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in range(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print("Peer 1 still connected after trying to download old block (whitelisted)")
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
MaxUploadTest().main()
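# Back-of-the-envelope sketch of the upload-target budget used in run_test()
# above (the numbers mirror the test; block sizes are approximate):
#
#     max_bytes_per_day   = 800 * 1024 * 1024   # -maxuploadtarget=800
#     daily_buffer        = 144 * 4000000       # always-relayed new blocks
#     max_bytes_available = max_bytes_per_day - daily_buffer
#     success_count       = max_bytes_available // old_block_size
#
# With blocks of roughly 1 MB this leaves a few hundred "free" requests for
# the old block before the node starts disconnecting peers that ask for it.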
|
|
"""Support for vacuum cleaner robots (botvacs)."""
from datetime import timedelta
from functools import partial
import logging
import voluptuous as vol
from homeassistant.components import group
from homeassistant.const import ( # noqa: F401 # STATE_PAUSED/IDLE are API
ATTR_BATTERY_LEVEL,
ATTR_COMMAND,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_PAUSED,
STATE_IDLE,
)
from homeassistant.loader import bind_hass
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
ENTITY_SERVICE_SCHEMA,
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import ToggleEntity, Entity
from homeassistant.helpers.icon import icon_for_battery_level
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DOMAIN = "vacuum"
SCAN_INTERVAL = timedelta(seconds=20)
GROUP_NAME_ALL_VACUUMS = "all vacuum cleaners"
ENTITY_ID_ALL_VACUUMS = group.ENTITY_ID_FORMAT.format("all_vacuum_cleaners")
ATTR_BATTERY_ICON = "battery_icon"
ATTR_CLEANED_AREA = "cleaned_area"
ATTR_FAN_SPEED = "fan_speed"
ATTR_FAN_SPEED_LIST = "fan_speed_list"
ATTR_PARAMS = "params"
ATTR_STATUS = "status"
SERVICE_CLEAN_SPOT = "clean_spot"
SERVICE_LOCATE = "locate"
SERVICE_RETURN_TO_BASE = "return_to_base"
SERVICE_SEND_COMMAND = "send_command"
SERVICE_SET_FAN_SPEED = "set_fan_speed"
SERVICE_START_PAUSE = "start_pause"
SERVICE_START = "start"
SERVICE_PAUSE = "pause"
SERVICE_STOP = "stop"
VACUUM_SET_FAN_SPEED_SERVICE_SCHEMA = ENTITY_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_FAN_SPEED): cv.string}
)
VACUUM_SEND_COMMAND_SERVICE_SCHEMA = ENTITY_SERVICE_SCHEMA.extend(
{
vol.Required(ATTR_COMMAND): cv.string,
vol.Optional(ATTR_PARAMS): vol.Any(dict, cv.ensure_list),
}
)
STATE_CLEANING = "cleaning"
STATE_DOCKED = "docked"
STATE_RETURNING = "returning"
STATE_ERROR = "error"
STATES = [STATE_CLEANING, STATE_DOCKED, STATE_RETURNING, STATE_ERROR]
DEFAULT_NAME = "Vacuum cleaner robot"
SUPPORT_TURN_ON = 1
SUPPORT_TURN_OFF = 2
SUPPORT_PAUSE = 4
SUPPORT_STOP = 8
SUPPORT_RETURN_HOME = 16
SUPPORT_FAN_SPEED = 32
SUPPORT_BATTERY = 64
SUPPORT_STATUS = 128
SUPPORT_SEND_COMMAND = 256
SUPPORT_LOCATE = 512
SUPPORT_CLEAN_SPOT = 1024
SUPPORT_MAP = 2048
SUPPORT_STATE = 4096
SUPPORT_START = 8192
@bind_hass
def is_on(hass, entity_id=None):
"""Return if the vacuum is on based on the statemachine."""
entity_id = entity_id or ENTITY_ID_ALL_VACUUMS
return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass, config):
"""Set up the vacuum component."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_VACUUMS
)
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_TURN_ON, ENTITY_SERVICE_SCHEMA, "async_turn_on"
)
component.async_register_entity_service(
SERVICE_TURN_OFF, ENTITY_SERVICE_SCHEMA, "async_turn_off"
)
component.async_register_entity_service(
SERVICE_TOGGLE, ENTITY_SERVICE_SCHEMA, "async_toggle"
)
component.async_register_entity_service(
SERVICE_START_PAUSE, ENTITY_SERVICE_SCHEMA, "async_start_pause"
)
component.async_register_entity_service(
SERVICE_START, ENTITY_SERVICE_SCHEMA, "async_start"
)
component.async_register_entity_service(
SERVICE_PAUSE, ENTITY_SERVICE_SCHEMA, "async_pause"
)
component.async_register_entity_service(
SERVICE_RETURN_TO_BASE, ENTITY_SERVICE_SCHEMA, "async_return_to_base"
)
component.async_register_entity_service(
SERVICE_CLEAN_SPOT, ENTITY_SERVICE_SCHEMA, "async_clean_spot"
)
component.async_register_entity_service(
SERVICE_LOCATE, ENTITY_SERVICE_SCHEMA, "async_locate"
)
component.async_register_entity_service(
SERVICE_STOP, ENTITY_SERVICE_SCHEMA, "async_stop"
)
component.async_register_entity_service(
SERVICE_SET_FAN_SPEED,
VACUUM_SET_FAN_SPEED_SERVICE_SCHEMA,
"async_set_fan_speed",
)
component.async_register_entity_service(
SERVICE_SEND_COMMAND, VACUUM_SEND_COMMAND_SERVICE_SCHEMA, "async_send_command"
)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class _BaseVacuum(Entity):
"""Representation of a base vacuum.
Contains common properties and functions for all vacuum devices.
"""
@property
def supported_features(self):
"""Flag vacuum cleaner features that are supported."""
raise NotImplementedError()
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
return None
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
return None
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
raise NotImplementedError()
def stop(self, **kwargs):
"""Stop the vacuum cleaner."""
raise NotImplementedError()
async def async_stop(self, **kwargs):
"""Stop the vacuum cleaner.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.stop, **kwargs))
def return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
raise NotImplementedError()
async def async_return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.return_to_base, **kwargs))
def clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
raise NotImplementedError()
async def async_clean_spot(self, **kwargs):
"""Perform a spot clean-up.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.clean_spot, **kwargs))
def locate(self, **kwargs):
"""Locate the vacuum cleaner."""
raise NotImplementedError()
async def async_locate(self, **kwargs):
"""Locate the vacuum cleaner.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.locate, **kwargs))
def set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
raise NotImplementedError()
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(
partial(self.set_fan_speed, fan_speed, **kwargs)
)
def send_command(self, command, params=None, **kwargs):
"""Send a command to a vacuum cleaner."""
raise NotImplementedError()
async def async_send_command(self, command, params=None, **kwargs):
"""Send a command to a vacuum cleaner.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(
partial(self.send_command, command, params=params, **kwargs)
)
class VacuumDevice(_BaseVacuum, ToggleEntity):
"""Representation of a vacuum cleaner robot."""
@property
def status(self):
"""Return the status of the vacuum cleaner."""
return None
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
charging = False
if self.status is not None:
charging = "charg" in self.status.lower()
return icon_for_battery_level(
battery_level=self.battery_level, charging=charging
)
@property
def state_attributes(self):
"""Return the state attributes of the vacuum cleaner."""
data = {}
if self.status is not None:
data[ATTR_STATUS] = self.status
if self.battery_level is not None:
data[ATTR_BATTERY_LEVEL] = self.battery_level
data[ATTR_BATTERY_ICON] = self.battery_icon
if self.fan_speed is not None:
data[ATTR_FAN_SPEED] = self.fan_speed
data[ATTR_FAN_SPEED_LIST] = self.fan_speed_list
return data
def turn_on(self, **kwargs):
"""Turn the vacuum on and start cleaning."""
raise NotImplementedError()
async def async_turn_on(self, **kwargs):
"""Turn the vacuum on and start cleaning.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.turn_on, **kwargs))
def turn_off(self, **kwargs):
"""Turn the vacuum off stopping the cleaning and returning home."""
raise NotImplementedError()
async def async_turn_off(self, **kwargs):
"""Turn the vacuum off stopping the cleaning and returning home.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.turn_off, **kwargs))
def start_pause(self, **kwargs):
"""Start, pause or resume the cleaning task."""
raise NotImplementedError()
async def async_start_pause(self, **kwargs):
"""Start, pause or resume the cleaning task.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.start_pause, **kwargs))
async def async_pause(self):
"""Not supported."""
pass
async def async_start(self):
"""Not supported."""
pass
class StateVacuumDevice(_BaseVacuum):
"""Representation of a vacuum cleaner robot that supports states."""
@property
def state(self):
"""Return the state of the vacuum cleaner."""
return None
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
charging = bool(self.state == STATE_DOCKED)
return icon_for_battery_level(
battery_level=self.battery_level, charging=charging
)
@property
def state_attributes(self):
"""Return the state attributes of the vacuum cleaner."""
data = {}
if self.battery_level is not None:
data[ATTR_BATTERY_LEVEL] = self.battery_level
data[ATTR_BATTERY_ICON] = self.battery_icon
if self.fan_speed is not None:
data[ATTR_FAN_SPEED] = self.fan_speed
data[ATTR_FAN_SPEED_LIST] = self.fan_speed_list
return data
def start(self):
"""Start or resume the cleaning task."""
raise NotImplementedError()
async def async_start(self):
"""Start or resume the cleaning task.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(self.start)
def pause(self):
"""Pause the cleaning task."""
raise NotImplementedError()
async def async_pause(self):
"""Pause the cleaning task.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(self.pause)
async def async_turn_on(self, **kwargs):
"""Not supported."""
pass
async def async_turn_off(self, **kwargs):
"""Not supported."""
pass
async def async_toggle(self, **kwargs):
"""Not supported."""
pass
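# Usage sketch (illustrative only, not shipped with Home Assistant): a minimal
# entity built on ``StateVacuumDevice``.  The class name and its in-memory
# state are made up; a real platform would live in its own integration and
# talk to an actual device.
class DemoStateVacuum(StateVacuumDevice):
    """A trivial in-memory vacuum used only to illustrate the API above."""

    def __init__(self, name):
        self._name = name
        self._state = STATE_DOCKED
        self._battery = 100

    @property
    def name(self):
        """Return the display name of the vacuum."""
        return self._name

    @property
    def supported_features(self):
        """Advertise the subset of features this sketch implements."""
        return SUPPORT_STATE | SUPPORT_START | SUPPORT_PAUSE | SUPPORT_BATTERY

    @property
    def state(self):
        """Return the current cleaning state."""
        return self._state

    @property
    def battery_level(self):
        """Return the (fixed) battery level."""
        return self._battery

    def start(self):
        """Start or resume the cleaning task."""
        self._state = STATE_CLEANING

    def pause(self):
        """Pause the cleaning task."""
        self._state = STATE_PAUSED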
|
|
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from robot.parsing.settings import Documentation, MetadataList
class Populator(object):
"""Explicit interface for all populators."""
def add(self, row): raise NotImplementedError()
def populate(self): raise NotImplementedError()
class CommentCacher(object):
def __init__(self):
self._init_comments()
def _init_comments(self):
self._comments = []
def add(self, comment):
self._comments.append(comment)
def consume_comments_with(self, function):
for c in self._comments:
function(c)
self._init_comments()
class _TablePopulator(Populator):
def __init__(self, table):
self._table = table
self._populator = NullPopulator()
self._comments = CommentCacher()
def add(self, row):
if self._is_cacheable_comment_row(row):
self._comments.add(row)
else:
self._add(row)
def _add(self, row):
if not self._is_continuing(row):
self._populator.populate()
self._populator = self._get_populator(row)
self._comments.consume_comments_with(self._populator.add)
self._populator.add(row)
def populate(self):
self._comments.consume_comments_with(self._populator.add)
self._populator.populate()
def _is_continuing(self, row):
return row.is_continuing() and self._populator
def _is_cacheable_comment_row(self, row):
return row.is_commented()
class SettingTablePopulator(_TablePopulator):
def _get_populator(self, row):
row.handle_old_style_metadata()
setter = self._table.get_setter(row.head)
if not setter:
return NullPopulator()
if setter.im_class is Documentation:
return DocumentationPopulator(setter)
if setter.im_class is MetadataList:
return MetadataPopulator(setter)
return SettingPopulator(setter)
class VariableTablePopulator(_TablePopulator):
def _get_populator(self, row):
return VariablePopulator(self._table.add, row.head)
class _StepContainingTablePopulator(_TablePopulator):
def _is_continuing(self, row):
return row.is_indented() and self._populator or row.is_commented()
def _is_cacheable_comment_row(self, row):
return row.is_commented() and isinstance(self._populator, NullPopulator)
class TestTablePopulator(_StepContainingTablePopulator):
def _get_populator(self, row):
return TestCasePopulator(self._table.add)
class KeywordTablePopulator(_StepContainingTablePopulator):
def _get_populator(self, row):
return UserKeywordPopulator(self._table.add)
class ForLoopPopulator(Populator):
def __init__(self, for_loop_creator):
self._for_loop_creator = for_loop_creator
self._loop = None
self._populator = NullPopulator()
self._declaration = []
def add(self, row):
dedented_row = row.dedent()
if not self._loop:
declaration_ready = self._populate_declaration(row)
if not declaration_ready:
return
self._loop = self._for_loop_creator(self._declaration)
if not row.is_continuing():
self._populator.populate()
self._populator = StepPopulator(self._loop.add_step)
self._populator.add(dedented_row)
def _populate_declaration(self, row):
if row.starts_for_loop() or row.is_continuing():
self._declaration.extend(row.dedent().data)
return False
return True
def populate(self):
if not self._loop:
self._for_loop_creator(self._declaration)
self._populator.populate()
class _TestCaseUserKeywordPopulator(Populator):
def __init__(self, test_or_uk_creator):
self._test_or_uk_creator = test_or_uk_creator
self._test_or_uk = None
self._populator = NullPopulator()
self._comments = CommentCacher()
def add(self, row):
if row.is_commented():
self._comments.add(row)
return
if not self._test_or_uk:
self._test_or_uk = self._test_or_uk_creator(row.head)
dedented_row = row.dedent()
if dedented_row:
self._handle_data_row(dedented_row)
def _handle_data_row(self, row):
if not self._continues(row):
self._populator.populate()
self._populator = self._get_populator(row)
self._flush_comments_with(self._populate_comment_row)
else:
self._flush_comments_with(self._populator.add)
self._populator.add(row)
def _populate_comment_row(self, crow):
populator = StepPopulator(self._test_or_uk.add_step)
populator.add(crow)
populator.populate()
def _flush_comments_with(self, function):
self._comments.consume_comments_with(function)
def populate(self):
self._populator.populate()
self._flush_comments_with(self._populate_comment_row)
def _get_populator(self, row):
if row.starts_test_or_user_keyword_setting():
setter = self._setting_setter(row)
if not setter:
return NullPopulator()
if setter.im_class is Documentation:
return DocumentationPopulator(setter)
return SettingPopulator(setter)
if row.starts_for_loop():
return ForLoopPopulator(self._test_or_uk.add_for_loop)
return StepPopulator(self._test_or_uk.add_step)
def _continues(self, row):
return row.is_continuing() and self._populator or \
(isinstance(self._populator, ForLoopPopulator) and row.is_indented())
def _setting_setter(self, row):
setting_name = row.test_or_user_keyword_setting_name()
return self._test_or_uk.get_setter(setting_name)
class TestCasePopulator(_TestCaseUserKeywordPopulator):
_item_type = 'test case'
class UserKeywordPopulator(_TestCaseUserKeywordPopulator):
_item_type = 'keyword'
class Comments(object):
def __init__(self):
self._comments = []
def add(self, row):
if row.comments:
self._comments.extend(c.strip() for c in row.comments if c.strip())
@property
def value(self):
return self._comments
class _PropertyPopulator(Populator):
def __init__(self, setter):
self._setter = setter
self._value = []
self._comments = Comments()
def add(self, row):
if not row.is_commented():
self._add(row)
self._comments.add(row)
def _add(self, row):
self._value.extend(row.dedent().data)
class VariablePopulator(_PropertyPopulator):
def __init__(self, setter, name):
_PropertyPopulator.__init__(self, setter)
self._name = name
def populate(self):
self._setter(self._name, self._value,
self._comments.value)
class SettingPopulator(_PropertyPopulator):
def populate(self):
self._setter(self._value, self._comments.value)
class DocumentationPopulator(_PropertyPopulator):
_end_of_line_escapes = re.compile(r'(\\+)n?$')
def populate(self):
self._setter(self._value, self._comments.value)
def _add(self, row):
self._add_to_value(row.dedent().data)
def _add_to_value(self, data):
joiner = self._row_joiner()
if joiner:
self._value.append(joiner)
self._value.append(' '.join(data))
def _row_joiner(self):
if self._is_empty():
return None
return self._joiner_based_on_eol_escapes()
def _is_empty(self):
return not self._value or \
(len(self._value) == 1 and self._value[0] == '')
def _joiner_based_on_eol_escapes(self):
match = self._end_of_line_escapes.search(self._value[-1])
if not match or len(match.group(1)) % 2 == 0:
return '\\n'
if not match.group(0).endswith('n'):
return ' '
return None
class MetadataPopulator(DocumentationPopulator):
def __init__(self, setter):
_PropertyPopulator.__init__(self, setter)
self._name = None
def populate(self):
self._setter(self._name, self._value, self._comments.value)
def _add(self, row):
data = row.dedent().data
if self._name is None:
self._name = data[0] if data else ''
data = data[1:]
self._add_to_value(data)
class StepPopulator(_PropertyPopulator):
def _add(self, row):
self._value.extend(row.data)
def populate(self):
if self._value or self._comments:
self._setter(self._value, self._comments.value)
class NullPopulator(Populator):
def add(self, row): pass
def populate(self): pass
def __nonzero__(self): return False
|
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Sync Server
#
# The Initial Developer of the Original Code is the Mozilla Foundation.
# Portions created by the Initial Developer are Copyright (C) 2010
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Tarek Ziade (tarek@mozilla.com)
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import sys
import threading
try:
import syslog
_SYSLOG_OPTIONS = {'PID': syslog.LOG_PID,
'CONS': syslog.LOG_CONS,
'NDELAY': syslog.LOG_NDELAY,
'NOWAIT': syslog.LOG_NOWAIT,
'PERROR': syslog.LOG_PERROR}
_SYSLOG_PRIORITY = {'EMERG': syslog.LOG_EMERG,
'ALERT': syslog.LOG_ALERT,
'CRIT': syslog.LOG_CRIT,
'ERR': syslog.LOG_ERR,
'WARNING': syslog.LOG_WARNING,
'NOTICE': syslog.LOG_NOTICE,
'INFO': syslog.LOG_INFO,
'DEBUG': syslog.LOG_DEBUG}
_SYSLOG_FACILITY = {'KERN': syslog.LOG_KERN,
'USER': syslog.LOG_USER,
'MAIL': syslog.LOG_MAIL,
'DAEMON': syslog.LOG_DAEMON,
'AUTH': syslog.LOG_AUTH,
'LPR': syslog.LOG_LPR,
'NEWS': syslog.LOG_NEWS,
'UUCP': syslog.LOG_UUCP,
'CRON': syslog.LOG_CRON,
'LOCAL0': syslog.LOG_LOCAL0,
'LOCAL1': syslog.LOG_LOCAL1,
'LOCAL2': syslog.LOG_LOCAL2,
'LOCAL3': syslog.LOG_LOCAL3,
'LOCAL4': syslog.LOG_LOCAL4,
'LOCAL5': syslog.LOG_LOCAL5,
'LOCAL6': syslog.LOG_LOCAL6,
'LOCAL7': syslog.LOG_LOCAL7}
SYSLOG = True
except ImportError:
_SYSLOG_OPTIONS = _SYSLOG_PRIORITY = _SYSLOG_FACILITY = None
SYSLOG = False
import logging
import socket
from time import strftime
import re
try:
from services import logger
except ImportError:
logger = logging.getLogger('CEF') # NOQA
_HOST = socket.gethostname()
_MAXLEN = 1024
# pre-defined signatures
AUTH_FAILURE = 'AuthFail'
CAPTCHA_FAILURE = 'CaptchaFail'
OVERRIDE_FAILURE = 'InvalidAdmin'
ACCOUNT_LOCKED = 'AccountLockout'
PASSWD_RESET_CLR = 'PasswordResetCleared'
_CEF_FORMAT = ('%(date)s %(host)s CEF:%(version)s|%(vendor)s|%(product)s|'
'%(device_version)s|%(signature)s|%(name)s|%(severity)s|'
'cs1Label=requestClientApplication cs1=%(user_agent)s '
'requestMethod=%(method)s request=%(url)s '
'src=%(source)s dest=%(dest)s suser=%(suser)s')
_EXTENSIONS = ['cs1Label', 'cs1', 'requestMethod', 'request', 'src', 'dest',
'suser']
_PREFIX = re.compile(r'([|\\\r\n])')
_EXTENSION = re.compile(r'([\\=])')
_KEY = re.compile(r'^[a-zA-Z0-9_\-.]+$')
def _get_source_ip(environ):
"""Extracts the source IP from the environ."""
if 'HTTP_X_FORWARDED_FOR' in environ:
return environ['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
elif 'REMOTE_ADDR' in environ:
return environ['REMOTE_ADDR']
return None
def _to_str(data):
"""Converts to str, encoding unicode strings with utf8"""
if isinstance(data, unicode):
return data.encode('utf8')
return str(data)
def _convert_prefix(data):
"""Escapes | and = and convert to utf8 string"""
data = _to_str(data)
return _PREFIX.sub(r'\\\1', data)
def _convert_ext(data):
"""Escapes | and = and convert to utf8 string"""
data = _to_str(data)
return _EXTENSION.sub(r'\\\1', data)
_LOG_OPENED = None
# will make log writing atomic per-process
# unfortunately this will not work when several processes use it
# so lines might get mixed on high loads.
# we would need a dedicated per-server log service for this
# to serialize all logs
_log_lock = threading.RLock()
def _syslog(msg, config):
"""Opens the log with configured options and logs."""
logopt = _str2logopt(config.get('syslog_options'))
facility = _str2facility(config.get('syslog_facility'))
ident = config.get('syslog_ident', sys.argv[0])
priority = _str2priority(config.get('syslog.priority'))
with _log_lock:
global _LOG_OPENED
if _LOG_OPENED != (ident, logopt, facility):
syslog.openlog(ident, logopt, facility)
_LOG_OPENED = ident, logopt, facility
syslog.syslog(priority, msg)
def _str2logopt(value):
if value is None:
return 0
res = 0
for option in value.split(','):
res = res | _SYSLOG_OPTIONS[option.strip()]
return res
def _str2priority(value):
if value is None:
return syslog.LOG_INFO
return _SYSLOG_PRIORITY[value.strip()]
def _str2facility(value):
if value is None:
return syslog.LOG_LOCAL4
return _SYSLOG_FACILITY[value.strip()]
def _check_key(key):
if _KEY.match(key) is not None:
return key
msg = 'The "%s" key contains illegal characters' % key
logger.warning(msg)
    # replacing illegal characters with a '?'
    return re.sub(r'[^a-zA-Z0-9_\-.]', '?', key)
def _filter_params(namespace, data, replace_dot='_', splitchar='.'):
"""Keeps only params that starts with the namespace.
"""
params = {}
for key, value in data.items():
if splitchar not in key:
continue
skey = key.split(splitchar)
if skey[0] != namespace:
continue
params[replace_dot.join(skey[1:])] = value
return params
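# Illustrative example (not part of the original module): given a raw config
# such as {'cef.version': '0', 'cef.vendor': 'mozilla', 'unrelated': 1},
# _filter_params('cef', ...) returns {'version': '0', 'vendor': 'mozilla'};
# keys without the 'cef.' prefix are dropped and the remaining dots become
# underscores.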
def _get_fields(name, severity, environ, config, username=None,
signature=None, **kw):
name = _convert_prefix(name)
if signature is None:
signature = name
else:
signature = _convert_prefix(signature)
severity = _convert_prefix(severity)
source = _get_source_ip(environ)
fields = {'severity': severity,
'source': source,
'method': _convert_ext(environ['REQUEST_METHOD']),
'url': _convert_ext(environ['PATH_INFO']),
'dest': _convert_ext(environ.get('HTTP_HOST', u'none')),
'user_agent': _convert_ext(environ.get('HTTP_USER_AGENT',
u'none')),
'signature': signature,
'name': name,
'version': config['version'],
'vendor': config['vendor'],
'device_version': config['device_version'],
'product': config['product'],
'host': _HOST,
'suser': username,
'date': strftime("%b %d %H:%M:%S")}
    # make sure the extra keyword keys only contain legal characters
for key, value in list(kw.items()):
new_key = _check_key(key)
if new_key == key:
continue
kw[new_key] = value
del kw[key]
    # overriding with provided data
fields.update(kw)
return fields
def _format_msg(fields, kw, maxlen=_MAXLEN):
# adding custom extensions
# sorting by size
msg = _CEF_FORMAT % fields
extensions = [(len(str(value)), len(key), key, value)
for key, value in kw.items()
if key not in _EXTENSIONS]
extensions.sort()
msg_len = len(msg)
for value_len, key_len, key, value in extensions:
added_len = value_len + key_len + 2
value = _convert_ext(value)
key = _check_key(key)
if maxlen and msg_len + added_len > maxlen:
# msg is too big.
warn = 'CEF Message too big. %s %s' % (msg, str(kw.items()))
logger.warning(warn)
break
msg += ' %s=%s' % (key, value)
msg_len += added_len
return msg
def log_cef(name, severity, environ, config, username='none',
signature=None, **kw):
"""Creates a CEF record, and emit it in syslog or another file.
Args:
- name: name to log
- severity: integer from 0 to 10
- environ: the WSGI environ object
- config: configuration dict
- signature: CEF signature code - defaults to name value
- username: user name - defaults to 'none'
- extra keywords: extra keys used in the CEF extension
"""
config = _filter_params('cef', config)
fields = _get_fields(name, severity, environ, config, username=username,
signature=signature, **kw)
msg = _format_msg(fields, kw)
if config['file'] == 'syslog':
if not SYSLOG:
raise ValueError('syslog not supported on this platform')
_syslog(msg, config)
else:
with _log_lock:
with open(config['file'], 'a') as f:
f.write('%s\n' % msg)
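# Illustrative sketch (not part of the original module): a minimal call to
# log_cef.  The config keys are namespaced with 'cef.' because log_cef runs
# them through _filter_params('cef', ...); 'cef.file' selects syslog or a
# plain file.  The environ only needs the keys that _get_fields reads.
def _example_log_cef():
    environ = {'REQUEST_METHOD': 'GET',
               'PATH_INFO': '/login',
               'HTTP_HOST': 'example.com',
               'HTTP_USER_AGENT': 'curl/7.0',
               'REMOTE_ADDR': '127.0.0.1'}
    config = {'cef.version': '0',
              'cef.vendor': 'mozilla',
              'cef.device_version': '1.0',
              'cef.product': 'example',
              'cef.file': '/tmp/cef.log'}  # hypothetical log file path
    log_cef(AUTH_FAILURE, 5, environ, config, username='bob',
            signature=AUTH_FAILURE, msg='Invalid password')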
LEVEL_MAP = {
logging.DEBUG: syslog.LOG_DEBUG,
logging.WARNING: syslog.LOG_WARNING,
logging.INFO: syslog.LOG_INFO,
logging.ERROR: syslog.LOG_ERR,
logging.CRITICAL: syslog.LOG_CRIT,
}
class _Formatter(logging.Formatter):
def format(self, record):
kw = record.args
fields = _get_fields(record.msg, kw['severity'], kw['environ'],
{'version': kw.get('version', 0),
'vendor': kw.get('vendor', 'Mozilla'),
'device_version': kw.get('device_version', '1'),
'product': kw.get('product', 'Mozilla')},
username=kw.get('username'),
signature=kw.get('signature'))
datefmt = getattr(self, 'datefmt', None)
if not datefmt:
            datefmt = '%H:%M:%S'
fields['date'] = strftime(datefmt)
return _format_msg(fields, kw['data'], maxlen=kw.get('maxlen'))
class SysLogFormatter(_Formatter):
def format(self, record):
record.args['severity'] = LEVEL_MAP[record.levelno]
return _Formatter.format(self, record)
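# Illustrative sketch (assumption, not part of the original module): wiring
# SysLogFormatter into a stdlib logging handler.  The formatter reads its CEF
# fields from ``record.args``, so the log call passes a single mapping; that
# mapping must provide at least 'environ' (a WSGI environ) and 'data' (extra
# CEF extensions).  The logger name is hypothetical.
def _example_syslog_formatter(environ):
    handler = logging.StreamHandler()
    handler.setFormatter(SysLogFormatter())
    cef_logger = logging.getLogger('cef.example')
    cef_logger.addHandler(handler)
    cef_logger.warning('AuthFail',
                       {'environ': environ, 'data': {}, 'username': 'bob'})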
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import pymel.core as pm
class RenderSlicer(object):
"""A tool to help slice single frame renders in to many little parts which
will help it to be rendered in small parts in a render farm.
"""
def __init__(self, camera=None):
self._camera = None
self.camera = camera
@property
def slices_in_x(self):
"""getter for _slices_in_x attribute
"""
return self.camera.slicesInX.get()
@slices_in_x.setter
def slices_in_x(self, slices_in_x):
"""setter for _slices_in_x attribute
"""
self.camera.slicesInX.set(self._validate_slices_in_x(slices_in_x))
@classmethod
def _validate_slices_in_x(cls, slices_in_x):
"""validates the slices_in_x value
"""
if not isinstance(slices_in_x, int):
raise TypeError(
"%s.slices_in_x should be a non-zero positive integer, not %s"
% (cls.__name__, slices_in_x.__class__.__name__)
)
if slices_in_x <= 0:
raise ValueError(
"%s.slices_in_x should be a non-zero positive integer" %
cls.__name__
)
return slices_in_x
@property
def slices_in_y(self):
"""getter for _slices_in_y attribute
"""
return self.camera.slicesInY.get()
@slices_in_y.setter
def slices_in_y(self, slices_in_y):
"""setter for _slices_in_y attribute
"""
self.camera.slicesInY.set(self._validate_slices_in_y(slices_in_y))
@classmethod
def _validate_slices_in_y(cls, slices_in_y):
"""validates the slices_in_y value
"""
if not isinstance(slices_in_y, int):
raise TypeError(
"%s.slices_in_y should be a non-zero positive integer, not %s"
% (cls.__name__, slices_in_y.__class__.__name__)
)
if slices_in_y <= 0:
raise ValueError(
"%s.slices_in_y should be a non-zero positive integer" %
cls.__name__
)
return slices_in_y
@property
def camera(self):
"""getter for the _camera attribute
"""
return self._camera
@camera.setter
def camera(self, camera):
"""setter for the _camera attribute
:param camera: A Maya camera
:return: None
"""
camera = self._validate_camera(camera)
self._create_data_attributes(camera)
self._camera = camera
@classmethod
def _validate_camera(cls, camera):
"""validates the given camera
"""
if camera is None:
raise TypeError("Please supply a Maya camera")
if not isinstance(camera, pm.nt.Camera):
raise TypeError(
"%s.camera should be a Maya camera, not %s" % (
cls.__name__,
camera.__class__.__name__
)
)
return camera
@classmethod
def _create_data_attributes(cls, camera):
"""creates slicer data attributes inside the camera
:param pm.nt.Camera camera: A maya camera
"""
        # data attributes stored on the camera:
        #   isSliced
        #   nonSlicedResolutionX / nonSlicedResolutionY (original resolution)
        #   slicesInX / slicesInY
if not camera.hasAttr('isSliced'):
camera.addAttr('isSliced', at='bool')
if not camera.hasAttr('nonSlicedResolutionX'):
camera.addAttr('nonSlicedResolutionX', at='short')
if not camera.hasAttr('nonSlicedResolutionY'):
camera.addAttr('nonSlicedResolutionY', at='short')
if not camera.hasAttr('slicesInX'):
camera.addAttr('slicesInX', at='short')
if not camera.hasAttr('slicesInY'):
camera.addAttr('slicesInY', at='short')
def _store_data(self):
"""stores slicer data inside the camera
"""
self._create_data_attributes(self.camera)
self.camera.isSliced.set(self.is_sliced)
# get the current render resolution
dres = pm.PyNode("defaultResolution")
width = dres.width.get()
height = dres.height.get()
self.camera.nonSlicedResolutionX.set(width)
self.camera.nonSlicedResolutionY.set(height)
self.camera.slicesInX.set(self.slices_in_x)
self.camera.slicesInY.set(self.slices_in_y)
@property
def is_sliced(self):
"""A shortcut for the camera.isSliced attribute
"""
if self.camera.hasAttr('isSliced'):
return self.camera.isSliced.get()
return False
@is_sliced.setter
def is_sliced(self, is_sliced):
"""A shortcut for the camera.isSliced attribute
"""
if not self.camera.hasAttr('isSliced'):
self._create_data_attributes(self.camera)
self.camera.isSliced.set(is_sliced)
def unslice(self):
"""resets the camera to original non-sliced state
"""
# unslice the camera
dres = pm.PyNode('defaultResolution')
# set the resolution to original
dres.width.set(self.camera.getAttr('nonSlicedResolutionX'))
dres.height.set(self.camera.getAttr('nonSlicedResolutionY'))
dres.pixelAspect.set(1)
self.camera.isSliced.set(False)
def unslice_scene(self):
"""scans the scene cameras and unslice the scene
"""
dres = pm.PyNode('defaultResolution')
dres.aspectLock.set(0)
# TODO: check multi sliced camera
for cam in pm.ls(type=pm.nt.Camera):
if cam.hasAttr('isSliced') and cam.isSliced.get():
dres.width.set(cam.nonSlicedResolutionX.get())
dres.height.set(cam.nonSlicedResolutionY.get())
dres.pixelAspect.set(1)
cam.isSliced.set(False)
def slice(self, slices_in_x, slices_in_y):
"""slices all renderable cameras
"""
# set render resolution
self.unslice_scene()
self.is_sliced = True
self._store_data()
sx = self.slices_in_x = slices_in_x
sy = self.slices_in_y = slices_in_y
# set render resolution
d_res = pm.PyNode("defaultResolution")
h_res = d_res.width.get()
v_res = d_res.height.get()
        # this system only works when the aspect lock is off and the pixel
        # aspect is 1
d_res.aspectLock.set(0)
d_res.pixelAspect.set(1)
d_res.width.set(h_res / float(sx))
d_res.pixelAspect.set(1)
d_res.height.set(v_res / float(sy))
d_res.pixelAspect.set(1)
# use h_aperture to calculate v_aperture
h_aperture = self.camera.getAttr('horizontalFilmAperture')
# recalculate the other aperture
v_aperture = h_aperture * v_res / h_res
self.camera.setAttr('verticalFilmAperture', v_aperture)
v_aperture = self.camera.getAttr('verticalFilmAperture')
self.camera.setAttr('zoom', 1.0/float(sx))
t = 0
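        # pan offsets walk the slice grid: the centre of cell i out of n
        # cells, measured from the film centre in aperture units, is
        # aperture / (2 * n) * (1 + 2 * i - n); one keyframe is set per
        # slice on consecutive frames so each frame renders one tile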
for i in range(sy):
v_pan = v_aperture / (2.0 * sy) * (1 + 2 * i - sy)
for j in range(sx):
h_pan = h_aperture / (2.0 * sx) * (1 + 2 * j - sx)
pm.currentTime(t)
pm.setKeyframe(self.camera, at='horizontalPan', v=h_pan)
pm.setKeyframe(self.camera, at='verticalPan', v=v_pan)
t += 1
self.camera.panZoomEnabled.set(1)
self.camera.renderPanZoom.set(1)
d_res.pixelAspect.set(1)
def ui(self):
"""The UI for the slicer
"""
pass
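# Illustrative sketch (assumption, not part of the original module): slicing
# the render of a scene camera into a 2 x 2 grid.  'persp' is a hypothetical
# camera transform name; any pm.nt.Camera shape works.
def _example_slice_persp():
    camera_shape = pm.PyNode('persp').getShape()  # hypothetical camera
    slicer = RenderSlicer(camera=camera_shape)
    slicer.slice(2, 2)  # renders frames 0..3, one tile per frame
    # ...after rendering, restore the original resolution
    slicer.unslice_scene()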
|
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2021, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from collections import deque
from pytest import mark, skip
from py2neo.cypher import Cursor, Record
@mark.skip
def test_bolt_connection_pool_usage_for_autocommit(connector):
if "bolt" not in connector.scheme:
skip("Bolt tests are only valid for Bolt connectors")
pool = connector.pool
address = connector.connection_data["host"], connector.connection_data["port"]
n = len(pool.connections)
assert pool.in_use_connection_count(address) == 0
cursor = connector.auto_run("RETURN 1")
assert 1 <= len(pool.connections) <= n + 1
assert pool.in_use_connection_count(address) == 1
n = len(pool.connections)
cursor.summary()
assert len(pool.connections) == n
assert pool.in_use_connection_count(address) == 0
@mark.skip
def test_bolt_connection_reuse_for_autocommit(connector):
if "bolt" not in connector.scheme:
skip("Bolt tests are only valid for Bolt connectors")
pool = connector.pool
address = connector.connection_data["host"], connector.connection_data["port"]
n = len(pool.connections)
assert pool.in_use_connection_count(address) == 0
cursor = connector.auto_run("RETURN 1")
assert 1 <= len(pool.connections) <= n + 1
assert pool.in_use_connection_count(address) == 1
n = len(pool.connections)
cursor.summary()
assert len(pool.connections) == n
assert pool.in_use_connection_count(address) == 0
cursor = connector.auto_run("RETURN 1")
assert len(pool.connections) == n
assert pool.in_use_connection_count(address) == 1
cursor.summary()
assert len(pool.connections) == n
assert pool.in_use_connection_count(address) == 0
@mark.skip
def test_bolt_connection_pool_usage_for_begin_commit(connector):
if "bolt" not in connector.scheme:
skip("Bolt tests are only valid for Bolt connectors")
pool = connector.pool
address = connector.connection_data["host"], connector.connection_data["port"]
n = len(pool.connections)
assert pool.in_use_connection_count(address) == 0
tx = connector.begin()
assert 1 <= len(pool.connections) <= n + 1
assert pool.in_use_connection_count(address) == 1
n = len(pool.connections)
connector.commit(tx)
assert len(pool.connections) == n
assert pool.in_use_connection_count(address) == 0
@mark.skip
def test_bolt_connection_pool_usage_for_begin_rollback(connector):
if "bolt" not in connector.scheme:
skip("Bolt tests are only valid for Bolt connectors")
pool = connector.pool
address = connector.connection_data["host"], connector.connection_data["port"]
n = len(pool.connections)
assert pool.in_use_connection_count(address) == 0
tx = connector.begin()
assert 1 <= len(pool.connections) <= n + 1
assert pool.in_use_connection_count(address) == 1
n = len(pool.connections)
connector.rollback(tx)
assert len(pool.connections) == n
assert pool.in_use_connection_count(address) == 0
def test_keys(connector):
cursor = Cursor(connector.auto_run("RETURN 'Alice' AS name, 33 AS age"))
expected = ["name", "age"]
actual = cursor.keys()
assert expected == actual
def test_records(connector):
cursor = Cursor(
connector.auto_run("UNWIND range(1, $x) AS n RETURN n, n * n AS n_sq", {"x": 3}))
expected = deque([(1, 1), (2, 4), (3, 9)])
for actual_record in cursor:
expected_record = Record(["n", "n_sq"], expected.popleft())
assert expected_record == actual_record
def test_stats(connector):
cursor = Cursor(connector.auto_run("CREATE ()"))
stats = cursor.stats()
assert stats["nodes_created"] == 1
# def test_explain_plan(connector, neo4j_minor_version):
# cursor = Cursor(connector.run("EXPLAIN RETURN $x", {"x": 1}))
# expected = CypherPlan(
# operator_type='ProduceResults',
# identifiers=['$x'],
# children=[
# CypherPlan(
# operator_type='Projection',
# identifiers=['$x'],
# children=[],
# args={
# 'estimated_rows': 1.0,
# 'expressions': '{$x : $x}',
# },
# ),
# ],
# args={
# 'estimated_rows': 1.0,
# 'planner': 'COST',
# 'planner_impl': 'IDP',
# 'planner_version': neo4j_minor_version,
# 'runtime': 'COMPILED',
# 'runtime_impl': 'COMPILED',
# 'runtime_version': neo4j_minor_version,
# 'version': 'CYPHER %s' % neo4j_minor_version,
# },
# )
# actual = cursor.plan()
# assert expected == actual
# def test_profile_plan(connector, neo4j_version):
# cursor = Cursor(connector.run("PROFILE RETURN $x", {"x": 1}))
# actual = cursor.plan()
# expected = CypherPlan(
# operator_type='ProduceResults',
# identifiers=['$x'],
# children=[
# CypherPlan(
# operator_type='Projection',
# identifiers=['$x'],
# children=[],
# args={
# 'db_hits': 0,
# 'estimated_rows': 1.0,
# 'expressions': '{$x : $x}',
# 'page_cache_hit_ratio': 0.0,
# 'page_cache_hits': 0,
# 'page_cache_misses': 0,
# 'rows': 1,
# 'time': actual.children[0].args["time"],
# },
# ),
# ],
# args={
# 'db_hits': 0,
# 'estimated_rows': 1.0,
# 'page_cache_hit_ratio': 0.0,
# 'page_cache_hits': 0,
# 'page_cache_misses': 0,
# 'planner': 'COST',
# 'planner_impl': 'IDP',
# 'planner_version': neo4j_version,
# 'rows': 1,
# 'runtime': 'COMPILED',
# 'runtime_impl': 'COMPILED',
# 'runtime_version': neo4j_version,
# 'time': actual.args["time"],
# 'version': 'CYPHER %s' % neo4j_version,
# },
# )
# assert expected == actual
# def skip_if_no_multidb_support(graph):
# if graph.service.kernel_version < (4, 0):
# skip("MultiDB tests are only valid for Neo4j 4.0+")
#
#
# def test_db_extra(graph, connector):
# skip_if_no_multidb_support(graph)
# cursor = Cursor(connector.run("RETURN 1", {}, db="system"))
# expected = CypherStats(nodes_created=1)
# actual = cursor.stats()
# assert expected == actual
|