from math import sqrt
import numpy as np
''' *********************** USER-PARAMETERS *********************** '''
''' INITIAL STATE PARAMETERS '''
MAX_TEST_DURATION = 3000;
dt = 1e-3;
model_path = ["/home/adelpret/devel/sot_hydro/install/share"];
urdfFileName = model_path[0] + "/hrp2_14_description/urdf/hrp2_14_reduced.urdf";
freeFlyer = True;
q0_urdf = np.matrix([0.0, 0.0, 0.648702, 0.0, 0.0 , 0.0, 1.0, # Free flyer 0-6
0.0, 0.0, 0.0, 0.0, # CHEST HEAD 7-10
0.261799388, 0.174532925, 0.0, -0.523598776, 0.0, 0.0, 0.174532925, # LARM 11-17
0.261799388, -0.174532925, 0.0, -0.523598776, 0.0, 0.0, 0.174532925, # RARM 18-24
0.0, 0.0, -0.453785606, 0.872664626, -0.41887902, 0.0, # LLEG 25-30
0.0, 0.0, -0.453785606, 0.872664626, -0.41887902, 0.0, # RLEG 31-36
]).T;
q0_sot = ( # Free flyer
0., 0., 0.648702, 0., 0. , 0.,
# Legs
0., 0., -0.453786, 0.872665, -0.418879, 0.,
0., 0., -0.453786, 0.872665, -0.418879, 0.,
# Chest and head
0., 0., 0., 0.,
# Arms
0.261799, -0.17453, 0., -0.523599, 0., 0., 0.1,
0.261799, 0.17453, 0., -0.523599, 0., 0., 0.1);
COM_DES = (0.01, 0.0, 0.81);
v0 = np.matrix(np.zeros(36)).T;
active_joints = (1, 1, 1, 1, 1, 1, # lleg
1, 1, 1, 1, 1, 1, # rleg
0, 0, 0, 0, # chest-head
0, 0, 0, 0, 0, 0, 0, # larm
0, 0, 0, 0, 0, 0, 0) # rarm
GEAR_RATIOS = (384.0, 240.0, 180.0, 200.0, 180.0, 100.0,
384.0, 240.0, 180.0, 200.0, 180.0, 100.0,
207.69, 381.54, 100.0, 100.0,
219.23, 231.25, 266.67, 250.0, 145.45, 350.0, 200.0,
219.23, 231.25, 266.67, 250.0, 145.45, 350.0, 200.0);
ROTOR_INERTIAS = (1.01e-4, 6.96e-4, 1.34e-4, 1.34e-4, 6.96e-4, 6.96e-4,
1.01e-4, 6.96e-4, 1.34e-4, 1.34e-4, 6.96e-4, 6.96e-4,
6.96e-4, 6.96e-4, 1.10e-4, 1.10e-4,
6.96e-4, 6.60e-4, 1.00e-4, 6.60e-4, 1.10e-4, 1.00e-4, 1.00e-4,
6.96e-4, 6.60e-4, 1.00e-4, 6.60e-4, 1.10e-4, 1.00e-4, 1.00e-4);
''' CONTROLLER CONFIGURATION '''
ENABLE_CAPTURE_POINT_LIMITS = False;
ENABLE_TORQUE_LIMITS = True;
ENABLE_FORCE_LIMITS = True;
ENABLE_JOINT_LIMITS = True;
IMPOSE_POSITION_BOUNDS = True;
IMPOSE_VELOCITY_BOUNDS = True;
IMPOSE_VIABILITY_BOUNDS = True;
IMPOSE_ACCELERATION_BOUNDS = True;
JOINT_POS_PREVIEW = 1.5; # preview window to convert joint pos limits into joint acc limits
JOINT_VEL_PREVIEW = 1; # preview window to convert joint vel limits into joint acc limits
MAX_JOINT_ACC = 30.0;
MAX_MIN_JOINT_ACC = 10.0;
USE_JOINT_VELOCITY_ESTIMATOR = False;
ACCOUNT_FOR_ROTOR_INERTIAS = True;
kp_posture = 1.0; # proportional gain of postural task
kd_posture = 2*sqrt(kp_posture);
kp_pos = 100.0; # proportional gain of position controller
kd_pos = 2*sqrt(kp_pos);
kp_constr = 1.0; # constraint proportional feedback gain
kd_constr = 2*sqrt(kp_constr); # constraint derivative feedback gain
kp_com = 30.0;
kd_com = 2*sqrt(kp_com);
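# Each derivative gain above follows kd = 2*sqrt(kp), which critically damps
# the closed-loop error dynamics e_ddot + kd*e_dot + kp*e = 0 (discriminant
# kd**2 - 4*kp = 0). A minimal sanity check over the gains defined above:
for _kp, _kd in ((kp_posture, kd_posture), (kp_pos, kd_pos),
                 (kp_constr, kd_constr), (kp_com, kd_com)):
    assert abs(_kd**2 - 4.0*_kp) < 1e-9, "gains are not critically damped";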
constraint_mask = np.array([True, True, True, True, True, True]).T;
ee_mask = np.array([True, True, True, True, True, True]).T;
w_com = 1.0;
w_posture = 1e-2; # weight of postural task
w_forces = 1e-4;
w_base_orientation = 0.0;
w_torques = 0.0;
maxIter = 300; # max number of iterations
maxTime = 0.8; # max computation time for the solver in seconds
verb=0; # verbosity level (0, 1, or 2)
LEFT_FOOT_SIZES = (0.130, -0.100, 0.075, -0.056); # pos x, neg x, pos y, neg y size
RIGHT_FOOT_SIZES = (0.130, -0.100, 0.056, -0.056); # pos x, neg x, pos y, neg y size
RIGHT_FOOT_CONTACT_POINTS = ((RIGHT_FOOT_SIZES[0], RIGHT_FOOT_SIZES[0], RIGHT_FOOT_SIZES[1], RIGHT_FOOT_SIZES[1]),
(RIGHT_FOOT_SIZES[3], RIGHT_FOOT_SIZES[2], RIGHT_FOOT_SIZES[3], RIGHT_FOOT_SIZES[2]),
(-0.105, -0.105, -0.105, -0.105)); # contact points in local reference frame
LEFT_FOOT_CONTACT_POINTS = np.matrix([[LEFT_FOOT_SIZES[0], LEFT_FOOT_SIZES[3], -0.105],
[LEFT_FOOT_SIZES[0], LEFT_FOOT_SIZES[2], -0.105],
[LEFT_FOOT_SIZES[1], LEFT_FOOT_SIZES[3], -0.105],
[LEFT_FOOT_SIZES[1], LEFT_FOOT_SIZES[2], -0.105]]).T # contact points in local reference frame
mu = np.array([0.3, 0.1]); # force and moment friction coefficient
fMin = 1e-3; # minimum normal force
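# Quick illustration of the friction model above (a sketch, not used by the
# controller): for a point contact the tangential force must stay inside the
# cone |f_t| <= mu[0]*f_n, with the normal force at least fMin.
def _inside_friction_cone(fx, fy, fz):
    return fz >= fMin and sqrt(fx**2 + fy**2) <= mu[0]*fz;
assert _inside_friction_cone(1.0, 0.0, 10.0);      # |f_t| = 1.0 <= 0.3*10
assert not _inside_friction_cone(5.0, 0.0, 10.0);  # |f_t| = 5.0 >  0.3*10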
''' SIMULATOR PARAMETERS '''
FORCE_TORQUE_LIMITS = ENABLE_TORQUE_LIMITS;
FORCE_JOINT_LIMITS = ENABLE_JOINT_LIMITS and IMPOSE_POSITION_BOUNDS;
USE_LCP_SOLVER = False
''' STOPPING CRITERIA THRESHOLDS '''
MAX_CONSTRAINT_ERROR = 0.1;
''' INITIAL STATE PARAMETERS '''
INITIAL_CONFIG_ID = 0;
INITIAL_CONFIG_FILENAME = '../../../data/hrp2_configs_coplanar';
''' VIEWER PARAMETERS '''
ENABLE_VIEWER = True;
PLAY_MOTION_WHILE_COMPUTING = True;
PLAY_MOTION_AT_THE_END = True;
DT_VIEWER = 10*dt; # timestep used to display motion with viewer
SHOW_VIEWER_FLOOR = True;
''' FIGURE PARAMETERS '''
SAVE_FIGURES = False;
SHOW_FIGURES = False;
SHOW_LEGENDS = True;
LINE_ALPHA = 0.7;
|
from pycp2k.inputsection import InputSection
from ._each119 import _each119
class _fock_gap1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.EACH = _each119()
self._name = "FOCK_GAP"
self._keywords = {'Log_print_key': 'LOG_PRINT_KEY', 'Filename': 'FILENAME', 'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS'}
self._subsections = {'EACH': 'EACH'}
self._attributes = ['Section_parameters']
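# A minimal usage sketch (attribute values illustrative, not taken from any
# generated CP2K input):
#   section = _fock_gap1()
#   section.Add_last = "NUMERIC"          # rendered as the ADD_LAST keyword
#   section.Common_iteration_levels = 1   # rendered as COMMON_ITERATION_LEVELS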
|
from distutils.core import setup
setup(name='pantheradesktop',
description = "Panthera Desktop Framework",
long_description = "Tiny desktop framework for easy application development in Python using PyQT/PySide",
author = "Damian Kęska",
author_email = "webnull.www@gmail.com",
version="0.1.0.3",
license = "LGPL",
      url = 'https://github.com/Panthera-Framework/Panthera-Desktop/',
download_url = 'https://github.com/Panthera-Framework/Panthera-Desktop/archive/master.tar.gz',
package_dir={'': 'src'},
packages=['pantheradesktop'],
keywords=['panthera', 'desktop', 'framework', 'shell', 'apps', 'cli apps'],
data_files = []
)
|
from django.shortcuts import render, reverse, redirect
from django.http import HttpResponseRedirect, HttpResponse
from .models import bp_pages, bp_products, bp_users, ContactModel
from .forms import ContactForm, ShoppingForm, Login, Registreren
from cart.cart import Cart
def get_index(request):
text = bp_pages.objects.filter(pagina="home")
former = ContactForm()
    if 'username' in request.session:
        username = request.session['username']
        message = "Welkom, %s" % username  # "Welcome, %s"
return render(request, 'bits/home.html', context=locals())
def get_product(request, shop_item):
query = bp_products.objects.filter(pr_cat=shop_item)[:3]
query_all = bp_products.objects.filter(pr_cat=shop_item)
return render(request, 'bits/product.html', {'query': query, 'query_all': query_all})
def get_product_by_sub(request, shop_item, shop_subitem):
query = bp_products.objects.filter(pr_cat=shop_item, pr_subcat=shop_subitem)[:3]
query_all = bp_products.objects.filter(pr_cat=shop_item, pr_subcat=shop_subitem)
return render(request, 'bits/product.html', {'query': query, 'query_all': query_all})
def get_product_by_new(request, shop_item):
new = 'NEW'
query = bp_products.objects.filter(pr_cat=shop_item, new=new)[:3]
query_all = bp_products.objects.filter(pr_cat=shop_item, new=new)
return render(request, 'bits/product.html', {'query': query, 'query_all': query_all})
def get_product_by_new_solo(request):
new = 'NEW'
query = bp_products.objects.filter(new=new)[:3]
query_all = bp_products.objects.filter(new=new)
return render(request, 'bits/product.html', {'query': query, 'query_all': query_all})
def get_product_solo(request, product_item):
query = bp_products.objects.filter(id=product_item)
query_naam = bp_products.objects.values_list('pr_naam', flat=True).get(id=product_item)
query_prijs = bp_products.objects.values_list('pr_prijs', flat=True).get(id=product_item)
if request.method == 'POST':
form = ShoppingForm(request.POST)
if form.is_valid():
product = bp_products.objects.get(id=product_item)
cart = Cart(request)
quantity = form.cleaned_data['quantity']
maat = form.cleaned_data['maat']
cart.add(product, product.pr_prijs, maat, quantity)
return HttpResponseRedirect(reverse('bits:add_to_cart'))
    else:
form = ShoppingForm(initial={'pr_prijs': query_prijs, 'pr_naam': query_naam})
return render(request, 'bits/solo.html', {'query': query, 'form': form})
def add_to_cart(request):
return render(request, 'bits/cart.html', dict(cart=Cart(request)))
def contact_request(request):
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
obj = ContactModel()
obj.voornaam = form.cleaned_data['voornaam']
obj.onderwerp = form.cleaned_data['onderwerp']
obj.email = form.cleaned_data['email']
obj.text = form.cleaned_data['text']
obj.save()
else:
form = ContactForm()
return render(request, 'bits/contact.html', {'form': form})
def authenticate(request):
if request.method == 'POST':
form = Login(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
            if bp_users.objects.filter(username=username).exists():
                # check the password for this specific user, not for any user in the table
                if bp_users.objects.filter(username=username, password=password).exists():
                    request.session['username'] = username
                    return HttpResponseRedirect(reverse('bits:get_index'))
                else:
                    # note: `message` does not survive the redirect; Django's messages
                    # framework would be needed to display it
                    message = "Fout wachtwoord"  # "Wrong password"
                    return HttpResponseRedirect(reverse('bits:authenticate'))
            else:
                message = "Foute username"  # "Wrong username"
                return HttpResponseRedirect(reverse('bits:authenticate'))
        else:
            message = "Niet alle velden zijn ingevuld"  # "Not all fields were filled in"
            return HttpResponseRedirect(reverse('bits:authenticate'))
else:
form = Login()
return render(request, 'bits/login.html', context=locals())
def register(request):
if request.method == 'POST':
form = Registreren(request.POST)
if form.is_valid():
form.save()
            message = "Gefeliciteerd. U kunt nu inloggen met uw gebruikersnaam en wachtwoord"  # "Congratulations. You can now log in with your username and password"
            return render(request, 'bits/register.html', context=locals())
        else:
            message = "U bent velden vergeten"  # "You forgot some fields"
            form = Registreren()
            return render(request, 'bits/register.html', context=locals())
else:
form = Registreren()
return render(request, 'bits/register.html', context=locals())
def logout(request):
try:
del request.session['username']
    except KeyError:
        pass  # already logged out
url = reverse('bits:get_index')
return HttpResponseRedirect(url)
def deleterow(request, product_item):
product = bp_products.objects.get(id=int(product_item))
cart = Cart(request)
cart.remove(product)
return HttpResponseRedirect(reverse('bits:add_to_cart'))
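def authenticate_builtin(request):
    """A sketch (not wired into any URLconf) of the same login flow using
    Django's built-in auth instead of the plaintext bp_users lookup above.
    It assumes django.contrib.auth users exist, which this app may not have.
    """
    from django.contrib.auth import authenticate as dj_authenticate, login as dj_login
    user = dj_authenticate(request,
                           username=request.POST.get('username', ''),
                           password=request.POST.get('password', ''))
    if user is not None:
        dj_login(request, user)  # Django checks the hashed password and fills the session
        return HttpResponseRedirect(reverse('bits:get_index'))
    return HttpResponseRedirect(reverse('bits:authenticate'))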
|
from psi4 import core
from psi4.driver import constants
def print_sapt_var(name, value, short=False, start_spacer=" "):
"""
    Convert the incoming value in hartree into a correctly formatted Psi4 print string.
"""
vals = (name, value * 1000, value * constants.hartree2kcalmol, value * constants.hartree2kJmol)
if short:
return start_spacer + "%-20s % 15.8f [mEh]" % vals[:2]
else:
return start_spacer + "%-20s % 15.8f [mEh] % 15.8f [kcal/mol] % 15.8f [kJ/mol]" % vals
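# A short usage illustration (output spacing approximate; conversion factors
# come from psi4.driver.constants): a hartree value is reported in mEh,
# kcal/mol and kJ/mol at once, e.g.
#   print_sapt_var("Elst10,r", -0.01)
#   -> "    Elst10,r   -10.00000000 [mEh]   -6.2751 [kcal/mol]   -26.2550 [kJ/mol]" (roughly)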
def print_sapt_hf_summary(data, name, short=False, delta_hf=False):
ret = " %s Results\n" % name
ret += " " + "-" * 97 + "\n"
# Elst
ret += print_sapt_var("Electrostatics", data["Elst10,r"]) + "\n"
ret += print_sapt_var(" Elst10,r", data["Elst10,r"]) + "\n"
ret += "\n"
core.set_variable("SAPT ELST ENERGY", data["Elst10,r"])
# Exchange
ret += print_sapt_var("Exchange", data["Exch10"]) + "\n"
ret += print_sapt_var(" Exch10", data["Exch10"]) + "\n"
ret += print_sapt_var(" Exch10(S^2)", data["Exch10(S^2)"]) + "\n"
ret += "\n"
core.set_variable("SAPT EXCH ENERGY", data["Exch10"])
ind = data["Ind20,r"] + data["Exch-Ind20,r"]
ind_ab = data["Ind20,r (A<-B)"] + data["Exch-Ind20,r (A<-B)"]
ind_ba = data["Ind20,r (A->B)"] + data["Exch-Ind20,r (A->B)"]
ret += print_sapt_var("Induction", ind) + "\n"
ret += print_sapt_var(" Ind20,r", data["Ind20,r"]) + "\n"
ret += print_sapt_var(" Exch-Ind20,r", data["Exch-Ind20,r"]) + "\n"
ret += print_sapt_var(" Induction (A<-B)", ind_ab) + "\n"
ret += print_sapt_var(" Induction (A->B)", ind_ba) + "\n"
ret += "\n"
core.set_variable("SAPT IND ENERGY", ind)
if delta_hf:
total_sapt = (data["Elst10,r"] + data["Exch10"] + ind)
sapt_hf_delta = delta_hf - total_sapt
core.set_variable("SAPT(DFT) Delta HF", sapt_hf_delta)
ret += print_sapt_var("%-21s" % "Total SAPT", total_sapt, start_spacer=" ") + "\n"
ret += print_sapt_var("%-21s" % "Total HF", delta_hf, start_spacer=" ") + "\n"
ret += print_sapt_var("%-21s" % "Delta HF", sapt_hf_delta, start_spacer=" ") + "\n"
ret += " " + "-" * 97 + "\n"
return ret
else:
# Dispersion
disp = data["Disp20"] + data["Exch-Disp20,u"]
ret += print_sapt_var("Dispersion", disp) + "\n"
ret += print_sapt_var(" Disp20", data["Disp20,u"]) + "\n"
ret += print_sapt_var(" Exch-Disp20", data["Exch-Disp20,u"]) + "\n"
ret += "\n"
core.set_variable("SAPT DISP ENERGY", disp)
# Total energy
total = data["Elst10,r"] + data["Exch10"] + ind + disp
ret += print_sapt_var("Total %-15s" % name, total, start_spacer=" ") + "\n"
core.set_variable("SAPT0 TOTAL ENERGY", total)
core.set_variable("SAPT TOTAL ENERGY", total)
core.set_variable("CURRENT ENERGY", total)
ret += " " + "-" * 97 + "\n"
return ret
def print_sapt_dft_summary(data, name, short=False):
ret = " %s Results\n" % name
ret += " " + "-" * 97 + "\n"
# Elst
ret += print_sapt_var("Electrostatics", data["Elst10,r"]) + "\n"
ret += print_sapt_var(" Elst1,r", data["Elst10,r"]) + "\n"
ret += "\n"
core.set_variable("SAPT ELST ENERGY", data["Elst10,r"])
# Exchange
ret += print_sapt_var("Exchange", data["Exch10"]) + "\n"
ret += print_sapt_var(" Exch1", data["Exch10"]) + "\n"
ret += print_sapt_var(" Exch1(S^2)", data["Exch10(S^2)"]) + "\n"
ret += "\n"
core.set_variable("SAPT EXCH ENERGY", data["Exch10"])
# Induction
ind = data["Ind20,r"] + data["Exch-Ind20,r"]
ind_ab = data["Ind20,r (A<-B)"] + data["Exch-Ind20,r (A<-B)"]
ind_ba = data["Ind20,r (A->B)"] + data["Exch-Ind20,r (A->B)"]
if "Delta HF Correction" in list(data):
ind += data["Delta HF Correction"]
ret += print_sapt_var("Induction", ind) + "\n"
ret += print_sapt_var(" Ind2,r", data["Ind20,r"]) + "\n"
ret += print_sapt_var(" Exch-Ind2,r", data["Exch-Ind20,r"]) + "\n"
ret += print_sapt_var(" Induction (A<-B)", ind_ab) + "\n"
ret += print_sapt_var(" Induction (A->B)", ind_ba) + "\n"
if "Delta HF Correction" in list(data):
ret += print_sapt_var(" delta HF,r (2)", data["Delta HF Correction"]) + "\n"
ret += "\n"
core.set_variable("SAPT IND ENERGY", ind)
# Dispersion
disp = data["Disp20"] + data["Exch-Disp20,u"]
ret += print_sapt_var("Dispersion", disp) + "\n"
ret += print_sapt_var(" Disp2,r", data["Disp20"]) + "\n"
ret += print_sapt_var(" Disp2,u", data["Disp20,u"]) + "\n"
ret += print_sapt_var(" Exch-Disp2,u", data["Exch-Disp20,u"]) + "\n"
ret += "\n"
core.set_variable("SAPT DISP ENERGY", disp)
# Total energy
total = data["Elst10,r"] + data["Exch10"] + ind + disp
ret += print_sapt_var("Total %-15s" % name, total, start_spacer=" ") + "\n"
core.set_variable("SAPT(DFT) TOTAL ENERGY", total)
core.set_variable("SAPT TOTAL ENERGY", total)
core.set_variable("CURRENT ENERGY", total)
ret += " " + "-" * 97 + "\n"
return ret
|
__author__ = "Mikael Mortensen <mikaem@math.uio.no> and Nathanael Schilling <nathanael.schilling@in.tum.de>"
__date__ = "2015-04-07"
__copyright__ = "Copyright (C) 2015-2018 " + __author__
__license__ = "GNU Lesser GPL version 3 or any later version"
import numpy as np
from mpi4py import MPI
from ..optimization import optimizer, wraps
comm = MPI.COMM_WORLD
__all__ = ['getintegrator']
def adaptiveRK(A, b, bhat, err_order, fY_hat, u0_new, sc, err, fsal, offset,
aTOL, rTOL, adaptive, errnorm, rhs, u0, solver, dt, tstep,
context, additional_callback, params, predictivecontroller=False):
"""
Take a step using any Runge-Kutta method.
Parameters
----------
A, b, bhat : arrays
Runge-Kutta coefficients
err_order : int
Order of embedded method
    fY_hat, u0_new, sc, err : work arrays
fsal : boolean
Whether method is first-same-as-last
offset : length-1 array of int
Where to find the previous RHS evaluation (for FSAL methods). This can probably be eliminated.
aTOL, rTOL : float
Error tolerances
adaptive : boolean
If true, adapt the step size
errnorm : str
Which norm to use in computing the error estimate. One of {"2", "inf"}.
rhs : array
RHS evaluation
u0 : array
solution value (returned)
solver : calling module
contains method ComputeRHS for computing RHS of evolution equation
dt : float
time step size
tstep : int
Number of steps taken so far
predictivecontroller : boolean
If True use PI controller
"""
s = A.shape[0]
#Some parameters for adaptive time-stepping. See p167, Hairer, Norsett and Wanner. "Solving Ordinary Differential Equations 1"
#for details.
facmax_default = 2
facmax = facmax_default
fac = 0.8
facmin = 0.01
#We may need to repeat the time-step until a small enough value is used.
while True:
dt_prev = dt
if fsal:
offset[0] = (offset[0] - 1) % s
for i in range(0, s):
if not fsal or (tstep == 0 or i != 0):
fY_hat[(i + offset[0]) % s] = u0
for j in range(0, i):
fY_hat[(i+offset[0]) % s] += dt*A[i, j]*fY_hat[(j+offset[0]) % s]
#Compute F(Y)
rhs = solver.ComputeRHS(rhs, fY_hat[(i+offset[0]) % s], solver, **context)
fY_hat[(i+offset[0]) % s] = rhs
if i == 0:
context.fu0 = fY_hat[(0+offset[0]) % s]
additional_callback(context)
#Calculate the new value
u0_new[:] = u0
u0_new[:] += dt*b[0]*fY_hat[(0+offset[0]) % s]
err[:] = dt*(b[0] - bhat[0])*fY_hat[(0+offset[0]) % s]
for j in range(1, s):
u0_new[:] += dt*b[j]*fY_hat[(j+offset[0])%s]
err[:] += dt*(b[j] - bhat[j])*fY_hat[(j+offset[0])%s]
est = 0.0
sc[:] = aTOL + np.maximum(np.abs(u0), np.abs(u0_new))*rTOL
if errnorm == "2":
est_to_bcast = None
nsquared = np.zeros(u0.shape[0])
for k in range(u0.shape[0]):
nsquared[k] = comm.reduce(np.sum(np.power(np.abs(err[k]/sc[k]), 2)))
if comm.Get_rank() == 0:
est_to_bcast = np.zeros(1)
est = np.max(np.sqrt(nsquared))
est /= np.sqrt(np.array(context.T.shape(True)).prod())
est_to_bcast[0] = est
est_to_bcast = comm.bcast(est_to_bcast, root=0)
est = est_to_bcast[0]
elif errnorm == "inf":
raise AssertionError("Don't use this, not sure if it works")
#TODO: Test this error norm
sc[:] = aTOL + np.maximum(np.abs(u0), np.abs(u0_new))*rTOL
err[:] = err[:] / sc[:]
err = np.abs(err, out=err)
asdf = np.max(err)
x = np.zeros(asdf.shape)
comm.Allreduce(asdf, x, op=MPI.MAX)
est = np.abs(np.max(x))
est /= np.sqrt(np.array(context.T.shape(True)).prod())
else:
assert False, "Wrong error norm"
#Check error estimate
exponent = 1.0 / (err_order + 1)
if not predictivecontroller:
factor = min(facmax, max(facmin, fac*pow((1/est), exponent)))
else:
if "last_dt" not in vars(params):
params.last_dt = dt
if "last_est" not in vars(params):
params.last_est = est
last_dt = params.last_dt
last_est = params.last_est
factor = min(facmax, max(facmin, fac*pow((1/est), exponent)*dt/last_dt*pow(last_est/est, exponent)))
if adaptive:
dt = dt*factor
if est > 1.0:
facmax = 1
context.is_step_rejected_callback = True
context.dt_rejected = dt_prev
additional_callback(context)
#The offset gets decreased in the next step, which is something we do not want.
if fsal:
offset[0] += 1
continue
#if predictivecontroller:
#context.time_integrator["last_dt"] = dt_prev
#context.time_integrator["last_est"] = est
break
#Update u0 and U
u0[:] = u0_new
return u0, dt, dt_prev
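def _stepsize_factor_demo(dt=0.1, est=32.0, err_order=4, fac=0.8,
                          facmin=0.01, facmax=2.0):
    """Illustration only (not used by the solvers): the basic step-size
    controller applied inside adaptiveRK. With est = 32 and a 4th-order
    embedded error estimate the factor is 0.8 * 32**(-1/5) = 0.4, and since
    est > 1 the shrunken step 0.04 would be retried."""
    factor = min(facmax, max(facmin, fac*pow(1.0/est, 1.0/(err_order + 1))))
    return dt*factor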
@optimizer
def RK4(u0, u1, u2, rhs, a, b, dt, solver, context):
"""Runge Kutta fourth order"""
u2[:] = u1[:] = u0
for rk in range(4):
rhs = solver.ComputeRHS(rhs, u0, solver, **context)
if rk < 3:
u0[:] = u1 + b[rk]*dt*rhs
u2 += a[rk]*dt*rhs
u0[:] = u2
return u0, dt, dt
@optimizer
def ForwardEuler(u0, rhs, dt, solver, context):
rhs = solver.ComputeRHS(rhs, u0, solver, **context)
u0 += rhs*dt
return u0, dt, dt
@optimizer
def AB2(u0, u1, rhs, dt, tstep, solver, context):
rhs = solver.ComputeRHS(rhs, u0, solver, **context)
if tstep == 0:
u0 += rhs*dt
else:
u0 += (1.5*rhs*dt - 0.5*u1)
u1[:] = rhs*dt
return u0, dt, dt
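def _ab2_demo(u=1.0, dt=0.01, steps=100):
    """Self-contained illustration (not used by the solvers) of the recurrence
    implemented by AB2 above, u_{n+1} = u_n + dt*(3/2*f_n - 1/2*f_{n-1}), on
    the scalar test problem du/dt = -u. The work array u1 in AB2 plays the
    role of dt*f_{n-1} here."""
    f_prev = None
    for _ in range(steps):
        f = -u
        if f_prev is None:
            u += dt*f                      # first step: forward Euler
        else:
            u += dt*(1.5*f - 0.5*f_prev)   # Adams-Bashforth 2
        f_prev = f
    return u  # close to exp(-1) ~ 0.3679 for the defaults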
def getintegrator(rhs, u0, solver, context):
"""Return integrator using choice in global parameter integrator.
"""
params = solver.params
u1 = u0.copy()
if params.integrator == "RK4":
# RK4 parameters
a = np.array([1./6., 1./3., 1./3., 1./6.], dtype=context.float)
b = np.array([0.5, 0.5, 1.], dtype=context.float)
u2 = u0.copy()
@wraps(RK4)
def func():
return RK4(u0, u1, u2, rhs, a, b, params.dt, solver, context)
return func
elif params.integrator in ("BS5_adaptive", "BS5_fixed"):
# Remove nodepy dependency since it requires matplotlib and six
#import nodepy
#A = nodepy.rk.loadRKM("BS5").A.astype(context.float)
#b = nodepy.rk.loadRKM("BS5").b.astype(context.float)
#bhat = nodepy.rk.loadRKM("BS5").bhat.astype(context.float)
A = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[1/6, 0, 0, 0, 0, 0, 0, 0],
[2/27, 4/27, 0, 0, 0, 0, 0, 0],
[183/1372, -162/343, 1053/1372, 0, 0, 0, 0, 0],
[68/297, -4/11, 42/143, 1960/3861, 0, 0, 0, 0],
[597/22528, 81/352, 63099/585728, 58653/366080, 4617/20480, 0, 0, 0],
[174197/959244, -30942/79937, 8152137/19744439, 666106/1039181, -29421/29068, 482048/414219, 0, 0],
[587/8064, 0, 4440339/15491840, 24353/124800, 387/44800, 2152/5985, 7267/94080, 0]], dtype=context.float)
b = np.array([587/8064, 0, 4440339/15491840, 24353/124800, 387/44800, 2152/5985, 7267/94080, 0], dtype=context.float)
bhat = np.array([2479/34992, 0, 123/416, 612941/3411720, 43/1440, 2272/6561, 79937/1113912, 3293/556956], dtype=context.float)
err_order = 4
errnorm = "2"
fsal = True
        adaptive = params.integrator == "BS5_adaptive"
#Offset for fsal stuff. #TODO: infer this from tstep
offset = [0]
s = A.shape[0]
fY_hat = np.zeros((s,) + u0.shape, dtype=u0.dtype)
sc = np.zeros_like(u0)
err = np.zeros_like(u0)
@wraps(adaptiveRK)
def func():
return adaptiveRK(A, b, bhat, err_order, fY_hat, u1, sc, err, fsal,
offset, params.TOL, params.TOL, adaptive, errnorm,
rhs, u0, solver, params.dt, params.tstep, context,
solver.additional_callback, params)
return func
elif params.integrator == "ForwardEuler":
@wraps(ForwardEuler)
def func():
return ForwardEuler(u0, rhs, params.dt, solver, context)
return func
elif params.integrator == "AB2":
@wraps(AB2)
def func():
return AB2(u0, u1, rhs, params.dt, params.tstep, solver, context)
return func
|
from Tribler.community.market.wallet import ASSET_MAP
class Price(object):
"""Price is used for having a consistent comparable and usable class that deals with floats."""
def __init__(self, price, wallet_id):
"""
:param price: Integer representation of a price that is positive or zero
:param wallet_id: Identifier of the wallet type of this price
:type price: float
:type wallet_id: str
        :raises ValueError: Thrown when one of the arguments is invalid
"""
super(Price, self).__init__()
if not isinstance(price, (int, float)):
raise ValueError("Price must be an int or a float")
if not isinstance(wallet_id, str):
raise ValueError("Wallet id must be a string")
if price < 0:
raise ValueError("Price must be positive or zero")
self._price = price
self._wallet_id = wallet_id
@property
def wallet_id(self):
"""
:rtype: str
"""
return self._wallet_id
@property
def int_wallet_id(self):
"""
:rtype: int
"""
return ASSET_MAP[self._wallet_id]
def __int__(self):
return int(self._price)
def __float__(self):
return float(self._price)
def __str__(self):
return "%f %s" % (self._price, self.wallet_id)
def __add__(self, other):
if isinstance(other, Price) and self.wallet_id == other.wallet_id:
return Price(self._price + float(other), self._wallet_id)
else:
return NotImplemented
def __sub__(self, other):
if isinstance(other, Price) and self.wallet_id == other.wallet_id:
return Price(self._price - float(other), self._wallet_id)
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, Price) and self.wallet_id == other.wallet_id:
return self._price < float(other)
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, Price) and self.wallet_id == other.wallet_id:
return self._price <= float(other)
else:
return NotImplemented
def __eq__(self, other):
if not isinstance(other, Price) or self.wallet_id != other.wallet_id:
return NotImplemented
elif self is other:
return True
else:
return self._price == float(other)
    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
def __gt__(self, other):
if isinstance(other, Price) and self.wallet_id == other.wallet_id:
return self._price > float(other)
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, Price) and self.wallet_id == other.wallet_id:
return self._price >= float(other)
else:
return NotImplemented
    def __hash__(self):
        # hash on (price, wallet id) so it stays consistent with __eq__
        return hash((self._price, self._wallet_id))
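# A minimal usage sketch ('BTC' is an assumed wallet id; the valid ids are the
# keys of ASSET_MAP, which is not shown here):
#   total = Price(10.5, 'BTC') + Price(2.0, 'BTC')   # Price of 12.5 BTC
#   Price(10.5, 'BTC') + Price(2.0, 'DUM')           # TypeError: mismatched wallets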
|
"""Provide a class for simulation and result handling of FMU.
FMUFunction is Function factory similar to OpenTURNS'
PythonFunction.
It relies on the lower level OpenTURNSFMUFunction, which is similar to
OpenTURNS' OpenTURNSPythonFunction.
"""
import pyfmi
import numpy as np
import os
import openturns as ot
from . import fmi
from . import fmu_pool
class FMUFunction(ot.Function):
"""
    Define a Function from an FMU file.
Parameters
----------
path_fmu : str, path to the FMU file.
inputs_fmu : Sequence of str, default=None
        Names of the variables from the FMU to be used as input variables.
By default assigns variables with FMI causality INPUT.
outputs_fmu : Sequence of str, default=None
        Names of the variables from the FMU to be used as output variables.
By default assigns variables with FMI causality OUTPUT.
inputs : Sequence of str
Optional names to use as variables descriptions.
outputs : Sequence of str
Optional names to use as variables descriptions.
n_cpus : int
Number of cores to use for multiprocessing.
initialization_script : str (optional)
Path to the initialization script.
final_time : float
        The output variable values are collected at t=final_time and returned by
FMUFunction.
kind : str, one of "ME" (model exchange) or "CS"
(co-simulation)
Select a kind of FMU if both are available.
Note:
Contrary to pyfmi, the default here is "CS" (co-simulation). The
rationale behind this choice is that co-simulation may be used to
impose a solver not available in pyfmi.
"""
# expect_trajectory : bool, if True, the call inputs are assumed to be
# time dependent trajectories. Default is False
# TODO: Not implemented yet. Currently the __call__ from
# Function gets in the way and switch to sample execution.
# Hence a sequence of vectors is expected but a single vector is
# outputted.
    def __new__(cls, path_fmu=None, inputs_fmu=None, outputs_fmu=None,
                inputs=None, outputs=None, n_cpus=None, kind=None,
                initialization_script=None, final_time=None):
lowlevel = OpenTURNSFMUFunction(
path_fmu=path_fmu, inputs_fmu=inputs_fmu, outputs_fmu=outputs_fmu,
inputs=inputs, n_cpus=n_cpus, outputs=outputs, kind=kind,
initialization_script=initialization_script, final_time=final_time)
highlevel = ot.Function(lowlevel)
# highlevel._model = lowlevel.model
return highlevel
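# A hypothetical usage sketch (FMU path and variable names are placeholders,
# not taken from this module):
#   f = FMUFunction("deviation.fmu", inputs_fmu=["E", "F"], outputs_fmu=["y"])
#   y = f([3.0e7, 3.0e4])   # one simulation; outputs collected at final_time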
class OpenTURNSFMUFunction(ot.OpenTURNSPythonFunction):
"""
    Define a Function from an FMU file.
Parameters
----------
path_fmu : str, path to the FMU file.
inputs_fmu : Sequence of str, default=None
        Names of the variables from the FMU to be used as input variables.
By default assigns variables with FMI causality INPUT.
outputs_fmu : Sequence of str, default=None
        Names of the variables from the FMU to be used as output variables.
By default assigns variables with FMI causality OUTPUT.
inputs : Sequence of str
Optional names to use as variables descriptions.
outputs : Sequence of str
Optional names to use as variables descriptions.
n_cpus : int
Number of cores to use for multiprocessing.
initialization_script : str (optional)
Path to the initialization script.
final_time : float
        The output variable values are collected at t=final_time and returned by
FMUFunction.
kind : str, one of "ME" (model exchange) or "CS" (co-simulation)
Select a kind of FMU if both are available.
Note:
Contrary to pyfmi, the default here is "CS" (co-simulation). The
        rationale behind this choice is that co-simulation may be used to
impose a solver not available in pyfmi.
expect_trajectory : bool
If True, the call inputs are assumed to be time dependent
trajectories. Default is False
"""
def __init__(self, path_fmu, inputs_fmu=None, outputs_fmu=None,
inputs=None, outputs=None, n_cpus=None,
initialization_script=None, kind=None,
expect_trajectory=False, final_time=None, **kwargs):
self.load_fmu(path_fmu=path_fmu, kind=kind)
self._set_inputs_fmu(inputs_fmu)
self._set_outputs_fmu(outputs_fmu)
super(OpenTURNSFMUFunction, self).__init__(n=len(self.inputs_fmu),
p=len(self.outputs_fmu))
self._set_inputs(inputs)
self._set_outputs(outputs)
self._set_final_time(final_time)
self.n_cpus = n_cpus
self.initialize(initialization_script)
self.__expect_trajectory = expect_trajectory
self.__final = kwargs.pop("final", None)
def _set_inputs_fmu(self, inputs_fmu):
"""Set input variable names.
Parameters
----------
inputs_fmu : Sequence of strings
            Names of the variables from the FMU to be used as input variables.
"""
all_vars = fmi.get_name_variable(self.model)
causality = dict(zip(all_vars, fmi.get_causality(self.model, all_vars)))
if inputs_fmu is None:
            # choose all variables with causality INPUT
fmix_input = pyfmi.fmi.FMI2_INPUT if self.model.get_version() == '2.0' else pyfmi.fmi.FMI_INPUT
inputs_fmu = [name for name in all_vars if causality[name] == fmix_input]
else:
difference = set(inputs_fmu).difference(all_vars)
if difference:
raise pyfmi.common.io.VariableNotFoundError(", ".join(difference))
for name in inputs_fmu:
if (self.model.get_version() == '2.0' and not causality[name] in [pyfmi.fmi.FMI2_PARAMETER, pyfmi.fmi.FMI2_INPUT]) \
or (self.model.get_version() == '1.0' and causality[name] != pyfmi.fmi.FMI_INPUT):
raise ValueError('Variable "' + name + '" cannot be used as a function input (causality ' + fmi.get_causality_str(self.model, name) + ')')
self.inputs_fmu = inputs_fmu
def _set_inputs(self, inputs=None):
"""Set input variable names.
Parameters
----------
inputs : Sequence of strings, optional names to use as variables
descriptions.
"""
if inputs is None:
inputs = self.inputs_fmu
self.setInputDescription(inputs)
def _set_outputs_fmu(self, outputs_fmu):
"""Set output variable names.
Parameters
----------
outputs_fmu : Sequence of strings
            Names of the variables from the FMU to be used as output variables.
"""
all_vars = fmi.get_name_variable(self.model)
causality = dict(zip(all_vars, fmi.get_causality(self.model, all_vars)))
if outputs_fmu is None:
            # choose all variables with causality OUTPUT
fmix_output = pyfmi.fmi.FMI2_OUTPUT if self.model.get_version() == '2.0' else pyfmi.fmi.FMI_OUTPUT
outputs_fmu = [name for name in all_vars if causality[name] == fmix_output]
if len(outputs_fmu) == 0:
                raise pyfmi.common.io.VariableNotFoundError("No variables marked as OUTPUT; please specify outputs_fmu")
else:
difference = set(outputs_fmu).difference(fmi.get_name_variable(self.model))
if difference:
raise pyfmi.common.io.VariableNotFoundError(", ".join(difference))
for name in outputs_fmu:
if (self.model.get_version() == '2.0' and not causality[name] in [pyfmi.fmi.FMI2_LOCAL, pyfmi.fmi.FMI2_OUTPUT]) \
or (self.model.get_version() == '1.0' and causality[name] != pyfmi.fmi.FMI_OUTPUT):
raise ValueError('Variable "' + name + '" cannot be used as a function output (causality ' + fmi.get_causality_str(self.model, name) + ')')
self.outputs_fmu = outputs_fmu
def _set_outputs(self, outputs=None):
"""Set output variable names.
Parameters
----------
outputs : Sequence of strings, optional names to use as variables
descriptions.
"""
if outputs is None:
outputs = self.outputs_fmu
self.setOutputDescription(outputs)
def __call__(self, X, **kwargs):
X = np.atleast_1d(np.squeeze(X))
if self.__expect_trajectory:
if X.ndim > 2:
return self._exec_sample(X, **kwargs)
else:
return self._exec(X, **kwargs)
else:
if X.ndim > 1:
return self._exec_sample(X, **kwargs)
else:
return self._exec(X, **kwargs)
def _set_final_time(self, final_time):
"""Extract final time from keywords if exists.
Parameters
----------
final_time: float (must be >= 0).
"""
if final_time is not None:
self.final_time = final_time
else:
self.final_time = self.model.get_default_experiment_stop_time()
def _exec(self, value_input, **kwargs):
"""Simulate the FMU for a given set of input values.
Parameters
----------
value_input : Vector or array-like with time steps as rows.
See the 'simulate' method for additional keyword arguments.
"""
return self.simulate(value_input=value_input, **kwargs)
def _exec_sample(self, list_value_input, **kwargs):
"""Simulate the FMU multiple times.
Parameters
----------
list_value_input : Sequence of vectors of input values.
Additional keyword arguments are passed on to the 'simulate' method of
the underlying PyFMI model object.
"""
return self.simulate_sample(list_value_input, **kwargs)
def load_fmu(self, path_fmu, kind=None, **kwargs):
"""Load an FMU.
Parameters
----------
path_fmu : String, path to the FMU file.
kind : String, one of "ME" (model exchange) or "CS" (co-simulation)
Select a kind of FMU if both are available.
Note:
Contrary to pyfmi, the default here is "CS" (co-simulation). The
            rationale behind this choice is that co-simulation may be used
to impose a solver not available in pyfmi.
Additional keyword arguments are passed on to pyfmi's 'load_fmu'
function.
"""
self.model = fmi.load_fmu(path_fmu=os.path.expanduser(path_fmu), kind=kind, **kwargs)
def getFMUInputDescription(self):
"""Get the list of input variable names."""
return self.inputs_fmu
def getFMUOutputDescription(self):
"""Get the list of output variable names."""
return self.outputs_fmu
def initialize(self, initialization_script=None):
"""Initialize the FMU, using initialization script if available.
Parameters
----------
initialization_script : String (optional), path to the initialization
script.
"""
self.initialization_script = initialization_script
try:
self.model.setup_experiment()
except AttributeError:
pass # Probably FMI version 1.
try:
fmi.apply_initialization_script(self.model,
self.initialization_script)
except TypeError:
pass # No initialization script.
try:
self.model.initialize()
except pyfmi.fmi.FMUException as ex:
raise pyfmi.fmi.FMUException(str(ex)+'\n'+'\n'.join([str(line) for line in self.model.get_log()]))
def simulate(self, value_input=None, reset=True, **kwargs):
"""Simulate the fmu.
Parameters
----------
value_input : Vector of input values.
reset : Boolean, toggle resetting the FMU prior to simulation. True by
default.
time : Sequence of floats, time vector (optional).
timestep : Float, time step in seconds (optional).
Additional keyword arguments are passed on to the 'simulate' method of
the underlying PyFMI model object.
"""
kwargs.setdefault("initialization_script", self.initialization_script)
kwargs_simulate = fmi.parse_kwargs_simulate(
value_input, name_input=self.getFMUInputDescription(),
name_output=self.getFMUOutputDescription(),
model=self.model, **kwargs)
if "final_time" in kwargs.keys():
raise Warning("final_time must be set in the constructor.")
simulation = fmi.simulate(self.model, reset=reset,
final_time=self.final_time, **kwargs_simulate)
return fmi.strip_simulation(simulation,
name_output=self.getOutputDescription(),
final=self.__final)
def simulate_sample(self, list_value_input, **kwargs):
"""Simulate the FMU multiple times.
Parameters
----------
list_value_input : Sequence of vectors of input values.
Additional keyword arguments are passed on to the 'simulate' method of
the underlying PyFMI model object.
"""
if self.n_cpus is None:
n_cpus = 1
else:
n_cpus = self.n_cpus
kwargs.setdefault("initialization_script", self.initialization_script)
#TODO: re-factorize parsing of kwargs?
list_kwargs = []
for value_input in list_value_input:
kwargs_simulate = fmi.parse_kwargs_simulate(
value_input, name_input=self.getFMUInputDescription(),
name_output=self.getFMUOutputDescription(),
model=self.model, **kwargs)
list_kwargs.append(kwargs_simulate)
# if n_cpus > 1: # TODO?
pool = fmu_pool.FMUPool(self.model, n_process=n_cpus)
return pool.run(list_kwargs, final=self.__final)
class FMUPointToFieldFunction(ot.PointToFieldFunction):
"""
    Define a PointToFieldFunction from an FMU file.
Parameters
----------
mesh : :class:`openturns.Mesh`
Time grid, has to be included in the start/end time defined in the FMU
path_fmu : str, path to the FMU file.
inputs_fmu : Sequence of str, default=None
        Names of the variables from the FMU to be used as input variables.
By default assigns variables with FMI causality INPUT.
outputs_fmu : Sequence of str, default=None
        Names of the variables from the FMU to be used as output variables.
By default assigns variables with FMI causality OUTPUT.
inputs : Sequence of str
Optional names to use as variables descriptions.
outputs : Sequence of str
Optional names to use as variables descriptions.
initialization_script : str (optional)
Path to the initialization script.
kind : str, one of "ME" (model exchange) or "CS" (co-simulation)
Select a kind of FMU if both are available.
Note:
Contrary to pyfmi, the default here is "CS" (co-simulation). The
rationale behind this choice is that co-simulation may be used to
impose a solver not available in pyfmi.
start_time : float
The FMU simulation start time.
final_time : float
The FMU simulation stop time.
"""
    def __new__(cls, mesh, path_fmu=None, inputs_fmu=None, outputs_fmu=None,
                inputs=None, outputs=None, kind=None,
                initialization_script=None, start_time=None, final_time=None):
lowlevel = OpenTURNSFMUPointToFieldFunction(mesh,
path_fmu=path_fmu, inputs_fmu=inputs_fmu, outputs_fmu=outputs_fmu,
inputs=inputs, outputs=outputs, kind=kind,
initialization_script=initialization_script, start_time=start_time,
final_time=final_time)
highlevel = ot.PointToFieldFunction(lowlevel)
# highlevel._model = lowlevel.model
return highlevel
class OpenTURNSFMUPointToFieldFunction(ot.OpenTURNSPythonPointToFieldFunction):
"""Define a PointToFieldFunction from a FMU file."""
def __init__(self, mesh, path_fmu, inputs_fmu=None, outputs_fmu=None,
inputs=None, outputs=None,
initialization_script=None, kind=None,
expect_trajectory=False, start_time=None,
final_time=None, **kwargs):
self.load_fmu(path_fmu=path_fmu, kind=kind)
self._set_inputs_fmu(inputs_fmu)
self._set_outputs_fmu(outputs_fmu)
        super(OpenTURNSFMUPointToFieldFunction, self).__init__(
            len(self.inputs_fmu), mesh, len(self.outputs_fmu))
self._set_inputs(inputs)
self._set_outputs(outputs)
self._set_final_time(final_time)
self._set_start_time(start_time)
self._assert_mesh_pertinence()
self.initialize(initialization_script)
def _set_inputs_fmu(self, inputs_fmu):
"""Set input variable names.
Parameters
----------
inputs_fmu : Sequence of strings
            Names of the variables from the FMU to be used as input variables.
"""
all_vars = fmi.get_name_variable(self.model)
causality = dict(zip(all_vars, fmi.get_causality(self.model, all_vars)))
if inputs_fmu is None:
            # choose all variables with causality INPUT
fmix_input = pyfmi.fmi.FMI2_INPUT if self.model.get_version() == '2.0' else pyfmi.fmi.FMI_INPUT
inputs_fmu = [name for name in all_vars if causality[name] == fmix_input]
else:
difference = set(inputs_fmu).difference(all_vars)
if difference:
raise pyfmi.common.io.VariableNotFoundError(", ".join(difference))
for name in inputs_fmu:
if (self.model.get_version() == '2.0' and not causality[name] in [pyfmi.fmi.FMI2_PARAMETER, pyfmi.fmi.FMI2_INPUT]) \
or (self.model.get_version() == '1.0' and causality[name] != pyfmi.fmi.FMI_INPUT):
raise ValueError('Variable "' + name + '" cannot be used as a function input (causality ' + fmi.get_causality_str(self.model, name) + ')')
self.inputs_fmu = inputs_fmu
def _set_inputs(self, inputs=None):
"""Set input variable names.
Parameters
----------
inputs : Sequence of strings, optional names to use as variables
descriptions.
"""
if inputs is None:
inputs = self.inputs_fmu
self.setInputDescription(inputs)
def _set_outputs_fmu(self, outputs_fmu):
"""Set output variable names.
Parameters
----------
outputs_fmu : Sequence of strings
            Names of the variables from the FMU to be used as output variables.
"""
all_vars = fmi.get_name_variable(self.model)
causality = dict(zip(all_vars, fmi.get_causality(self.model, all_vars)))
if outputs_fmu is None:
            # choose all variables with causality OUTPUT
fmix_output = pyfmi.fmi.FMI2_OUTPUT if self.model.get_version() == '2.0' else pyfmi.fmi.FMI_OUTPUT
outputs_fmu = [name for name in all_vars if causality[name] == fmix_output]
if len(outputs_fmu) == 0:
                raise pyfmi.common.io.VariableNotFoundError("No variables marked as OUTPUT; please specify outputs_fmu")
else:
difference = set(outputs_fmu).difference(fmi.get_name_variable(self.model))
if difference:
raise pyfmi.common.io.VariableNotFoundError(", ".join(difference))
for name in outputs_fmu:
if (self.model.get_version() == '2.0' and not causality[name] in [pyfmi.fmi.FMI2_LOCAL, pyfmi.fmi.FMI2_OUTPUT]) \
or (self.model.get_version() == '1.0' and causality[name] != pyfmi.fmi.FMI_OUTPUT):
raise ValueError('Variable "' + name + '" cannot be used as a function output (causality ' + fmi.get_causality_str(self.model, name) + ')')
self.outputs_fmu = outputs_fmu
def _set_outputs(self, outputs=None):
"""Set output variable names.
Parameters
----------
outputs : Sequence of strings, optional names to use as variables
descriptions.
"""
if outputs is None:
outputs = self.outputs_fmu
self.setOutputDescription(outputs)
def _set_final_time(self, final_time):
"""Extract final time from keywords if exists.
Parameters
----------
final_time: float (must be >= 0).
"""
if final_time is not None:
self.final_time = final_time
else:
self.final_time = self.model.get_default_experiment_stop_time()
def _set_start_time(self, start_time):
"""Extract start time from keywords if exists.
Parameters
----------
start_time: float (must be >= 0)
"""
if start_time is not None:
self.start_time = start_time
else:
self.start_time = self.model.get_default_experiment_start_time()
def _assert_mesh_pertinence(self):
"""Raise an error if the mesh is not comprised between the start and
final simulation time.
"""
mesh = self.getOutputMesh()
mesh_min = mesh.getVertices().getMin()[0]
        assert mesh_min >= self.start_time, """The mesh start time must be >= the FMU start time.\n
        To set the FMU start time, use the argument *start_time* in the
        FMUPointToFieldFunction constructor."""
        mesh_max = mesh.getVertices().getMax()[0]
        assert mesh_max <= self.final_time, """The mesh final time must be <= the FMU final time.\n
        To set the FMU final time, use the argument *final_time* in the
        FMUPointToFieldFunction constructor."""
def _exec(self, value_input, **kwargs):
"""Simulate the FMU for a given set of input values.
Parameters
----------
value_input : Vector or array-like with time steps as rows.
See the 'simulate' method for additional keyword arguments.
"""
return self.simulate(value_input=value_input, **kwargs)
def load_fmu(self, path_fmu, kind=None, **kwargs):
"""Load an FMU.
Parameters
----------
path_fmu : String, path to the FMU file.
kind : String, one of "ME" (model exchange) or "CS" (co-simulation)
Select a kind of FMU if both are available.
Note:
Contrary to pyfmi, the default here is "CS" (co-simulation). The
            rationale behind this choice is that co-simulation may be used
to impose a solver not available in pyfmi.
Additional keyword arguments are passed on to pyfmi's 'load_fmu'
function.
"""
self.model = fmi.load_fmu(path_fmu=os.path.expanduser(path_fmu), kind=kind, **kwargs)
def getFMUInputDescription(self):
"""Get the list of input variable names."""
return self.inputs_fmu
def getFMUOutputDescription(self):
"""Get the list of output variable names."""
return self.outputs_fmu
def initialize(self, initialization_script=None):
"""Initialize the FMU, using initialization script if available.
Parameters
----------
initialization_script : String (optional), path to the initialization
script.
"""
self.initialization_script = initialization_script
try:
self.model.setup_experiment()
except AttributeError:
pass # Probably FMI version 1.
try:
fmi.apply_initialization_script(self.model,
self.initialization_script)
except TypeError:
pass # No initialization script.
try:
self.model.initialize()
except pyfmi.fmi.FMUException as ex:
raise pyfmi.fmi.FMUException(str(ex)+'\n'+'\n'.join([str(line) for line in self.model.get_log()]))
def simulate(self, value_input=None, reset=True, **kwargs):
"""Simulate the fmu.
Parameters
----------
value_input : Vector of input values.
reset : Boolean, toggle resetting the FMU prior to simulation. True by
default.
time : Sequence of floats, time vector (optional).
timestep : Float, time step in seconds (optional).
Additional keyword arguments are passed on to the 'simulate' method of
the underlying PyFMI model object.
"""
kwargs.setdefault("initialization_script", self.initialization_script)
kwargs_simulate = fmi.parse_kwargs_simulate(
value_input, name_input=self.getFMUInputDescription(),
name_output=self.getFMUOutputDescription(),
model=self.model, **kwargs)
if "final_time" in kwargs.keys():
raise Warning("final_time must be set in the constructor.")
if "start_time" in kwargs.keys():
raise Warning("start_time must be set in the constructor.")
simulation = fmi.simulate(self.model,
reset=reset,
start_time=self.start_time,
final_time=self.final_time,
**kwargs_simulate)
time, values = fmi.strip_simulation(simulation,
name_output=self.getOutputDescription(),
final="trajectory")
local_mesh = ot.Mesh([[t] for t in time], [[i, i + 1] for i in range(len(time) - 1)])
interpolation = ot.P1LagrangeInterpolation(local_mesh, self.getOutputMesh(), self.getOutputDimension())
return interpolation(values)
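# A hypothetical usage sketch (placeholders throughout): the mesh fixes the
# time grid onto which the simulated trajectory is interpolated.
#   mesh = ot.RegularGrid(0.0, 0.1, 101)   # t in [0, 10]
#   g = FMUPointToFieldFunction(mesh, "deviation.fmu",
#                               inputs_fmu=["E"], outputs_fmu=["y"],
#                               start_time=0.0, final_time=10.0)
#   trajectory = g([3.0e7])   # y(t) sampled on the mesh vertices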
|
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
        # Build the result as a linked list, matching the declared :rtype:.
        # ListNode is assumed to be provided by the judge (as on LeetCode);
        # a minimal stub for standalone runs follows the class.
        dummy = ListNode(0)
        tail = dummy
        carry = 0
        while l1 is not None or l2 is not None or carry:
            value = (0 if l1 is None else l1.val) + (0 if l2 is None else l2.val) + carry
            carry, digit = divmod(value, 10)
            tail.next = ListNode(digit)
            tail = tail.next
            if l1 is not None:
                l1 = l1.next
            if l2 is not None:
                l2 = l2.next
        return dummy.next
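# Minimal harness for running the snippet standalone; on LeetCode, ListNode is
# provided by the judge, and this stub only mirrors its shape.
class ListNode(object):
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

if __name__ == "__main__":
    l1 = ListNode(2, ListNode(4, ListNode(3)))   # represents 342
    l2 = ListNode(5, ListNode(6, ListNode(4)))   # represents 465
    node = Solution().addTwoNumbers(l1, l2)
    digits = []
    while node:
        digits.append(node.val)
        node = node.next
    print(digits)  # [7, 0, 8], i.e. 342 + 465 = 807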
|
import praw
import time
from random import random
def makeResponseArray(replies, weights):
responseArray = []
i = 0
while i < len(replies):
responseArray.append([replies[i], weights[i]])
i += 1
return responseArray
def calcWeightSum(responseArray):
i = 0
weightSum = 0
while i < len(responseArray):
weightSum += responseArray[i][1]
i += 1
return weightSum
def makeProbArray(responseArray, weightSum):
    i = 1
probArray = [responseArray[0][1] / weightSum]
print(probArray[0])
while i < len(responseArray):
probArray.append(probArray[i-1] + (responseArray[i][1] / weightSum))
print(probArray[i])
i += 1
return probArray
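# Worked example (illustrative; assumes Python 3 division): weights [1, 3]
# yield cumulative probabilities [0.25, 1.0], so reply 0 is drawn 25% of the
# time and reply 1 the remaining 75%:
#   responses = makeResponseArray(["hi", "yo"], [1, 3])
#   probs = makeProbArray(responses, calcWeightSum(responses))  # [0.25, 1.0]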
def findComment(subreddit, hotwords, responseArray, probArray):
already_done = set()
lastMsg = -1
while True:
for comment in subreddit.get_comments():
print(comment)
time.sleep(0.5) #This is for debugging purposes; remove it to quicken process
has_hot = any(string in comment.body for string in hotwords)
if has_hot and comment.id not in already_done:
replySelect = random()
lastMsg = postReply(comment, responseArray, probArray, replySelect, lastMsg)
already_done.add(comment.id) #Records ID of comment replied to; will not reply to the same comment twice
time.sleep(600) #Waits until bot may post again
def postReply(comment, responseArray, probArray, replySelect, lastMsg):
replyPosted = 0
i = 0
while replyPosted == 0:
if replySelect <= probArray[i] and i != lastMsg:
comment.reply(responseArray[i][0])
print("Comment posted: " + responseArray[i][0])
lastMsg = i
replyPosted = 1
if i < len(probArray) - 1:
i += 1
else:
replySelect = random()
i = 0
print(lastMsg)
return lastMsg
|
from django.conf.urls import url
from rango import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^about/', views.about, name='about'),
url(r'^add_category/$', views.add_category, name='add_category'),
url(r'^category/(?P<category_name_url>\w+)/$', views.category, name='category'),
url(r'(?i)^category/(?P<category_name_url>\w+)/add_page/$', views.add_page, name='add_page'),
url(r'^register/$', views.register, name='register'),
url(r'(?i)^login/$', views.user_login, name='login'),
url(r'^restricted/', views.restricted, name='restricted'),
url(r'^logout/$', views.user_logout, name='logout'),
]
|
"""
Script to analyze the "dark" images from the experiments in the EssentialLab
"""
import glob
import os
import numpy
import matplotlib.pylab as plt
import matplotlib.patches as patches
def processimage(inputimage, clip=3):
"""
Clip image brightness to "mean +- 3 STD" (by default). Another value can
be given. This is applied to the input images if the -c commandline
parameter is given.
"""
return numpy.clip(inputimage,
numpy.mean(inputimage) - (clip * numpy.std(inputimage)),
numpy.mean(inputimage) + (clip * numpy.std(inputimage)))
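# Quick illustration with synthetic data (not part of the analysis): values
# outside mean +- 3*STD are clamped to those limits.
#   demo = numpy.random.randn(100, 100) * 50 + 1000
#   clipped = processimage(demo)
#   # clipped.min() >= demo.mean() - 3 * demo.std(), and likewise for the max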
def my_display_histogram(img, howmanybins=128, histogramcolor='k',
rangecolor='r', clip=3):
"""
Display the histogram of an input image, including the ranges we clip
the gray values to
"""
plt.hist(img.flatten(), bins=howmanybins, histtype='stepfilled',
fc=histogramcolor, alpha=0.309)
plt.axvline(x=numpy.mean(img) - clip * numpy.std(img), color=rangecolor,
linestyle='--')
plt.axvline(x=numpy.mean(img), color='k', linestyle='--')
plt.axvline(x=numpy.mean(img) + clip * numpy.std(img),
color=rangecolor, linestyle='--')
# turn off y-ticks: http://stackoverflow.com/a/2176591/323100
plt.gca().axes.get_yaxis().set_ticks([])
plt.title('Histogram. Black = mean\nRed = Display range')
BasePath = '/afs/psi.ch/user/h/haberthuer/EssentialMed/Images/' \
'DetectorElectronicsTests/EssentialLab/'
FileNames = sorted([item for item in glob.glob(os.path.join(BasePath, '*',
'*36.gray'))])
DarkFolderList = ['1421327978_noxray',
'1421327947_noxray',
'1421156423_output',
'1421155907_output']
DarkFileNames = []
for i in FileNames:
if os.path.split(os.path.dirname(i))[1] in DarkFolderList:
DarkFileNames.append(i)
print('Reading all %s images' % len(FileNames))
Images = [numpy.fromfile(item, dtype=numpy.uint16, count=-1,
sep='').reshape(1024, 1280) for item in FileNames]
print('Reading only %s dark images' % len(DarkFileNames))
DarkImages = [numpy.fromfile(item, dtype=numpy.uint16, count=-1,
sep='').reshape(1024, 1280) for item in
DarkFileNames]
plt.figure('dark images', figsize=[16, 9])
for counter, image in enumerate(DarkImages):
plt.subplot(3, len(DarkImages), counter + 1)
plt.imshow(processimage(image), interpolation='bicubic', cmap='gray_r')
    # patches.Rectangle takes (x, y) = (column, row); the zoomed slice below is
    # rows 333:383 and columns 456:556
    Zoom = patches.Rectangle((456, 333), width=100, height=50, color='g',
                             alpha=0.618)
    plt.gcf().gca().add_patch(Zoom)
FigureTitle = os.path.basename(os.path.split(FileNames[counter])[0]), \
'\n', os.path.basename(FileNames[counter]), '\nmean',\
str(round(numpy.mean(processimage(image)))), '\nSTD', \
str(round(numpy.std(processimage(image))))
plt.title(' '.join(FigureTitle))
plt.axis('off')
plt.subplot(3, len(DarkImages), counter + len(DarkImages) + 1)
plt.imshow(processimage(image)[333:333 + 50, 456:456 + 100],
interpolation='nearest', cmap='gray_r')
plt.title('Zoomed region')
plt.axis('off')
plt.subplot(3, len(DarkImages), counter + 2 * len(DarkImages) + 1)
my_display_histogram(image)
plt.xlim([0, 256])
plt.show()
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ContactFormCategory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('slug', models.SlugField(max_length=256, verbose_name='Slug')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ContactFormCategoryTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=256)),
('language_code', models.CharField(max_length=15, db_index=True)),
('master', models.ForeignKey(related_name='translations', editable=False, to='contact_form.ContactFormCategory', null=True)),
],
options={
'managed': True,
'abstract': False,
'db_table': 'contact_form_contactformcategory_translation',
'db_tablespace': '',
},
),
migrations.AlterUniqueTogether(
name='contactformcategorytranslation',
unique_together=set([('language_code', 'master')]),
),
]
|
import logging
from ibmsecurity.utilities import tools
logger = logging.getLogger(__name__)
requires_model = "Appliance"
uri = "/wga/reverseproxy"
def get_all(isamAppliance, instance_id, check_mode=False, force=False):
"""
Retrieving all statistics components and details - Reverse Proxy
"""
try:
return isamAppliance.invoke_get("Retrieving all statistics components - Reverse Proxy",
"{0}/{1}/statistics".format(uri, instance_id),requires_model=requires_model)
    except Exception:
# Return empty array - exception thrown if list has no entries or does not exist
ret_obj = isamAppliance.create_return_object()
ret_obj['data'] = {}
return ret_obj
def get_all_logs(isamAppliance, instance_id, component_id, check_mode=False, force=False):
"""
Retrieving all log files for a component - Reverse Proxy
"""
return isamAppliance.invoke_get("Retrieving all statistics log files for a component - Reverse Proxy",
"{0}/{1}/statistics/{2}/stats_files".format(uri,
instance_id,
component_id),requires_model=requires_model)
def get(isamAppliance, instance_id, component_id, file_id, options=None,
size=None, start=None, check_mode=False, force=False):
"""
Retrieving snippets of a statistics log file for a component - Reverse Proxy
"""
    # note the {4} placeholder: without it str.format silently drops the query string
    return isamAppliance.invoke_get("Retrieving snippet of a statistics log file - Reverse Proxy",
                                    "{0}/{1}/statistics/{2}/stats_files/{3}{4}".format(uri,
                                                                                       instance_id,
                                                                                       component_id,
                                                                                       file_id,
                                                                                       tools.create_query_string(
                                                                                           options=options, start=start,
                                                                                           size=size)), requires_model=requires_model)
def export_file(isamAppliance, instance_id, component_id, file_id, filename, check_mode=False, force=False):
"""
Exporting a statistics log file for a component - Reverse Proxy
"""
import os.path
if force is True or (os.path.exists(filename) is False):
if check_mode is False: # No point downloading a file if in check_mode
return isamAppliance.invoke_get_file("Exporting a Reverse Proxy statistics log file.",
"{0}/{1}/statistics/{2}/stats_files/{3}?export".format(uri,
instance_id,
component_id,
file_id),
filename,requires_model=requires_model)
return isamAppliance.create_return_object()
def set(isamAppliance, instance_id, component_id, status, hours, mins, secs,
count, flush_interval, rollover_size, max_rollover_files, compress,
check_mode=False, force=False):
"""
Modify the statistics settings for a component
"""
check_value, warnings = _check(isamAppliance,instance_id)
if check_mode is True and check_value is True:
return isamAppliance.create_return_object(changed=True,warnings=warnings)
else:
return isamAppliance.invoke_put(
"Modify statistics settings for a component",
"{0}/{1}/statistics/{2}".format(uri,
instance_id,
component_id),
{
'status': status,
'interval_hours': hours,
'interval_mins': mins,
'interval_secs': secs,
'count': count,
'flush_interval': flush_interval,
'rollover_size': rollover_size,
'max_rollover_files': max_rollover_files,
'compress': compress
},requires_model=requires_model)
def delete(isamAppliance, instance_id, component_id, file_id, check_mode=False, force=False):
"""
Deleting the statistics log file or rollover file for a component - Reverse Proxy
"""
check_value, warnings = _check(isamAppliance,instance_id)
if force is False:
try:
            # get_default_snippet() is not defined in this module; probe the file with get()
            ret_obj = get(isamAppliance, instance_id, component_id, file_id)
delete_required = True # Exception thrown if the file is empty
        except Exception:
delete_required = False
if force is True or delete_required is True:
if check_mode is True and check_value is True:
return isamAppliance.create_return_object(changed=True,warnings=warnings)
else:
return isamAppliance.invoke_delete(
"Deleting a statistics log file",
"{0}/{1}/statistics/{2}/stats_files/{3}".format(uri,
instance_id,
component_id,
file_id),requires_model=requires_model)
return isamAppliance.create_return_object(warnings=warnings)
def delete_all(isamAppliance, instance_id, component_id, check_mode=False, force=False):
"""
Deleting all the log files for a component - Reverse Proxy
"""
check_value, warnings = _check(isamAppliance,instance_id)
if force is False:
try:
ret_obj = get_all_logs(isamAppliance, instance_id, component_id)
delete_required = True # Exception thrown if the file is empty
        except Exception:
delete_required = False
if force is True or delete_required is True:
if check_mode is True and check_value is True:
return isamAppliance.create_return_object(changed=True,warnings=warnings)
else:
return isamAppliance.invoke_delete(
"Deleting all statistics log files",
"{0}/{1}/statistics/{2}/stats_files".format(uri,
instance_id,
component_id),requires_model=requires_model)
return isamAppliance.create_return_object(warnings=warnings)
def _check(isamAppliance, instance_id):
    """
    Check whether the target is an appliance (i.e. no warnings were returned)
    :param isamAppliance:
    :return: true|false, warnings message
    """
    ret_obj = get_all(isamAppliance, instance_id)
    warnings = ret_obj['warnings']
    check_value = warnings == []
    return check_value, warnings
|
__author__ = 'alan'
class Solution(object):
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
r = [1]
p1 = p2 = p3 = 0
while len(r) < n:
t1, t2, t3 = r[p1] * 2, r[p2] * 3, r[p3] * 5
t = min(t1, t2, t3)
r.append(t)
if t == t1:
p1 += 1
if t == t2:
p2 += 1
if t == t3:
p3 += 1
return r[n - 1]
if __name__ == "__main__":
sol = Solution()
print(sol.nthUglyNumber(7))
|
from model.group import Group
class GroupHelper:
def __init__(self, app):
self.app = app
group_cache = None
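    # Cached list of Group objects; every mutating action resets it to None so
    # that get_group_list() re-reads the page on its next call.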
def create(self, group):
wd = self.app.wd
self.open_group_page()
wd.find_element_by_name("new").click()
self.set_fields(group)
wd.find_element_by_name("submit").click()
self.return_to_group_page()
self.group_cache = None
def select_group_by_index(self, index):
wd = self.app.wd
xpath = "//span[%s]/input[@name='selected[]']" % str(index+1)
wd.find_element_by_xpath(xpath).click()
def select_group_by_id(self, id):
wd = self.app.wd
css = "input[value='%s']" % str(id)
wd.find_element_by_css_selector(css).click()
def delete_first(self):
self.delete_by_index(0)
def delete_by_index(self, index):
wd = self.app.wd
self.open_group_page()
self.select_group_by_index(index)
wd.find_element_by_name("delete").click()
self.return_to_group_page()
self.group_cache = None
def delete_by_id(self, id):
wd = self.app.wd
self.open_group_page()
self.select_group_by_id(id)
wd.find_element_by_name("delete").click()
self.return_to_group_page()
self.group_cache = None
def modify_by_index(self, edition, index):
wd = self.app.wd
self.open_group_page()
self.select_group_by_index(index)
wd.find_element_by_name("edit").click()
self.set_fields(edition)
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
def modify_by_id(self, edition, id):
wd = self.app.wd
self.open_group_page()
self.select_group_by_id(id)
wd.find_element_by_name("edit").click()
self.set_fields(edition)
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
def modify_first(self, edition):
self.modify_by_index(edition, 0)
def count(self):
wd = self.app.wd
self.open_group_page()
return len(wd.find_elements_by_name("selected[]"))
def return_to_group_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
def open_group_page(self):
wd = self.app.wd
if not(wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new"))>0):
wd.find_element_by_link_text("groups").click()
def set_fields(self, group):
self.app.type_text("group_name", group.name)
self.app.type_text("group_header", group.header)
self.app.type_text("group_footer", group.footer)
def get_group_list(self):
if self.group_cache is None:
self.group_cache = []
wd = self.app.wd
self.open_group_page()
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.group_cache.append(Group(name=text, id=id))
return list(self.group_cache)
|
from tempest.api.identity import base
from tempest import auth
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class TestDefaultProjectId (base.BaseIdentityV3AdminTest):
@classmethod
def setup_credentials(cls):
cls.set_network_resources()
super(TestDefaultProjectId, cls).setup_credentials()
def _delete_domain(self, domain_id):
# It is necessary to disable the domain before deleting,
# or else it would result in unauthorized error
self.client.update_domain(domain_id, enabled=False)
self.client.delete_domain(domain_id)
@test.attr(type='smoke')
def test_default_project_id(self):
# create a domain
dom_name = data_utils.rand_name('dom')
domain_body = self.client.create_domain(dom_name)
dom_id = domain_body['id']
self.addCleanup(self._delete_domain, dom_id)
# create a project in the domain
proj_name = data_utils.rand_name('proj')
proj_body = self.client.create_project(proj_name, domain_id=dom_id)
proj_id = proj_body['id']
self.addCleanup(self.client.delete_project, proj_id)
self.assertEqual(proj_body['domain_id'], dom_id,
"project " + proj_name +
"doesn't have domain id " + dom_id)
        # create a user in the domain, with the previous project as its
        # default project
user_name = data_utils.rand_name('user')
user_body = self.client.create_user(user_name, password=user_name,
domain_id=dom_id,
default_project_id=proj_id)
user_id = user_body['id']
self.addCleanup(self.client.delete_user, user_id)
self.assertEqual(user_body['domain_id'], dom_id,
"user " + user_name +
"doesn't have domain id " + dom_id)
# get roles and find the admin role
admin_role = self.get_role_by_name("admin")
admin_role_id = admin_role['id']
# grant the admin role to the user on his project
self.client.assign_user_role_on_project(proj_id, user_id,
admin_role_id)
# create a new client with user's credentials (NOTE: unscoped token!)
creds = auth.KeystoneV3Credentials(username=user_name,
password=user_name,
domain_name=dom_name)
auth_provider = auth.KeystoneV3AuthProvider(creds,
CONF.identity.uri_v3)
creds = auth_provider.fill_credentials()
admin_client = clients.Manager(credentials=creds)
# verify the user's token and see that it is scoped to the project
token, auth_data = admin_client.auth_provider.get_auth()
result = admin_client.identity_v3_client.get_token(token)
self.assertEqual(result['project']['domain']['id'], dom_id)
self.assertEqual(result['project']['id'], proj_id)
|
import unittest
from mock import Mock
from airflow.jobs import BackfillJob
from airflow.models import DagRun
from airflow.ti_deps.deps.dagrun_id_dep import DagrunIdDep
class TestDagrunRunningDep(unittest.TestCase):
def test_dagrun_id_is_backfill(self):
"""
Task instances whose dagrun ID is a backfill dagrun ID should fail this dep.
"""
dagrun = DagRun()
dagrun.run_id = BackfillJob.ID_PREFIX + '_something'
ti = Mock(get_dagrun=Mock(return_value=dagrun))
self.assertFalse(DagrunIdDep().is_met(ti=ti))
def test_dagrun_id_is_not_backfill(self):
"""
Task instances whose dagrun ID is not a backfill dagrun ID should pass this dep.
"""
dagrun = DagRun()
dagrun.run_id = 'notbackfill_something'
ti = Mock(get_dagrun=Mock(return_value=dagrun))
self.assertTrue(DagrunIdDep().is_met(ti=ti))
dagrun = DagRun()
dagrun.run_id = None
ti = Mock(get_dagrun=Mock(return_value=dagrun))
self.assertTrue(DagrunIdDep().is_met(ti=ti))
|
from simplebus import SimpleBus
def create_simplebus():
bus = SimpleBus('unittests')
bus.config.from_object(SimpleBusConfig())
return bus
class SimpleBusConfig(object):
pass
|
import logging
import os
import sys
import time
import boto
import boto.ec2
import yaml
from boto.ec2.blockdevicemapping import BlockDeviceMapping
from boto.ec2.blockdevicemapping import BlockDeviceType
from fabric.api import sudo, run, env
from fabric.decorators import task
import helper
__author__ = "Kyle Bush"
__copyright__ = "Copyright 2016"
__license__ = "Apache 2.0"
__version__ = "1.0.0"
__maintainer__ = "Kyle Bush"
__status__ = "Development"
__credits__ = ["https://github.com/CrowdStrike/cassandra-tools"]
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def mount_ebs_volumes(host_config):
env.host_string = helper.get_env_host_string(host_config)
env.user = helper.get_env_user(host_config)
env.key_filename = helper.get_env_key_filename(host_config)
sudo("apt-get -y install xfsprogs")
for ebs in host_config['ec2-mounts']:
device = ebs['device']
mount = ebs['mount']
sudo("mkdir -p {}".format(mount))
sudo("mv /etc/fstab /etc/fstab.old")
sudo("touch /etc/fstab")
if sudo('mkfs.xfs -f {0}'.format(device), warn_only=True):
run("echo '{0}\t{1}\txfs\tdefaults\t0\t0' | sudo tee -a /etc/fstab".format(device, mount))
sudo('sudo mount -a')
logger.info("EBS volume {} : {} mounted.".format(device, mount))
@task
def ec2_provision(_config_yaml_file, nodes=1):
"""Provisions EC2 instances | args : config_yaml_file, nodes"""
config_path = "{}/{}".format(os.getcwd(), _config_yaml_file)
with open(config_path, 'r') as yaml_file:
env.config = yaml.load(yaml_file)
env.config['total-nodes'] = nodes
logger.info(env.config)
if env.config['verbose']:
logger.info("Starting EC2 provisioning (with config: %s)..." % config_path)
aws_provision()
if env.config['verbose']:
logger.info("Done provisioning instances!")
def aws_load_credentials():
if env.config['verbose']:
logger.info("[aws_load_credentials]")
env.config['aws_key'] = os.environ.get('AWS_ACCESS_KEY_ID', '')
env.config['aws_secret'] = os.environ.get('AWS_SECRET_ACCESS_KEY', '')
def aws_connect():
if env.config['verbose']:
logging.info("[aws_connect]")
target_region = None
if 'region' in env.config:
target_region = boto.ec2.get_region(env.config['region'])
conn = boto.connect_ec2(env.config['aws_key'], env.config['aws_secret'], region=target_region)
return conn
def aws_provision():
if env.config['verbose']:
logger.info("[aws_provision]")
aws_load_credentials()
conn = aws_connect()
reservation = None
subnet_id = None
security_groups = None
security_group_ids = None
shutdown_behavior = None
# NOTE: shutdown behavior can only be defined for EBS backed instances
if 'ebs-instance' in env.config:
if env.config['ebs-instance']:
shutdown_behavior = 'terminate'
else:
shutdown_behavior = 'stop'
if env.config['in_vpc']:
security_group_ids = env.config['security-groups']
else:
security_groups = env.config['security-groups']
device_map = None
if 'ebs' in env.config and env.config['ebs']:
device_map = BlockDeviceMapping()
for volume in env.config['ebs']['volumes']:
device = volume['device']
vol = BlockDeviceType()
vol.size = volume['size_gb']
vol.volume_type = volume['type']
vol.delete_on_termination = True
device_map[device] = vol
ami_image = env.config['ami-id']
aws_az = env.config['az']
subnet = env.config['subnets'][aws_az]['subnet']
try:
logger.info("Launching in AZ: {0}".format(aws_az))
if env.config['dryrun']:
logger.warn("DRY RUN, NOT LAUNCHING....")
sys.exit()
else:
reservation = conn.run_instances(
ami_image,
placement=aws_az,
min_count=env.config['total-nodes'],
max_count=env.config['total-nodes'],
instance_initiated_shutdown_behavior=shutdown_behavior,
instance_type=env.config['instance-type'],
key_name=env.config['ssh_keys']['key-pair-name'],
subnet_id=subnet,
security_groups=security_groups,
security_group_ids=security_group_ids,
ebs_optimized=env.config['ebs-optimized'],
block_device_map=device_map)
except boto.exception.EC2ResponseError as x:
logger.error("Failed to start an AWS instance: %s" % x)
return
except Exception as e:
logger.error("Got reservation error", e)
return
if reservation:
logger.info('Waiting for VM instances to start...')
# time.sleep(10)
instance_set_info = []
instance_ids = [] # stores a list of all the active instanceids we can use to attach ebs volumes to
instance_private_ips = []
instance_public_dns = []
is_first = True
first_node_ip = None
for i, instance in enumerate(reservation.instances):
status = instance.update()
while not status == 'running':
logger.info("Instance status: %s" % status)
if status == 'terminated':
sys.exit(-1)
time.sleep(4)
status = instance.update()
if env.config['verbose']:
logger.info("Instance ID: %s" % instance.id)
logger.info("Instance Private IP: %s" % instance.private_ip_address)
logger.info("Instance Public IP: %s" % instance.public_dns_name)
if is_first:
first_node_ip = instance.private_ip_address
info = {'Id': instance.id, 'PrivateIp': instance.private_ip_address, 'PublicDnsName': instance.public_dns_name,
'FirstNode': is_first}
is_first = False
instance_set_info.append(info)
instance_ids.append(instance.id)
instance_private_ips.append(instance.private_ip_address)
instance_public_dns.append(instance.public_dns_name)
tags = env.config['tags']
for instance in reservation.instances:
conn.create_tags([instance.id], tags)
print "-" * 50
print " *** HOSTS ***"
print "-" * 50
print "hosts:"
for idx, private_ip in enumerate(instance_private_ips):
print " - name: {}".format(tags['Name'])
print " private-ip: {}".format(private_ip)
print " public-ip: {}".format(instance_public_dns[idx])
print "-" * 50
|
"""Tests for app functions."""
import os
import unittest
from clusterfuzz._internal.platforms.android import settings
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
DATA_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'settings_data')
def _read_data_file(filename):
return open(os.path.join(DATA_PATH, filename)).read()
class GetDeviceCodenameTest(unittest.TestCase):
"""Tests for get_device_codename."""
def setUp(self):
test_helpers.patch(
self, ['clusterfuzz._internal.platforms.android.adb.run_command'])
test_helpers.patch_environ(self)
output = _read_data_file('get_device_codename_output.txt')
self.mock.run_command.return_value = output
def test_by_serial(self):
"""Ensure that we report the correct codename for serial number."""
os.environ['ANDROID_SERIAL'] = '123456789012'
self.assertEqual(settings.get_device_codename(), 'device1')
def test_by_usb(self):
"""Ensure that we report the correct codename for a usb device."""
os.environ['ANDROID_SERIAL'] = 'usb:2-4.2'
self.assertEqual(settings.get_device_codename(), 'device2')
|
"""Chicago taxi example using TFX on Beam."""
import os
from absl import logging
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
from tfx.components.schema_gen.component import SchemaGen
from tfx.components.statistics_gen.component import StatisticsGen
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
_pipeline_name = 'chicago_taxi_beam'
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str,
metadata_path: str) -> pipeline.Pipeline:
"""Implements the chicago taxi pipeline with TFX."""
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input_base=data_root)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
# Generates schema based on statistics files.
infer_schema = SchemaGen(statistics=statistics_gen.outputs['statistics'])
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[example_gen, statistics_gen, infer_schema],
enable_cache=True,
metadata_connection_config=metadata.sqlite_metadata_connection_config(
metadata_path),
additional_pipeline_args={},
)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
BeamDagRunner().run(
_create_pipeline(
pipeline_name=_pipeline_name,
pipeline_root=_pipeline_root,
data_root=_data_root,
metadata_path=_metadata_path))
|
"""
Exception definitions.
"""
class UnsupportedVersion(Exception):
"""Indicates that the user is trying to use an unsupported
version of the API.
"""
pass
class CommandError(Exception):
pass
class AuthorizationFailure(Exception):
pass
class NoUniqueMatch(Exception):
pass
class AuthSystemNotFound(Exception):
"""When the user specify a AuthSystem but not installed."""
def __init__(self, auth_system):
self.auth_system = auth_system
def __str__(self):
return "AuthSystemNotFound: %s" % repr(self.auth_system)
class NoTokenLookupException(Exception):
"""This form of authentication does not support looking up
endpoints from an existing token.
"""
pass
class EndpointNotFound(Exception):
"""Could not find Service or Region in Service Catalog."""
pass
class AmbiguousEndpoints(Exception):
"""Found more than one matching endpoint in Service Catalog."""
def __init__(self, endpoints=None):
self.endpoints = endpoints
def __str__(self):
return "AmbiguousEndpoints: %s" % repr(self.endpoints)
class ConnectionRefused(Exception):
"""
Connection refused: the server refused the connection.
"""
def __init__(self, response=None):
self.response = response
def __str__(self):
return "ConnectionRefused: %s" % repr(self.response)
class InstanceInErrorState(Exception):
"""Instance is in the error state."""
pass
class VersionNotFoundForAPIMethod(Exception):
msg_fmt = "API version '%(vers)s' is not supported on '%(method)s' method."
def __init__(self, version, method):
self.version = version
self.method = method
def __str__(self):
return self.msg_fmt % {"vers": self.version, "method": self.method}
class ClientException(Exception):
"""
The base exception class for all exceptions this library raises.
"""
message = 'Unknown Error'
def __init__(self, code, message=None, details=None, request_id=None,
url=None, method=None):
self.code = code
self.message = message or self.__class__.message
self.details = details
self.request_id = request_id
self.url = url
self.method = method
def __str__(self):
formatted_string = "%s (HTTP %s)" % (self.message, self.code)
if self.request_id:
formatted_string += " (Request-ID: %s)" % self.request_id
return formatted_string
class RetryAfterException(ClientException):
"""
The base exception class for ClientExceptions that use Retry-After header.
"""
def __init__(self, *args, **kwargs):
try:
self.retry_after = int(kwargs.pop('retry_after'))
except (KeyError, ValueError):
self.retry_after = 0
super(RetryAfterException, self).__init__(*args, **kwargs)
class BadRequest(ClientException):
"""
HTTP 400 - Bad request: you sent some malformed data.
"""
http_status = 400
message = "Bad request"
class Unauthorized(ClientException):
"""
HTTP 401 - Unauthorized: bad credentials.
"""
http_status = 401
message = "Unauthorized"
class Forbidden(ClientException):
"""
HTTP 403 - Forbidden: your credentials don't give you access to this
resource.
"""
http_status = 403
message = "Forbidden"
class NotFound(ClientException):
"""
HTTP 404 - Not found
"""
http_status = 404
message = "Not found"
class MethodNotAllowed(ClientException):
"""
HTTP 405 - Method Not Allowed
"""
http_status = 405
message = "Method Not Allowed"
class NotAcceptable(ClientException):
"""
HTTP 406 - Not Acceptable
"""
http_status = 406
message = "Not Acceptable"
class Conflict(ClientException):
"""
HTTP 409 - Conflict
"""
http_status = 409
message = "Conflict"
class OverLimit(RetryAfterException):
"""
HTTP 413 - Over limit: you're over the API limits for this time period.
"""
http_status = 413
message = "Over limit"
class RateLimit(RetryAfterException):
"""
HTTP 429 - Rate limit: you've sent too many requests for this time period.
"""
http_status = 429
message = "Rate limit"
class HTTPNotImplemented(ClientException):
"""
HTTP 501 - Not Implemented: the server does not support this operation.
"""
http_status = 501
message = "Not Implemented"
_error_classes = [BadRequest, Unauthorized, Forbidden, NotFound,
MethodNotAllowed, NotAcceptable, Conflict, OverLimit,
RateLimit, HTTPNotImplemented]
_code_map = dict((c.http_status, c) for c in _error_classes)
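# For example, _code_map[404] is NotFound and _code_map[429] is RateLimit, which
# lets from_response() below pick the matching subclass from a status code.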
class InvalidUsage(RuntimeError):
"""This function call is invalid in the way you are using this client.
Due to the transition to using keystoneclient some function calls are no
longer available. You should make a similar call to the session object
instead.
"""
pass
def from_response(response, body, url, method=None):
"""
    Return an instance of a ClientException or subclass
    based on a requests response.
    Usage::
        resp, body = requests.request(...)
        if resp.status_code != 200:
            raise from_response(resp, body, url, method)
"""
cls = _code_map.get(response.status_code, ClientException)
kwargs = {
'code': response.status_code,
'method': method,
'url': url,
'request_id': None,
}
if response.headers:
kwargs['request_id'] = response.headers.get('x-compute-request-id')
if (issubclass(cls, RetryAfterException) and
'retry-after' in response.headers):
kwargs['retry_after'] = response.headers.get('retry-after')
if body:
message = "n/a"
details = "n/a"
if hasattr(body, 'keys'):
error = body[list(body)[0]]
message = error.get('message')
details = error.get('details')
kwargs['message'] = message
kwargs['details'] = details
return cls(**kwargs)
class ResourceNotFound(Exception):
"""Error in getting the resource."""
pass
|
"""Tests for open_spiel.python.algorithms.jpsro."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python.algorithms import jpsro
import pyspiel
GAMES = (
"sheriff_2p_gabriele",
)
SWEEP_KWARGS = [
dict( # pylint: disable=g-complex-comprehension
game_name=game,
iterations=iterations,
policy_init=policy_init,
update_players_strategy=update_players_strategy,
target_equilibrium=target_equilibrium,
br_selection=br_selection,
train_meta_solver=train_meta_solver,
eval_meta_solver=eval_meta_solver,
ignore_repeats=ignore_repeats,
) for (
iterations,
game,
policy_init,
update_players_strategy,
target_equilibrium,
br_selection,
train_meta_solver,
eval_meta_solver,
ignore_repeats) in itertools.product(
[2],
GAMES,
jpsro.INIT_POLICIES,
jpsro.UPDATE_PLAYERS_STRATEGY,
jpsro.BRS,
jpsro.BR_SELECTIONS,
jpsro.META_SOLVERS,
["mwcce"],
[True, False])
]
TEST_COUNT_LIMIT = 100
interval = len(SWEEP_KWARGS) // TEST_COUNT_LIMIT
interval = interval if interval % 2 != 0 else interval + 1 # Odd interval.
SWEEP_KWARGS = SWEEP_KWARGS[::interval]
def get_game(game_name):
"""Returns the game."""
if game_name == "kuhn_poker_3p":
game_name = "kuhn_poker"
game_kwargs = {"players": int(3)}
elif game_name == "trade_comm_2p_2i":
game_name = "trade_comm"
game_kwargs = {"num_items": int(2)}
elif game_name == "sheriff_2p_gabriele":
game_name = "sheriff"
game_kwargs = {
"item_penalty": float(1.0),
"item_value": float(5.0),
"max_bribe": int(2),
"max_items": int(10),
"num_rounds": int(2),
"sheriff_penalty": float(1.0),
}
else:
raise ValueError("Unrecognised game: %s" % game_name)
return pyspiel.load_game_as_turn_based(game_name, game_kwargs)
class JPSROTest(parameterized.TestCase, absltest.TestCase):
@parameterized.parameters(*SWEEP_KWARGS)
def test_jpsro_cce(self, **kwargs):
game = get_game(kwargs["game_name"])
jpsro.run_loop(game=game, **kwargs)
if __name__ == "__main__":
absltest.main()
|
''' config.py '''
import string
from heron.statemgrs.src.python.config import Config as StateMgrConfig
STATEMGRS_KEY = "statemgrs"
EXTRA_LINKS_KEY = "extra.links"
EXTRA_LINK_NAME_KEY = "name"
EXTRA_LINK_FORMATTER_KEY = "formatter"
EXTRA_LINK_URL_KEY = "url"
class Config:
"""
Responsible for reading the yaml config file and
exposing various tracker configs.
"""
FORMATTER_PARAMETERS = {"CLUSTER", "ENVIRON", "TOPOLOGY", "ROLE", "USER"}
def __init__(self, configs):
self.configs = configs
self.statemgr_config = StateMgrConfig()
self.extra_links = []
self.load_configs()
def load_configs(self):
"""load config files"""
self.statemgr_config.set_state_locations(self.configs[STATEMGRS_KEY])
if EXTRA_LINKS_KEY in self.configs:
for extra_link in self.configs[EXTRA_LINKS_KEY]:
self.extra_links.append(self.validate_extra_link(extra_link))
def validate_extra_link(self, extra_link: dict):
"""validate extra link"""
if EXTRA_LINK_NAME_KEY not in extra_link or EXTRA_LINK_FORMATTER_KEY not in extra_link:
raise Exception("Invalid extra.links format. " +
"Extra link must include a 'name' and 'formatter' field")
self.validated_formatter(extra_link[EXTRA_LINK_FORMATTER_KEY])
return extra_link
def validated_formatter(self, url_format: str) -> None:
"""Check visualization url format has no unrecongnised parameters."""
# collect the parameters which would be interpolated
formatter_variables = set()
class ValidationHelper:
def __getitem__(self, key):
formatter_variables.add(key)
return ""
string.Template(url_format).safe_substitute(ValidationHelper())
if not formatter_variables <= self.FORMATTER_PARAMETERS:
raise Exception(f"Invalid viz.url.format: {url_format!r}")
@staticmethod
def get_formatted_url(formatter: str, execution_state: dict) -> str:
"""
Format a url string using values from the execution state.
"""
subs = {
var: execution_state[prop]
for prop, var in (
("cluster", "CLUSTER"),
("environ", "ENVIRON"),
("jobname", "TOPOLOGY"),
("role", "ROLE"),
("submission_user", "USER"))
if prop in execution_state
}
return string.Template(formatter).substitute(subs)
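  # Illustrative use (values assumed): a formatter of
  # "http://viz.example.com/${CLUSTER}/${TOPOLOGY}" combined with an
  # execution_state of {"cluster": "local", "jobname": "word-count"} yields
  # "http://viz.example.com/local/word-count".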
def __str__(self):
return "".join((self.config_str(c) for c in self.configs[STATEMGRS_KEY]))
@staticmethod
def config_str(config):
keys = ("type", "name", "hostport", "rootpath", "tunnelhost")
return "".join("\t{}: {}\n".format(k, config[k]) for k in keys if k in config).rstrip()
|
"""Library for heterogeneous SBMs with node features."""
from typing import List, Tuple
import numpy as np
def GetClusterTypeComponents(
num_clusters_list):
"""Given a list of # clusters per-type, compute cross-type cluster components.
This function expands num_clusters_lists into a list of cluster index lists --
one list of size num_clusters_list[i] for each i-th entry. It then assigns
each member of each list to a unique component out of min(num_clusters_list)
components.
For example, an input [3, 4, 2] implies three clusters for type-1 nodes, four
  clusters for type-2 nodes, and two clusters for type-3 nodes. This function
will return two type components, and evenly (or as-evenly-as-possible) divide
the cluster indices of each type among the two components. See the test file
for the expected outputs from this example.
Arguments:
num_clusters_list: list of the number of clusters for each node type.
Returns:
output: a 2-tuple with the following elements:
cluster_index_lists: a list of cluster index lists.
type_components: a list of cluster index sets, giving the type components.
"""
# Compute the cluster_index_lists.
offset = 0
cluster_index_lists = []
for num_clusters in num_clusters_list:
cluster_index_lists.append(list(range(offset, offset + num_clusters)))
offset += num_clusters
# Compute type_components.
num_components = np.min(num_clusters_list)
type_components = [list() for _ in range(num_components)]
for cluster_index_list in cluster_index_lists:
for i in cluster_index_list:
type_components[i % num_components].append(i)
return (cluster_index_lists, type_components)
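# Worked example (follows from the code above): for input [3, 4, 2],
# cluster_index_lists is [[0, 1, 2], [3, 4, 5, 6], [7, 8]] and, with
# min(3, 4, 2) = 2 components filled round-robin by index % 2,
# type_components is [[0, 2, 4, 6, 8], [1, 3, 5, 7]].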
def GetCrossLinks(num_clusters_list,
type_index1,
type_index2):
"""Returns the cross-type component linking between two specified clusterings.
The linking is given as a list of tuples. Each tuple contains two cluster
indices. Indices are decided based on the first output when num_clusters_list
is passed to GetClusterTypeComponents. Each tuple is contained in the second
output of the same function.
Arguments:
num_clusters_list: list of the number of clusters for each node type.
type_index1: the first node type to return in the linking.
type_index2: the second node type to return in the linking.
Returns:
cross_links: list of cluster index tuples.
"""
cluster_index_lists, type_components = GetClusterTypeComponents(
num_clusters_list)
cross_links = []
for component in type_components:
for cluster_index1 in component:
for cluster_index2 in component:
if (cluster_index1 in cluster_index_lists[type_index1] and
cluster_index2 in cluster_index_lists[type_index2]):
cross_links.append((cluster_index1, cluster_index2))
return cross_links
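# Worked example: GetCrossLinks([3, 4], 0, 1) returns
# [(0, 3), (0, 6), (1, 4), (2, 5)], i.e. the pairs of type-1 and type-2 cluster
# indices that share a type component.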
def _GetHomogeneousPropMat(num_clusters, p_to_q_ratio):
"""Generates a proportion matrix within a type."""
base_prop_mat = np.ones(shape=(num_clusters, num_clusters))
np.fill_diagonal(base_prop_mat, p_to_q_ratio)
return base_prop_mat
def _GetCrossPropMat(num_clusters1, num_clusters2, cross_links, p_to_q_ratio):
"""Helper function to generate a proporation matrix across types."""
base_prop_mat = np.ones(shape=(num_clusters1, num_clusters2))
for link in cross_links:
base_prop_mat[link[0], link[1] - num_clusters1] = p_to_q_ratio
return base_prop_mat
def GetPropMat(num_clusters1, p_to_q_ratio1,
num_clusters2 = 0, p_to_q_ratio2 = 0,
p_to_q_ratio_cross = 0.0):
"""Generates a proportion matrix for the heterogeneous SBM.
Arguments:
num_clusters1: Number of clusters of nodes of type 1.
p_to_q_ratio1: Probability of in-cluster edges divided by probability of
out-cluster edges, for type 1 nodes.
num_clusters2: Number of clusters of nodes of type 2.
p_to_q_ratio2: Probability of in-cluster edges divided by probability of
out-cluster edges, for type 2 nodes.
p_to_q_ratio_cross: Probability of in-cluster edges divided by probability
of out-cluster edges, for node clusters that are linked across-type.
Returns:
prop_mat: proportion matrix for input to
simulations.GenerateStochasticBlockModelWithFeatures.
"""
base_prop_mat = np.zeros(
shape=(num_clusters1 + num_clusters2, num_clusters1 + num_clusters2))
base_prop_mat[0:num_clusters1,
0:num_clusters1] = _GetHomogeneousPropMat(num_clusters1,
p_to_q_ratio1)
if num_clusters2 == 0:
return base_prop_mat
cross_links = GetCrossLinks([num_clusters1, num_clusters2], 0, 1)
base_prop_mat[
(num_clusters1):(num_clusters1 + num_clusters2),
(num_clusters1):(num_clusters1 + num_clusters2)] = _GetHomogeneousPropMat(
num_clusters2, p_to_q_ratio2)
cross_prop_mat = _GetCrossPropMat(num_clusters1, num_clusters2, cross_links,
p_to_q_ratio_cross)
base_prop_mat[0:num_clusters1,
(num_clusters1):(num_clusters1 +
num_clusters2)] = cross_prop_mat
base_prop_mat[(num_clusters1):(num_clusters1 + num_clusters2),
0:num_clusters1] = cross_prop_mat.T
return base_prop_mat
|
import pytest
import sdk_install as install
import sdk_tasks as tasks
import sdk_marathon as marathon
import sdk_utils as utils
from tests.test_utils import (
PACKAGE_NAME,
SERVICE_NAME,
DEFAULT_BROKER_COUNT,
DYNAMIC_PORT_OPTIONS_DICT,
STATIC_PORT_OPTIONS_DICT,
DEFAULT_POD_TYPE,
service_cli
)
def setup_module(module):
install.uninstall(SERVICE_NAME, PACKAGE_NAME)
utils.gc_frameworks()
def teardown_module(module):
install.uninstall(SERVICE_NAME, PACKAGE_NAME)
@pytest.yield_fixture
def dynamic_port_config():
install.install(PACKAGE_NAME,
DEFAULT_BROKER_COUNT,
service_name=SERVICE_NAME,
additional_options=DYNAMIC_PORT_OPTIONS_DICT)
yield
install.uninstall(SERVICE_NAME, PACKAGE_NAME)
@pytest.fixture
def static_port_config():
install.install(PACKAGE_NAME,
DEFAULT_BROKER_COUNT,
service_name=SERVICE_NAME,
additional_options=STATIC_PORT_OPTIONS_DICT)
@pytest.mark.sanity
def test_dynamic_port_comes_online(dynamic_port_config):
tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)
@pytest.mark.sanity
def test_static_port_comes_online(static_port_config):
tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)
@pytest.mark.sanity
def test_port_static_to_static_port():
tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)
broker_ids = tasks.get_task_ids(SERVICE_NAME, '{}-'.format(DEFAULT_POD_TYPE))
config = marathon.get_config(SERVICE_NAME)
print('Old Config :{}'.format(config))
for broker_id in range(DEFAULT_BROKER_COUNT):
result = service_cli('broker get {}'.format(broker_id))
assert result['port'] == 9092
config['env']['BROKER_PORT'] = '9095'
marathon.update_app(SERVICE_NAME, config)
print('New Config :{}'.format(config))
tasks.check_tasks_updated(SERVICE_NAME, '{}-'.format(DEFAULT_POD_TYPE), broker_ids)
# all tasks are running
tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)
for broker_id in range(DEFAULT_BROKER_COUNT):
result = service_cli('broker get {}'.format(broker_id))
assert result['port'] == 9095
@pytest.mark.sanity
def test_port_static_to_dynamic_port():
tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)
broker_ids = tasks.get_task_ids(SERVICE_NAME, '{}-'.format(DEFAULT_POD_TYPE))
config = marathon.get_config(SERVICE_NAME)
config['env']['BROKER_PORT'] = '0'
marathon.update_app(SERVICE_NAME, config)
tasks.check_tasks_updated(SERVICE_NAME, '{}-'.format(DEFAULT_POD_TYPE), broker_ids)
# all tasks are running
tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)
for broker_id in range(DEFAULT_BROKER_COUNT):
result = service_cli('broker get {}'.format(broker_id))
assert result['port'] != 9092
@pytest.mark.sanity
def test_port_dynamic_to_dynamic_port():
tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)
broker_ids = tasks.get_task_ids(SERVICE_NAME, '{}-'.format(DEFAULT_POD_TYPE))
config = marathon.get_config(SERVICE_NAME)
broker_cpus = int(config['env']['BROKER_CPUS'])
config['env']['BROKER_CPUS'] = str(broker_cpus + 0.1)
marathon.update_app(SERVICE_NAME, config)
tasks.check_tasks_updated(SERVICE_NAME, '{}-'.format(DEFAULT_POD_TYPE), broker_ids)
# all tasks are running
tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)
@pytest.mark.sanity
def test_can_adjust_config_from_dynamic_to_static_port():
tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)
broker_ids = tasks.get_task_ids(SERVICE_NAME, '{}-'.format(DEFAULT_POD_TYPE))
config = marathon.get_config(SERVICE_NAME)
config['env']['BROKER_PORT'] = '9092'
marathon.update_app(SERVICE_NAME, config)
tasks.check_tasks_updated(SERVICE_NAME, '{}-'.format(DEFAULT_POD_TYPE), broker_ids)
# all tasks are running
tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)
for broker_id in range(DEFAULT_BROKER_COUNT):
result = service_cli('broker get {}'.format(broker_id))
assert result['port'] == 9092
|
import os
from oslo_log import log as logging
from taskflow.listeners import base
from taskflow.listeners import logging as logging_listener
from taskflow import task
from <project_name> import exception
LOG = logging.getLogger(__name__)
def _make_task_name(cls, addons=None):
"""Makes a pretty name for a task class."""
base_name = ".".join([cls.__module__, cls.__name__])
extra = ''
if addons:
extra = ';%s' % (", ".join([str(a) for a in addons]))
return base_name + extra
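# For instance, a task class Foo defined in module myproject.tasks with
# addons=['resize'] is named "myproject.tasks.Foo;resize" (module and class
# names here are placeholders).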
class <Project_name>Task(task.Task):
"""The root task class for all <project_name> tasks.
It automatically names the given task using the module and class that
implement the given task as the task name.
"""
def __init__(self, addons=None, **kwargs):
super(<Project_name>Task, self).__init__(self.make_name(addons), **kwargs)
@classmethod
def make_name(cls, addons=None):
return _make_task_name(cls, addons)
class DynamicLogListener(logging_listener.DynamicLoggingListener):
"""This is used to attach to taskflow engines while they are running.
It provides a bunch of useful features that expose the actions happening
inside a taskflow engine, which can be useful for developers for debugging,
for operations folks for monitoring and tracking of the resource actions
and more...
"""
    #: These exceptions are expected cases; don't include a traceback in the log when they occur.
_NO_TRACE_EXCEPTIONS = (exception.InvalidInput, exception.QuotaError)
def __init__(self, engine,
task_listen_for=base.DEFAULT_LISTEN_FOR,
flow_listen_for=base.DEFAULT_LISTEN_FOR,
retry_listen_for=base.DEFAULT_LISTEN_FOR,
logger=LOG):
super(DynamicLogListener, self).__init__(
engine,
task_listen_for=task_listen_for,
flow_listen_for=flow_listen_for,
retry_listen_for=retry_listen_for,
log=logger)
def _format_failure(self, fail):
if fail.check(*self._NO_TRACE_EXCEPTIONS) is not None:
exc_info = None
exc_details = '%s%s' % (os.linesep, fail.pformat(traceback=False))
return (exc_info, exc_details)
else:
return super(DynamicLogListener, self)._format_failure(fail)
|
"""
@author: Raven
@contact: aducode@126.com
@site: https://github.com/aducode
@file: invoker.py
@time: 2016/2/1 23:05
"""
from socket import socket, AF_INET, SOCK_STREAM
from gaea.core.protocol.protocol import MsgType, Platform
from gaea.core.protocol.protocol import Protocol, RequestProtocol, KeyValuePair, Out
from gaea.core.exception import InternalServerException
def recv_data(conn, buf_size=1024):
_data = ''
while True:
_in_buf = conn.recv(buf_size)
_data += _in_buf if _in_buf else ''
if (len(_in_buf) if _in_buf else 0) < buf_size:
break
conn.close()
return _data
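# NOTE: recv_data stops reading as soon as a recv() call returns fewer than
# buf_size bytes (or nothing at all), so it assumes the server's whole reply
# has arrived before such a short read occurs.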
def invoker(proxy, func):
def _func(*args):
type_and_values = zip(func.__args__, args)
params = list()
out_params = list()
for t, v in type_and_values:
if isinstance(t, Out):
if not isinstance(v, Out):
raise RuntimeError('Value must be Out instance!')
else:
out_params.append(v)
params.append((t.value, v.value, ))
else:
params.append((t, v, ))
request = RequestProtocol()
request.lookup = proxy.implement.__class__.__service_name__
request.methodName = func.__method_name__
request.paraKVList = [KeyValuePair(_type.__simple_name__, value)
for _type, value in params]
send_protocol = Protocol(msg=request,
msg_type=MsgType.Request,
compress_type=proxy.compress,
serialize_type=proxy.serialize,
platform=Platform.Java)
serialized = send_protocol.to_bytes()
conn = socket(AF_INET, SOCK_STREAM)
conn.connect(proxy.address)
conn.send(serialized)
data = recv_data(conn)
receive_protocol = Protocol.from_bytes(data)
if receive_protocol.msg_type == MsgType.Response:
response = receive_protocol.msg
response_out_params = response.outpara if response.outpara is not None else list()
if len(response_out_params) != len(out_params):
raise RuntimeError('Out parameter num not equal!')
for i in xrange(len(out_params)):
out_params[i].value = response_out_params[i]
return response.result
elif receive_protocol.msg_type == MsgType.Exception:
exception = receive_protocol.msg
exp = InternalServerException(exception.errorCode, exception.toIP, exception.fromIP, exception.errorMsg)
raise exp
return _func
|
advanced_builder = AdvancedDataViewBuilder()
advanced_builder.dataset_ids(dataset_ids)
for descriptor in descriptors:
advanced_builder.add_raw_descriptor(descriptor)
advanced_builder.add_relation(['formula'], 'Property Band gap')
advanced_builder.add_relation(['formula'], 'Property Color')
advanced_builder.add_relation(['formula', 'Temperature (Property Band gap)'], 'Property Band gap')
advanced_builder.add_relation(['formula', 'Property Band gap'], 'Property Color')
|
from django.db import models
from django.utils.translation import gettext_lazy as _
from filer.fields.file import FilerFileField
from fluent_contents.models.db import ContentItem
class FilerFileItem(ContentItem):
file = FilerFileField(verbose_name=_("file"), on_delete=models.CASCADE)
name = models.CharField(_("name"), max_length=255, null=True, blank=True)
target = models.CharField(
_("target"),
blank=True,
max_length=100,
        choices=(
            ("", _("same window")),
            ("_blank", _("new window")),
            ("_parent", _("parent window")),
            ("_top", _("topmost frame")),
        ),
default="",
)
class Meta:
verbose_name = _("File")
verbose_name_plural = _("Files")
@property
def filename(self):
if self.file.name in ("", None):
return self.file.original_filename
return self.file.name
def __str__(self):
if self.name:
return self.name
elif self.file:
return str(self.filename)
return "<empty>"
|
"""Training-related part of the Keras engine.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import weakref
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.engine import training_arrays
from tensorflow.python.keras.engine import training_distributed
from tensorflow.python.keras.engine import training_eager
from tensorflow.python.keras.engine import training_generator
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.engine.network import Network
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils.generic_utils import slice_arrays
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.models.Model', 'keras.Model')
class Model(Network):
"""`Model` groups layers into an object with training and inference features.
There are two ways to instantiate a `Model`:
1 - With the "functional API", where you start from `Input`,
you chain layer calls to specify the model's forward pass,
and finally you create your model from inputs and outputs:
```python
import tensorflow as tf
inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
```
2 - By subclassing the `Model` class: in that case, you should define your
layers in `__init__` and you should implement the model's forward pass
in `call`.
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
```
If you subclass `Model`, you can optionally have
a `training` argument (boolean) in `call`, which you can use to specify
a different behavior in training and inference:
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.dropout = tf.keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
if training:
x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
```
"""
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
# Create a cache for iterator get_next op.
self._iterator_get_next = weakref.WeakKeyDictionary()
# Create a cache for dataset - uninitialized iterators
self._dataset_iterator_cache = weakref.WeakKeyDictionary()
# initializing _distribution_strategy here since it is possible to call
# predict on a model without compiling it.
self._distribution_strategy = None
self.run_eagerly = None
def _set_sample_weight_attributes(self, sample_weight_mode,
skip_target_weighing_indices):
"""Sets sample weight related attributes on the model."""
sample_weights, sample_weight_modes = training_utils.prepare_sample_weights(
self.output_names, sample_weight_mode, skip_target_weighing_indices)
self.sample_weights = sample_weights
self.sample_weight_modes = sample_weight_modes
self._feed_sample_weight_modes = [
sample_weight_modes[i]
for i in range(len(self.outputs))
if i not in skip_target_weighing_indices
]
self._feed_sample_weights = [
sample_weights[i]
for i in range(len(sample_weights))
if i not in skip_target_weighing_indices
]
def _cache_output_metric_attributes(self, metrics, weighted_metrics):
"""Caches metric name and function attributes for every model output."""
output_shapes = [
None if output is None else output.get_shape().as_list()
for output in self.outputs
]
self._per_output_metrics = training_utils.collect_per_output_metric_info(
metrics, self.output_names, output_shapes, self.loss_functions)
self._per_output_weighted_metrics = \
training_utils.collect_per_output_metric_info(
weighted_metrics, self.output_names, output_shapes,
self.loss_functions, self.sample_weights)
def _add_unique_metric_name(self, metric_name, output_index):
"""Makes the metric name unique and adds it to the model's metric name list.
If there are multiple outputs for which the metrics are calculated, the
metric names have to be made unique by appending an integer.
Arguments:
metric_name: Metric name that corresponds to the metric specified by the
user. For example: 'acc'.
output_index: The index of the model output for which the metric name is
being added.
Returns:
string, name of the model's unique metric name
"""
if len(self.output_names) > 1:
metric_name = '%s_%s' % (self.output_names[output_index], metric_name)
j = 1
base_metric_name = metric_name
while metric_name in self._compile_metrics_names:
metric_name = '%s_%d' % (base_metric_name, j)
j += 1
return metric_name
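  # e.g. with output_names ['out_a', 'out_b'], metric name 'acc' for output
  # index 0 becomes 'out_a_acc'; if that name is already taken it becomes
  # 'out_a_acc_1', then 'out_a_acc_2', and so on.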
@property
def metrics(self):
"""Returns the model's metrics added using `compile`, `add_metric` APIs."""
metrics = []
if self._is_compiled:
metrics += self._compile_stateful_metric_functions
return metrics + super(Model, self).metrics
@property
def metrics_names(self):
"""Returns the model's display labels for all outputs."""
metrics_names = []
if self._is_compiled:
metrics_names += self._compile_metrics_names # Includes names of losses.
# Add metric names from layers.
for layer in self.layers:
metrics_names += [m.name for m in layer._metrics] # pylint: disable=protected-access
metrics_names += [m.name for m in self._metrics]
return metrics_names
@property
def _all_metrics_tensors(self):
"""Returns the network's symbolic metric tensors."""
metrics_tensors = {}
if self._is_compiled:
metrics_tensors.update(self._compile_metrics_tensors)
metrics_tensors.update(super(Model, self)._all_metrics_tensors)
return metrics_tensors
@property
def _all_stateful_metrics_tensors(self):
"""Returns the network's symbolic metric tensors."""
metrics_tensors = {}
if self._is_compiled:
metrics_tensors.update(self._compile_stateful_metrics_tensors)
metrics_tensors.update(super(Model, self)._all_metrics_tensors)
return metrics_tensors
def _init_metric_attributes(self):
"""Initialized model metric attributes."""
# List of all metric names in the model.
self._compile_metrics_names = ['loss']
# List of stateful metric functions. Used for resetting metric state during
# training/eval.
# This includes loss functions when there are multiple outputs.
self._compile_stateful_metric_functions = []
# Dict of all aggregated metric result tensors. This includes aggregated
# loss result tensors when there are multiple outputs.
self._compile_stateful_metrics_tensors = {}
# Dict of all metric result tensors (aggregated or not - based on the
# values given in compile.). This includes aggregated loss result tensors
# when there are multiple outputs.
self._compile_metrics_tensors = {}
def _set_per_output_metric_attributes(self, metrics_dict, output_index):
"""Sets the metric attributes on the model for the given output.
Arguments:
metrics_dict: A dict with metric names as keys and metric fns as values.
output_index: The index of the model output for which the metric
attributes are added.
Returns:
Metrics dict updated with unique metric names as keys.
"""
updated_metrics_dict = collections.OrderedDict()
for metric_name, (metric_fn, stateful_metric_fn) in metrics_dict.items():
metric_name = self._add_unique_metric_name(metric_name, output_index)
updated_metrics_dict[metric_name] = (metric_fn, stateful_metric_fn)
# Keep track of metric name, function and stateful function.
self._compile_metrics_names.append(metric_name)
self._compile_stateful_metric_functions.append(stateful_metric_fn)
return updated_metrics_dict
def _set_metric_attributes(self, outputs, skip_target_indices=None):
"""Sets the metric attributes on the model for all the model outputs."""
skip_target_indices = skip_target_indices or []
updated_per_output_metrics = []
updated_per_output_weighted_metrics = []
for i in range(len(outputs)):
if i in skip_target_indices:
updated_per_output_metrics.append(self._per_output_metrics[i])
updated_per_output_weighted_metrics.append(
self._per_output_weighted_metrics[i])
continue
updated_per_output_metrics.append(
self._set_per_output_metric_attributes(self._per_output_metrics[i],
i))
updated_per_output_weighted_metrics.append(
self._set_per_output_metric_attributes(
self._per_output_weighted_metrics[i], i))
self._per_output_metrics = updated_per_output_metrics
self._per_output_weighted_metrics = updated_per_output_weighted_metrics
def _handle_per_output_metrics(self,
metrics_dict,
y_true,
y_pred,
mask,
weights=None,
return_stateful_result=True):
"""Calls metric functions for a single output.
Arguments:
metrics_dict: A dict with metric names as keys and metric fns as values.
y_true: Target output.
y_pred: Predicted output.
mask: Computed mask value for the current output.
weights: Weights to be applied on the current output.
return_stateful_result: Boolean, indicates whether the stateful
(aggregated)/stateless metric result should be returned.
Returns:
A list of metric result tensors.
"""
metric_results = []
for metric_name, (metric_fn, stateful_fn) in metrics_dict.items():
with K.name_scope(metric_name):
def _call_stateful_fn(fn):
return training_utils.call_metric_function(
fn, y_true, y_pred, weights=weights, mask=mask)
def _call_stateless_fn(fn):
weighted_metric_fn = training_utils.weighted_masked_objective(fn)
return weighted_metric_fn(y_true, y_pred, weights=weights, mask=mask)
def _track_metric_tensors(name, stateless_result, stateful_result):
self._compile_metrics_tensors[name] = stateless_result
self._compile_stateful_metrics_tensors[name] = stateful_result
if isinstance(metric_fn, metrics_module.Metric):
# If the given metric fn is stateful, call the fn and return result.
metric_result = _call_stateful_fn(metric_fn)
metric_results.append(metric_result)
if not self.run_eagerly:
_track_metric_tensors(metric_name, metric_result, metric_result)
elif self.run_eagerly:
# In eager mode, if the given metric fn is not stateful, we invoke the
# given fn or its stateful version based on the given flag.
if return_stateful_result:
metric_result = _call_stateful_fn(stateful_fn)
else:
metric_result = _call_stateless_fn(metric_fn)
metric_results.append(metric_result)
else:
# In graph mode, we build the sub-graph for both the stateful and the
# stateless fns.
stateful_metric_result = _call_stateful_fn(stateful_fn)
metric_result = _call_stateless_fn(metric_fn)
_track_metric_tensors(metric_name, metric_result,
stateful_metric_result)
return metric_results
def _handle_metrics(self,
outputs,
skip_target_indices=None,
targets=None,
sample_weights=None,
masks=None,
return_stateful_result=True):
"""Handles calling metric functions.
Arguments:
outputs: List of outputs (predictions).
skip_target_indices: Optional. List of target ids to skip.
targets: List of targets.
sample_weights: Optional list of sample weight arrays.
masks: List of computed output mask values.
return_stateful_result: Boolean, indicates whether the stateful
(aggregated)/stateless metric result should be returned.
Returns:
A list of metric result tensors.
"""
skip_target_indices = skip_target_indices or []
metric_results = []
with K.name_scope('metrics'):
# Invoke all metrics added using `compile`.
for i in range(len(outputs)):
if i in skip_target_indices:
continue
output = outputs[i] if outputs else None
target = targets[i] if targets else None
output_mask = masks[i] if masks else None
metric_results.extend(
self._handle_per_output_metrics(
self._per_output_metrics[i],
target,
output,
output_mask,
return_stateful_result=return_stateful_result))
metric_results.extend(
self._handle_per_output_metrics(
self._per_output_weighted_metrics[i],
target,
output,
output_mask,
weights=sample_weights[i],
return_stateful_result=return_stateful_result))
# Add metric results from the `add_metric` metrics in eager mode.
if context.executing_eagerly():
for m in self.metrics:
if m not in self._compile_stateful_metric_functions:
metric_results.append(m.result())
return metric_results
@property
def run_eagerly(self):
"""Settable attribute indicating whether the model should run eagerly.
Running eagerly means that your model will be run step by step,
like Python code. Your model might run slower, but it should become easier
for you to debug it by stepping into individual layer calls.
By default, we will attempt to compile your model to a static graph to
deliver the best execution performance.
Returns:
Boolean, whether the model should run eagerly.
"""
if self._run_eagerly is True and not context.executing_eagerly():
raise ValueError('You can only set `run_eagerly=True` if eager execution '
'is enabled.')
if self._static_graph_friendly:
if self._run_eagerly is None:
return False
else:
return self._run_eagerly
else:
if self._run_eagerly is False:
# TODO(fchollet): consider using py_func to enable this.
raise ValueError('Your model contains layers that can only be '
'successfully run in eager execution. '
'You cannot set `run_eagerly=False`.')
return context.executing_eagerly()
@run_eagerly.setter
def run_eagerly(self, value):
self._run_eagerly = value
@checkpointable.no_automatic_dependency_tracking
def compile(self,
optimizer,
loss=None,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
distribute=None,
**kwargs):
"""Configures the model for training.
Arguments:
optimizer: String (name of optimizer) or optimizer instance.
See [optimizers](/api_docs/python/tf/keras/optimizers).
loss: String (name of objective function) or objective function.
See [losses](/api_docs/python/tf/losses).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of losses.
The loss value that will be minimized by the model
will then be the sum of all individual losses.
metrics: List of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions
of different model outputs.
The loss value that will be minimized by the model
will then be the *weighted sum* of all individual losses,
weighted by the `loss_weights` coefficients.
If a list, it is expected to have a 1:1 mapping
        to the model's outputs. If a dict, it is expected to map
output names (strings) to scalar coefficients.
sample_weight_mode: If you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
weighted_metrics: List of metrics to be evaluated and weighted
by sample_weight or class_weight during training and testing.
target_tensors: By default, Keras will create placeholders for the
model's target, which will be fed with the target data during
training. If instead you would like to use your own
target tensors (in turn, Keras will not expect external
Numpy data for these targets at training time), you
can specify them via the `target_tensors` argument. It can be
a single tensor (for a single-output model), a list of tensors,
or a dict mapping output names to target tensors.
distribute: The DistributionStrategy instance that we want to use to
distribute the training of the model.
**kwargs: These arguments are passed to `tf.Session.run`.
Raises:
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
run_eagerly = kwargs.pop('run_eagerly', None)
self._run_eagerly = run_eagerly
# Validate that arguments passed by the user to `compile` are supported by
# DistributionStrategy.
if distribute:
if not isinstance(
optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):
raise NotImplementedError(
'optimizer must be an instance of '
'tf.train.Optimizer, not a %s' % type(optimizer))
if self.run_eagerly:
raise NotImplementedError('DistributionStrategy is not supported '
'when running a model eagerly.')
if sample_weight_mode:
raise NotImplementedError('sample_weight_mode is not supported with '
'DistributionStrategy.')
if weighted_metrics:
raise NotImplementedError('weighted_metrics is not supported with '
'DistributionStrategy.')
if target_tensors:
raise ValueError('target_tensors is not supported with '
'DistributionStrategy.')
loss = loss or {}
if self.run_eagerly and not isinstance(
optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):
raise ValueError(
'When running a model in eager execution, the optimizer must be an '
'instance of tf.train.Optimizer. Received: '
'%s' % optimizer)
self.optimizer = optimizers.get(optimizer)
# We've disabled automatic dependency tracking for this method, but do want
# to add a checkpoint dependency on the optimizer if it's checkpointable.
if isinstance(self.optimizer, checkpointable.CheckpointableBase):
self._track_checkpointable(
self.optimizer, name='optimizer', overwrite=True)
self.loss = loss
self._compile_metrics = metrics or []
self.loss_weights = loss_weights
self.sample_weight_mode = sample_weight_mode
self._compile_weighted_metrics = weighted_metrics
if self.run_eagerly and target_tensors is not None:
raise ValueError(
'target_tensors argument is not supported when '
'running a model eagerly.')
self.target_tensors = target_tensors
# Set DistributionStrategy specific parameters.
self._distribution_strategy = distribute
# Reset the value of grouped_model
self._grouped_model = None
if self._distribution_strategy is not None:
distributed_training_utils.configure_and_create_session(
self._distribution_strategy)
# Initialize model metric attributes.
self._init_metric_attributes()
if not self.built:
# Model is not compilable because it does not know its number of inputs
# and outputs, nor their shapes and names. We will compile after the first
# time the model gets called on training data.
return
self._is_compiled = True
# Prepare loss functions.
if isinstance(loss, dict):
for name in loss:
if name not in self.output_names:
raise ValueError(
'Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
logging.warning(
'Output "' + name +
'" missing from loss dictionary. We assume '
'this was done on purpose. The fit and evaluate APIs will not be '
'expecting any data to be passed to "' + name + '".')
loss_functions.append(losses.get(loss.get(name)))
elif isinstance(loss, list):
if len(loss) != len(self.outputs):
raise ValueError('When passing a list as loss, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' + str(loss))
loss_functions = [losses.get(l) for l in loss]
else:
loss_function = losses.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
skip_target_indices = []
skip_target_weighing_indices = []
self._feed_outputs = []
self._feed_output_names = []
self._feed_output_shapes = []
self._feed_loss_fns = []
for i in range(len(loss_functions)):
if loss_functions[i] is None:
skip_target_indices.append(i)
skip_target_weighing_indices.append(i)
# Prepare output masks.
if not self.run_eagerly:
masks = [getattr(x, '_keras_mask', None) for x in self.outputs]
if not isinstance(masks, list):
masks = [masks]
# Prepare loss weights.
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif isinstance(loss_weights, dict):
for name in loss_weights:
if name not in self.output_names:
raise ValueError(
'Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif isinstance(loss_weights, list):
if len(loss_weights) != len(self.outputs):
raise ValueError(
'When passing a list as loss_weights, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' + str(loss_weights))
loss_weights_list = loss_weights
else:
raise TypeError('Could not interpret loss_weights argument: ' +
str(loss_weights) + ' - expected a list or a dict.')
self.loss_weights_list = loss_weights_list
# Initialization for Eager mode execution.
if self.run_eagerly:
# Prepare sample weights.
self._set_sample_weight_attributes(sample_weight_mode,
skip_target_weighing_indices)
# Save all metric attributes per output of the model.
self._cache_output_metric_attributes(metrics, weighted_metrics)
if target_tensors is not None:
raise ValueError('target_tensors are not currently supported in Eager '
'mode.')
self.total_loss = None
for i in range(len(self.outputs)):
if len(self.outputs) > 1:
self._compile_metrics_names.append(self.output_names[i] + '_loss')
# Set metric attributes on model.
self._set_metric_attributes(
self.outputs,
skip_target_indices=skip_target_indices,
)
self.targets = []
for i in range(len(self.outputs)):
self._feed_output_names.append(self.output_names[i])
self._collected_trainable_weights = self.trainable_weights
return
with K.get_graph().as_default():
# Prepare targets of model.
self.targets = []
self._feed_targets = []
if target_tensors not in (None, []):
if isinstance(target_tensors, list):
if len(target_tensors) != len(self.outputs):
raise ValueError(
'When passing a list as `target_tensors`, '
'it should have one entry per model output. '
'The model has %s outputs, but you passed target_tensors=%s' %
(len(self.outputs), target_tensors))
elif isinstance(target_tensors, dict):
for name in target_tensors:
if name not in self.output_names:
raise ValueError(
'Unknown entry in `target_tensors` '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
tmp_target_tensors = []
for name in self.output_names:
tmp_target_tensors.append(target_tensors.get(name, None))
target_tensors = tmp_target_tensors
elif tensor_util.is_tensor(target_tensors):
target_tensors = [target_tensors]
else:
raise TypeError('Expected `target_tensors` to be a list or tuple or '
'dict or a single tensor, but got:', target_tensors)
for i in range(len(self.outputs)):
if i in skip_target_indices:
self.targets.append(None)
else:
shape = K.int_shape(self.outputs[i])
name = self.output_names[i]
if target_tensors not in (None, []):
target = target_tensors[i]
else:
target = None
if target is None or K.is_placeholder(target):
if target is None:
target = K.placeholder(
ndim=len(shape),
name=name + '_target',
sparse=K.is_sparse(self.outputs[i]),
dtype=K.dtype(self.outputs[i]))
self._feed_targets.append(target)
self._feed_outputs.append(self.outputs[i])
self._feed_output_names.append(name)
self._feed_output_shapes.append(shape)
self._feed_loss_fns.append(self.loss_functions[i])
else:
skip_target_weighing_indices.append(i)
self.targets.append(target)
# Prepare sample weights.
self._set_sample_weight_attributes(sample_weight_mode,
skip_target_weighing_indices)
# Save all metric attributes per output of the model.
self._cache_output_metric_attributes(metrics, weighted_metrics)
# Compute total loss.
total_loss = None
with K.name_scope('loss'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
loss_fn = loss_functions[i]
sample_weight = self.sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
with K.name_scope(self.output_names[i] + '_loss'):
weighted_loss = training_utils.weighted_masked_objective(loss_fn)
output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
if len(self.outputs) > 1:
# Keep track of the un-aggregated loss result tensor.
self._compile_metrics_tensors[self.output_names[i] +
'_loss'] = output_loss
# Keep track of stateful result tensor and function for the loss.
mean_wrapped_loss = metrics_module.MeanMetricWrapper(
loss_fn, name=loss_fn.__name__)
result_tensor = training_utils.call_metric_function(
mean_wrapped_loss,
y_true,
y_pred,
weights=sample_weight,
mask=mask)
self._compile_stateful_metrics_tensors[self.output_names[i] +
'_loss'] = result_tensor
self._compile_stateful_metric_functions.append(mean_wrapped_loss)
self._compile_metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
if total_loss is None:
if not self.losses:
raise ValueError('The model cannot be compiled '
'because it has no loss to optimize.')
else:
total_loss = 0.
# Add regularization penalties
# and other layer-specific losses.
for loss_tensor in self.losses:
total_loss += loss_tensor
# Set metric attributes on model.
self._set_metric_attributes(
self.outputs,
skip_target_indices=skip_target_indices,
)
# Invoke metric functions for all the outputs.
self._handle_metrics(
self.outputs,
masks=masks,
targets=self.targets,
skip_target_indices=skip_target_indices,
sample_weights=self.sample_weights)
# Prepare gradient updates and state updates.
self.total_loss = total_loss
# Functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self._fit_function = None
self._eval_function = None
self.train_function = None
self.test_function = None
self.predict_function = None
# Collected trainable weights, sorted in topological order.
trainable_weights = self.trainable_weights
self._collected_trainable_weights = trainable_weights
def _check_trainable_weights_consistency(self):
"""Check trainable weights count consistency.
This will raise a warning if `trainable_weights` and
`_collected_trainable_weights` are inconsistent (i.e. have different
number of parameters).
Inconsistency will typically arise when one modifies `model.trainable`
without calling `model.compile` again.
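Example of the situation this guards against (a sketch; `x` and `y`
stand for any valid training batch):
```python
model.compile(optimizer='sgd', loss='mse')  # collects trainable weights
model.trainable = False                     # `trainable_weights` changes...
model.train_on_batch(x, y)                  # ...and this logs the warning
```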
"""
if not hasattr(self, '_collected_trainable_weights'):
return
if len(self.trainable_weights) != len(self._collected_trainable_weights):
logging.log_first_n(
logging.WARN, 'Discrepancy between trainable weights and collected'
' trainable weights: did you set `model.trainable`'
' without calling `model.compile` afterwards?', 1)
def _make_train_function_helper(self, fn_name, outputs, metric_updates=None):
if not hasattr(self, fn_name):
raise RuntimeError('You must compile your model before using it.')
self._check_trainable_weights_consistency()
if getattr(self, fn_name) is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if not isinstance(K.symbolic_learning_phase(), int):
inputs += [K.symbolic_learning_phase()]
with K.get_graph().as_default():
with K.name_scope('training'):
with K.name_scope(self.optimizer.__class__.__name__):
# Training updates
updates = self.optimizer.get_updates(
params=self._collected_trainable_weights, loss=self.total_loss)
# Unconditional updates
updates += self.get_updates_for(None)
# Conditional updates relevant to this model
updates += self.get_updates_for(self.inputs)
# Add stateful metrics updates.
if metric_updates is not None:
updates += metric_updates
with K.name_scope('training'):
# Gets loss and metrics. Updates weights at each call.
fn = K.function(
inputs,
outputs,
updates=updates,
name='train_function',
**self._function_kwargs)
setattr(self, fn_name, fn)
def _make_train_function(self):
metrics_tensors = [
self._all_metrics_tensors[m] for m in self.metrics_names[1:]
]
self._make_train_function_helper('train_function',
[self.total_loss] + metrics_tensors)
def _make_fit_function(self):
# TODO(psv/anjalisridhar): Remove updates after we fix b/118841692
# Stateful metrics updates
metric_updates = []
for m in self.metrics:
metric_updates += m.updates
metrics_tensors = [
self._all_stateful_metrics_tensors[m] for m in self.metrics_names[1:]
]
self._make_train_function_helper(
'_fit_function', [self.total_loss] + metrics_tensors, metric_updates)
def _make_test_function_helper(self, fn_name, outputs, metric_updates=None):
if not hasattr(self, fn_name):
raise RuntimeError('You must compile your model before using it.')
if getattr(self, fn_name) is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
with K.name_scope('evaluation'):
updates = self.state_updates
# Add stateful metrics updates.
if metric_updates is not None:
updates += metric_updates
# Return loss and metrics, no gradient updates.
# Does update the network states.
fn = K.function(
inputs,
outputs,
updates=updates,
name='test_function',
**self._function_kwargs)
setattr(self, fn_name, fn)
def _make_test_function(self):
metrics_tensors = [
self._all_metrics_tensors[m] for m in self.metrics_names[1:]
]
self._make_test_function_helper('test_function',
[self.total_loss] + metrics_tensors)
def _make_eval_function(self):
metrics_tensors = [
self._all_stateful_metrics_tensors[m] for m in self.metrics_names[1:]
]
self._make_test_function_helper('_eval_function',
[self.total_loss] + metrics_tensors)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
with K.name_scope('predict'):
self.predict_function = K.function(
inputs,
self.outputs,
updates=self.state_updates,
name='predict_function',
**kwargs)
def _get_execution_function(self, mode):
if mode == 'train':
self._make_fit_function()
return self._fit_function
if mode == 'test':
self._make_eval_function()
return self._eval_function
if mode == 'predict':
self._make_predict_function()
return self.predict_function
def _get_iterator_get_next_tensors(self, iterator):
get_next_op = self._iterator_get_next.get(iterator, None)
if get_next_op is None:
get_next_op = iterator.get_next()
self._iterator_get_next[iterator] = get_next_op
return get_next_op
def _distribution_standardize_user_data(self,
x,
y=None,
sample_weight=None,
class_weight=None,
batch_size=None,
check_steps=False,
steps_name='steps',
steps=None,
validation_split=0,
shuffle=False):
"""Runs validation checks on input and target data passed by the user.
This is called when using DistributionStrategy to train, evaluate or serve
the model.
Args:
x: Input data. A numpy array or `tf.data` dataset.
y: Target data. A numpy array or None if x is a `tf.data` dataset.
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`.
class_weight: An optional class-weight array passed by the user to
weight the importance of samples in `x` based on the class they belong
to, as conveyed by `y`.
batch_size: Integer batch size. If provided, it is used to run additional
validation checks on stateful models.
check_steps: Boolean, True if we want to check the validity of `steps`
and False otherwise.
steps_name: The public API's parameter name for `steps`.
steps: Integer or `None`. Total number of steps (batches of samples) to
execute.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
shuffle: Boolean whether to shuffle the training data before each epoch.
Returns:
Iterator for reading the dataset `x`.
Raises:
ValueError: In case of invalid user-provided data.
RuntimeError: If the model was never compiled.
"""
if class_weight:
raise NotImplementedError('`class_weight` is currently not supported '
'when using DistributionStrategy.')
if (sample_weight is not None and sample_weight.all() and
self._distribution_strategy.__class__.__name__ == 'TPUStrategy'):
raise NotImplementedError('`sample_weight` is currently not supported '
'when using TPUStrategy.')
# Validates `steps` argument right at the beginning since we use it to
# construct the dataset object.
# TODO(anjalisridhar): Remove this check once we refactor the
# _standardize_user_data code path. This check is already present elsewhere
# in the codebase.
if check_steps and isinstance(x, dataset_ops.Dataset) and steps is None:
raise ValueError('When using Datasets as input, '
'you should specify the `{steps_name}` argument.'
.format(steps_name=steps_name))
first_x_value = nest.flatten(x)[0]
if isinstance(first_x_value, np.ndarray):
assert steps is not None
x_shape = first_x_value.shape
if batch_size is None:
batch_size = distributed_training_utils.get_batch_size(
self._distribution_strategy, x_shape[0], steps)
# We need to use the drop_remainder argument to allow for a static
# input shape which is required for TPUs.
drop_remainder = self._distribution_strategy.require_static_shapes
if y is not None:
var_x = distributed_training_utils.get_var_for_numpy(
self._distribution_strategy, x)
var_y = distributed_training_utils.get_var_for_numpy(
self._distribution_strategy, y)
if sample_weight is not None:
var_sample_weights = distributed_training_utils.get_var_for_numpy(
self._distribution_strategy, sample_weight)
x = dataset_ops.Dataset.from_tensor_slices((var_x, var_y,
var_sample_weights))
else:
x = dataset_ops.Dataset.from_tensor_slices((var_x, var_y))
if shuffle:
# 1024 is a good buffer size since it is much larger than the average
# batch size provided by the user and provides sufficient randomness.
# One thing to keep in mind is the memory usage based on the size of
# each sample.
x = x.shuffle(1024)
x = x.repeat()
x = x.batch(batch_size, drop_remainder=drop_remainder)
y = None
sample_weight = None
else:
# This case is for the predict call where the dataset only contains
# inputs and no targets, i.e. it does not return a tuple
var_x = distributed_training_utils.get_var_for_numpy(
self._distribution_strategy, x)
x = dataset_ops.Dataset.from_tensor_slices(var_x)
x = x.repeat()
x = x.batch(batch_size, drop_remainder=drop_remainder)
assert isinstance(x, dataset_ops.Dataset)
with self._distribution_strategy.scope():
iterator = self._distribution_strategy.make_dataset_iterator(x)
K.get_session().run(iterator.initialize())
training_utils.validate_iterator_input(x, y, sample_weight,
validation_split)
return iterator
def _standardize_user_data(self,
x,
y=None,
sample_weight=None,
class_weight=None,
batch_size=None,
check_steps=False,
steps_name='steps',
steps=None,
validation_split=0,
shuffle=False):
"""Runs validation checks on input and target data passed by the user.
Also standardizes the data to lists of arrays, in order.
Also builds and compiles the model on the fly if it is a subclassed model
that has never been called before (and thus has no inputs/outputs).
This is a purely internal method, subject to refactoring at any time.
Args:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset or a
dataset iterator, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`.
class_weight: An optional class-weight array passed by the user to
weight the importance of samples in `x` based on the class they belong
to, as conveyed by `y`.
batch_size: Integer batch size. If provided, it is used to run additional
validation checks on stateful models.
check_steps: Boolean, True if we want to check the validity of `steps`
and False otherwise. For example, when we are standardizing one batch of
data for train_on_batch/predict_on_batch/test_on_batch APIs, `steps`
value is not required and we should not check for its validity in these
cases.
steps_name: The public API's parameter name for `steps`.
steps: Integer or `None`. Total number of steps (batches of samples) to
execute.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
shuffle: Boolean whether to shuffle the training data before each epoch.
Returns:
A tuple of 3: inputs (arrays or dicts, depending on whether `x` was a dict
or not), target arrays, sample-weight arrays.
If the model's input and targets are symbolic, these lists are empty
(since the model takes no user-provided data, instead the data comes
from the symbolic inputs/targets).
Raises:
ValueError: In case of invalid user-provided data.
RuntimeError: If the model was never compiled.
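Example of the standardized return values (a sketch; the shapes are
illustrative and `model` is assumed to be a compiled single-input,
single-output model):
```python
x, y, sample_weights = model._standardize_user_data(
    np.random.random((10, 4)), np.random.random((10, 1)))
# Each of x, y and sample_weights is a list with one array per model
# input/output, ready to be fed to the train/test functions.
```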
"""
if self._distribution_strategy:
iterator = self._distribution_standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size,
check_steps=check_steps,
steps_name=steps_name,
steps=steps,
validation_split=validation_split,
shuffle=shuffle)
return iterator, None, None
if isinstance(x, dataset_ops.Dataset):
if context.executing_eagerly():
x = x.make_one_shot_iterator()
else:
if x in self._dataset_iterator_cache:
x = self._dataset_iterator_cache[x]
else:
iterator = x.make_initializable_iterator()
self._dataset_iterator_cache[x] = iterator
x = iterator
K.get_session().run(x.initializer)
# Validates `steps` argument based on x's type.
if check_steps:
training_utils.check_steps_argument(x, steps, steps_name)
is_x_eager_iterator = isinstance(x, iterator_ops.EagerIterator)
is_x_iterator = isinstance(x, iterator_ops.Iterator)
if is_x_eager_iterator:
self._run_eagerly = True # TODO(fchollet): support using graph functions
# Validate user inputs when data is given as a dataset or dataset iterator.
if is_x_iterator or is_x_eager_iterator:
training_utils.validate_iterator_input(x, y, sample_weight,
validation_split)
# For eager iterators, when we have to process multiple batches of samples,
# we will standardize the data when we actually loop over iterator and get
# the batches. For now, we just return the iterator as is.
if is_x_eager_iterator:
return x, y, sample_weight
# If input data is a dataset iterator in graph mode or if it is an eager
# iterator and only one batch of samples is required, we fetch the data
# tensors from the iterator and then standardize them.
if is_x_iterator or is_x_eager_iterator:
try:
if is_x_iterator:
next_element = self._get_iterator_get_next_tensors(x)
else:
next_element = x.get_next()
except errors.OutOfRangeError:
raise RuntimeError('Your dataset iterator ran out of data. '
'Make sure that your dataset can generate '
'the required number of samples.')
if isinstance(next_element, (list, tuple)):
if len(next_element) not in [2, 3]:
raise ValueError(
'Please provide model inputs as a list or tuple of 2 or 3 '
'elements: (input, target) or (input, target, sample_weights). '
'Received %s' % next_element)
if len(next_element) == 2:
x, y = next_element
else:
x, y, sample_weight = next_element
else:
x = next_element
x, y, sample_weights = self._standardize_weights(x, y, sample_weight,
class_weight, batch_size)
return x, y, sample_weights
def _standardize_weights(self, x, y, sample_weight=None, class_weight=None,
batch_size=None):
# TODO(sourabhbajaj): Split input validation from weight standardization.
if sample_weight is not None and class_weight is not None:
logging.warning(
'Received both a `sample_weight` and `class_weight` argument. '
'The `class_weight` argument will be ignored.')
# First, we build/compile the model on the fly if necessary.
all_inputs = []
is_build_called = False
is_compile_called = False
dict_inputs = False
if not self.inputs:
# We need to use `x` to set the model inputs.
# We type-check that `x` and `y` are either single arrays
# or lists of arrays.
if isinstance(x, (list, tuple)):
if not all(isinstance(v, np.ndarray) or
tensor_util.is_tensor(v) for v in x):
raise ValueError('Please provide as model inputs either a single '
'array or a list of arrays. You passed: x=' + str(x))
all_inputs += list(x)
elif isinstance(x, dict):
dict_inputs = True
keys = sorted(x.keys())
all_inputs = [x[k] for k in keys]
else:
if not isinstance(x, np.ndarray) and not tensor_util.is_tensor(x):
raise ValueError('Please provide as model inputs either a single '
'array or a list of arrays. You passed: x=' + str(x))
all_inputs.append(x)
# Build the model using the retrieved inputs (value or symbolic).
# If values, then in symbolic-mode placeholders will be created
# to match the value shapes.
if not self.inputs:
is_build_called = True
self._set_inputs(x)
else:
dict_inputs = isinstance(self.inputs, dict)
if y is not None:
if not self.optimizer:
raise RuntimeError('You must compile a model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
if not self._is_compiled:
# On-the-fly compilation of the model.
# We need to use `y` to set the model targets.
if isinstance(y, (list, tuple)):
if not all(isinstance(v, np.ndarray) or
tensor_util.is_tensor(v) for v in y):
raise ValueError('Please provide as model targets either a single '
'array or a list of arrays. '
'You passed: y=' + str(y))
all_inputs += list(y)
elif isinstance(y, dict):
raise ValueError('Please do not pass a dictionary as model targets.')
else:
if not isinstance(y, np.ndarray) and not tensor_util.is_tensor(y):
raise ValueError('Please provide as model targets either a single '
'array or a list of arrays. '
'You passed: y=' + str(y))
all_inputs.append(y)
# Typecheck that all inputs are *either* value *or* symbolic.
# TODO(fchollet): this check could be removed in Eager mode?
if any(tensor_util.is_tensor(v) for v in all_inputs):
if not all(tensor_util.is_tensor(v) for v in all_inputs):
raise ValueError('Do not pass inputs that mix Numpy arrays and '
'TensorFlow tensors. '
'You passed: x=' + str(x) + '; y=' + str(y))
if self.run_eagerly:
target_tensors = None
else:
# Handle target tensors if any passed.
if not isinstance(y, (list, tuple)):
y = [y]
target_tensors = [v for v in y if tensor_util.is_tensor(v)]
is_compile_called = True
self.compile(
optimizer=self.optimizer,
loss=self.loss,
metrics=self._compile_metrics,
weighted_metrics=self._compile_weighted_metrics,
loss_weights=self.loss_weights,
target_tensors=target_tensors,
run_eagerly=self.run_eagerly)
# In graph mode, if we had just set inputs and targets as symbolic tensors
# by invoking build and compile on the model respectively, we do not have to
# feed anything to the model. Model already has input and target data as
# part of the graph.
# Note: in this case, `any` and `all` are equivalent since we disallow
# mixed symbolic/value inputs.
if (not self.run_eagerly and is_build_called and
is_compile_called and
any(tensor_util.is_tensor(v) for v in all_inputs)):
return [], [], []
# What follows is input validation and standardization to list format,
# in the case where all inputs are value arrays.
if self.run_eagerly:
# In eager mode, do not do shape validation
# since the network has no input nodes (placeholders) to be fed.
feed_input_names = self.input_names
feed_input_shapes = None
elif not self._is_graph_network:
# Case: symbolic-mode subclassed network. Do not do shape validation.
feed_input_names = self._feed_input_names
feed_input_shapes = None
else:
# Case: symbolic-mode graph network.
# In this case, we run extensive shape validation checks.
feed_input_names = self._feed_input_names
feed_input_shapes = self._feed_input_shapes
# Standardize the inputs.
x = training_utils.standardize_input_data(
x,
feed_input_names,
feed_input_shapes,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='input')
if y is not None:
if not self._is_graph_network:
feed_output_names = self._feed_output_names
feed_output_shapes = None
# Sample weighting not supported in this case.
# TODO(fchollet): consider supporting it.
feed_sample_weight_modes = [None for _ in self.outputs]
else:
feed_output_names = self._feed_output_names
feed_sample_weight_modes = self._feed_sample_weight_modes
feed_output_shapes = []
for output_shape, loss_fn in zip(self._feed_output_shapes,
self._feed_loss_fns):
if loss_fn is losses.sparse_categorical_crossentropy:
if K.image_data_format() == 'channels_first':
feed_output_shapes.append(
(output_shape[0], 1) + output_shape[2:])
else:
feed_output_shapes.append(output_shape[:-1] + (1,))
elif (not hasattr(loss_fn, '__name__') or
getattr(losses, loss_fn.__name__, None) is None):
# If `loss_fn` is not a function (e.g. callable class)
# or if it is not in the `losses` module, then
# it is a user-defined loss and we make no assumptions
# about it.
feed_output_shapes.append(None)
else:
feed_output_shapes.append(output_shape)
# Standardize the outputs.
y = training_utils.standardize_input_data(
y,
feed_output_names,
# Don't enforce target shapes to match output shapes.
# Precise checks will be run in `check_loss_and_target_compatibility`.
shapes=None,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='target')
# Generate sample-wise weight values given the `sample_weight` and
# `class_weight` arguments.
sample_weights = training_utils.standardize_sample_weights(
sample_weight, feed_output_names)
class_weights = training_utils.standardize_class_weights(
class_weight, feed_output_names)
sample_weights = [
training_utils.standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
feed_sample_weight_modes)
]
# Check that all arrays have the same length.
if not self._distribution_strategy:
training_utils.check_array_lengths(x, y, sample_weights)
if self._is_graph_network and not self.run_eagerly:
# Additional checks to avoid users mistakenly using improper loss fns.
training_utils.check_loss_and_target_compatibility(
y, self._feed_loss_fns, feed_output_shapes)
else:
y = []
sample_weights = []
if self.stateful and batch_size:
# Check that for stateful networks, number of samples is a multiple
# of the static batch size.
if x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
# If dictionary inputs were provided, we return a dictionary as well.
if dict_inputs:
x = dict(zip(feed_input_names, x))
return x, y, sample_weights
@checkpointable.no_automatic_dependency_tracking
def _set_inputs(self, inputs, outputs=None, training=None):
"""Set model's input and output specs based on the input data received.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
Args:
inputs: Single array, or list of arrays. The arrays could be placeholders,
Numpy arrays, or data tensors.
- if placeholders: the model is built on top of these placeholders,
and we expect Numpy data to be fed for them when calling `fit`/etc.
- if Numpy data: we create placeholders matching the shape of the Numpy
arrays. We expect Numpy data to be fed for these placeholders
when calling `fit`/etc.
- if data tensors: the model is built on top of these tensors.
We do not expect any Numpy data to be provided when calling `fit`/etc.
outputs: None, a data tensor, or a list of tensors. If None, the
outputs will be determined by invoking `self.call()`, otherwise the
provided value will be used.
training: Boolean or None. Only relevant in symbolic mode. Specifies
whether to build the model's graph in inference mode (False), training
mode (True), or using the Keras learning phase (None).
Raises:
ValueError: If dict inputs are passed to a Sequential Model where the
first layer isn't FeatureLayer.
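Example (a sketch of the subclassed-model case; `MyModel` is a
hypothetical subclass):
```python
class MyModel(tf.keras.Model):

  def __init__(self):
    super(MyModel, self).__init__()
    self.dense = tf.keras.layers.Dense(1)

  def call(self, inputs):
    return self.dense(inputs)

model = MyModel()
model.compile(optimizer='sgd', loss='mse')
# The first `fit` call reaches `_set_inputs` internally, building the
# model's input/output specs from the shape of the Numpy data.
model.fit(np.ones((4, 3)), np.ones((4, 1)), epochs=1)
```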
"""
if self.inputs:
raise ValueError('Model inputs are already set.')
if self.__class__.__name__ == 'Sequential' and not self.built:
if tensor_util.is_tensor(inputs):
input_shape = (None,) + tuple(inputs.get_shape().as_list()[1:])
self.build(input_shape=input_shape)
elif isinstance(inputs, dict):
# We check that the first layer is a FeatureLayer.
if not training_utils.is_feature_layer(self.layers[0]):
raise ValueError('Passing a dictionary input to a Sequential Model '
'which doesn\'t have FeatureLayer as the first layer'
' is an error.')
input_shape = (None,)
self.build(input_shape=input_shape)
else:
input_shape = (None,) + inputs.shape[1:]
self.build(input_shape=input_shape)
# On-the-fly setting of symbolic model inputs (either by using the tensor
# provided, or by creating a placeholder if Numpy data was provided).
model_inputs = training_utils.ModelInputs(inputs)
inputs = model_inputs.get_symbolic_inputs()
self.inputs = model_inputs.get_symbolic_inputs(return_single_as_list=True)
self.input_names = model_inputs.get_input_names()
self._feed_inputs = []
self._feed_input_names = []
self._feed_input_shapes = []
for k, v in model_inputs.as_dict():
if K.is_placeholder(v):
self._feed_inputs.append(v)
self._feed_input_names.append(k)
self._feed_input_shapes.append(K.int_shape(v))
if outputs is None:
# Obtain symbolic outputs by calling the model.
with K.get_graph().as_default():
if self._expects_training_arg:
outputs = self.call(inputs, training=training)
else:
outputs = self.call(inputs)
outputs = nest.flatten(outputs)
self.outputs = outputs
self.output_names = [
'output_%d' % (i + 1) for i in range(len(self.outputs))]
self.built = True
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
**kwargs):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator. Should return a tuple
of either `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
- A generator or `keras.utils.Sequence` returning `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset, dataset
iterator, generator, or `keras.utils.Sequence` instance, `y` should
not be specified (since targets will be obtained from `x`).
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, dataset, dataset iterators,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: Integer. 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See [callbacks](/api_docs/python/tf/keras/callbacks).
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling. This argument is
not supported when `x` is a dataset, dataset iterator, generator or
`keras.utils.Sequence` instance.
validation_data: Data on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data.
`validation_data` will override `validation_split`.
`validation_data` could be:
- tuple `(x_val, y_val)` of Numpy arrays or tensors
- tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays
- dataset or a dataset iterator
For the first two cases, `batch_size` must be provided.
For the last case, `validation_steps` must be provided.
shuffle: Boolean (whether to shuffle the training data
before each epoch) or str (for 'batch').
'batch' is a special option for dealing with the
limitations of HDF5 data; it shuffles in batch-sized chunks.
Has no effect when `steps_per_epoch` is not `None`.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class.
sample_weight: Optional Numpy array of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is not
supported when `x` is a dataset, dataset iterator, generator, or
`keras.utils.Sequence` instance; instead, provide the sample_weights
as the third element of `x`.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Integer or `None`.
Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with input tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of samples in your dataset divided by
the batch size, or 1 if that cannot be determined.
validation_steps: Only relevant if `validation_data` is provided and
is a dataset or dataset iterator. Total number of steps (batches of
samples) to draw before stopping when performing validation
at the end of every epoch.
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up
when using process-based threading. If unspecified, `workers`
will default to 1. If 0, will execute the generator on the main
thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
**kwargs: Used for backwards compatibility.
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
Raises:
RuntimeError: If the model was never compiled.
ValueError: In case of mismatch between the provided input data
and what the model expects.
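Example (a minimal sketch; the random arrays below are placeholders for
real training data):
```python
import numpy as np
x = np.random.random((100, 4))
y = np.random.random((100, 1))
history = model.fit(x, y, batch_size=16, epochs=2, validation_split=0.1)
print(history.history['loss'])  # one loss value per epoch
```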
"""
# TODO(fchollet): this method may be creating reference cycles, which would
# lead to accumulating garbage in memory when called in a loop. Investigate.
if data_utils.is_generator_or_sequence(x):
training_utils.check_generator_arguments(y, sample_weight)
return self.fit_generator(
x,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
# Legacy support
if 'nb_epoch' in kwargs:
logging.warning(
'The `nb_epoch` argument in `fit` '
'has been renamed `epochs`.')
epochs = kwargs.pop('nb_epoch')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
# Validate and standardize user data.
if self._distribution_strategy:
distributed_training_utils.validate_callbacks(callbacks)
distributed_training_utils.validate_inputs(
x, y, self._distribution_strategy)
first_x_value = nest.flatten(x)[0]
if not steps_per_epoch and isinstance(first_x_value, np.ndarray):
steps_per_epoch = distributed_training_utils.get_input_batch_params(
first_x_value, batch_size, self._distribution_strategy)
# Backwards compatibility
if batch_size is None and steps_per_epoch is None:
batch_size = 32
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size,
check_steps=True,
steps_name='steps_per_epoch',
steps=steps_per_epoch,
validation_split=validation_split,
shuffle=shuffle)
# Prepare validation data.
if validation_data:
if (isinstance(validation_data, iterator_ops.Iterator) or
isinstance(validation_data, iterator_ops.EagerIterator) or
isinstance(validation_data, dataset_ops.Dataset)):
val_x = validation_data
val_y = None
val_sample_weight = None
elif len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError(
'When passing a `validation_data` argument, '
'it must contain either 2 items (x_val, y_val), '
'or 3 items (x_val, y_val, val_sample_weights), '
'or alternatively it could be a dataset or a '
'dataset iterator. '
'However we received `validation_data=%s`' % validation_data)
# Validate and standardize validation data.
if self._distribution_strategy:
distributed_training_utils.validate_inputs(
val_x, val_y, self._distribution_strategy)
first_valx_value = nest.flatten(val_x)[0]
if not validation_steps and isinstance(first_valx_value, np.ndarray):
validation_steps = distributed_training_utils.get_input_batch_params(
first_valx_value, batch_size, self._distribution_strategy)
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x,
val_y,
sample_weight=val_sample_weight,
batch_size=batch_size,
steps=validation_steps)
elif validation_split and 0. < validation_split < 1.:
if training_utils.has_symbolic_tensors(x):
raise ValueError('If your data is in the form of symbolic tensors, '
'you cannot use `validation_split`.')
if hasattr(x[0], 'shape'):
split_at = int(x[0].shape[0] * (1. - validation_split))
else:
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))
y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))
sample_weights, val_sample_weights = (slice_arrays(
sample_weights, 0, split_at), slice_arrays(sample_weights, split_at))
elif validation_steps:
val_x = []
val_y = []
val_sample_weights = []
else:
val_x = None
val_y = None
val_sample_weights = None
if self.run_eagerly:
return training_eager.fit_loop(
self,
inputs=x,
targets=y,
sample_weights=sample_weights,
class_weight=class_weight,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
elif training_distributed.should_run_experimental_loop(self):
return training_distributed.experimental_fit_loop(
self,
x,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_iterator=val_x,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
else:
return training_arrays.fit_loop(
self,
x,
y,
sample_weights=sample_weights,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
def evaluate(self,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
- A generator or `keras.utils.Sequence` instance.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely).
If `x` is a dataset, dataset iterator, generator or
`keras.utils.Sequence` instance, `y` should not be specified (since
targets will be obtained from the iterator/dataset).
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, dataset, dataset iterators,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
verbose: 0 or 1. Verbosity mode.
0 = silent, 1 = progress bar.
sample_weight: Optional Numpy array of weights for
the test samples, used for weighting the loss function.
You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is not
supported when `x` is a dataset or a dataset iterator; instead, pass
sample weights as the third element of `x`.
steps: Integer or `None`.
Total number of steps (batches of samples)
before declaring the evaluation round finished.
Ignored with the default value of `None`.
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up when using
process-based threading. If unspecified, `workers` will default
to 1. If 0, will execute the generator on the main thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: in case of invalid arguments.
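Example (a sketch; `x_test` and `y_test` are assumed held-out arrays for
a compiled model):
```python
results = model.evaluate(x_test, y_test, batch_size=32)
print(model.metrics_names)  # display labels for the returned scalar(s)
```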
"""
if data_utils.is_generator_or_sequence(x):
training_utils.check_generator_arguments(y, sample_weight)
return self.evaluate_generator(
x,
steps=steps,
verbose=verbose,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
# Validate and standardize user data.
if self._distribution_strategy:
distributed_training_utils.validate_inputs(
x, y, self._distribution_strategy)
first_x_value = nest.flatten(x)[0]
if isinstance(first_x_value, np.ndarray) and not steps:
steps = distributed_training_utils.get_input_batch_params(
first_x_value, batch_size, self._distribution_strategy)
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
batch_size=batch_size,
check_steps=True,
steps_name='steps',
steps=steps)
if self.run_eagerly:
return training_eager.test_loop(
self,
inputs=x,
targets=y,
sample_weights=sample_weights,
batch_size=batch_size,
verbose=verbose,
steps=steps)
elif training_distributed.should_run_experimental_loop(self):
return training_distributed.experimental_test_loop(
self, iterator=x, verbose=verbose, steps=steps)
else:
return training_arrays.test_loop(
self,
inputs=x,
targets=y,
sample_weights=sample_weights,
batch_size=batch_size,
verbose=verbose,
steps=steps)
def predict(self,
x,
batch_size=None,
verbose=0,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Generates output predictions for the input samples.
Computation is done in batches.
Arguments:
x: Input samples. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset or a dataset iterator.
- A generator or `keras.utils.Sequence` instance.
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, dataset, dataset iterators,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
verbose: Verbosity mode, 0 or 1.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`.
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up when using
process-based threading. If unspecified, `workers` will default
to 1. If 0, will execute the generator on the main thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
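Example (a sketch; `x_test` is an assumed array of input samples):
```python
preds = model.predict(x_test, batch_size=32)
# A single Numpy array for a single-output model, or a list of arrays
# for a multi-output model.
```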
"""
if data_utils.is_generator_or_sequence(x):
return self.predict_generator(
x,
steps=steps,
verbose=verbose,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
if self._distribution_strategy:
distributed_training_utils.validate_inputs(
x, None, self._distribution_strategy)
first_x_value = nest.flatten(x)[0]
if isinstance(first_x_value, np.ndarray) and not steps:
steps = distributed_training_utils.get_input_batch_params(
first_x_value, batch_size, self._distribution_strategy)
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
# Validate and standardize user data.
# TODO(anjalisridhar): We don't pass batch_size here for some reason. This
# means that we end up calculating it twice which we should avoid.
x, _, _ = self._standardize_user_data(
x, check_steps=True, steps_name='steps', steps=steps)
if self.run_eagerly:
return training_eager.predict_loop(
self, x, batch_size=batch_size, verbose=verbose, steps=steps)
elif training_distributed.should_run_experimental_loop(self):
results = training_distributed.experimental_predict_loop(
self, x, verbose=verbose, steps=steps)
return results
else:
return training_arrays.predict_loop(
self, x, batch_size=batch_size, verbose=verbose, steps=steps)
def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None):
"""Runs a single gradient update on a single batch of data.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
y: Target data. Like the input data `x`, it could be either Numpy
array(s) or TensorFlow tensor(s). It should be consistent with `x`
(you cannot have Numpy inputs and tensor targets, or inversely). If
`x` is a dataset or a dataset iterator, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample. In the case of
temporal data, you can pass a 2D array with shape (samples,
sequence_length), to apply a different weight to every timestep of
every sample. In this case you should make sure to specify
sample_weight_mode="temporal" in compile(). This argument is not
supported when `x` is a dataset or a dataset iterator.
class_weight: Optional dictionary mapping class indices (integers) to a
weight (float) to apply to the model's loss for the samples from this
class during training. This can be useful to tell the model to "pay
more attention" to samples from an under-represented class.
Returns:
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
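Example (a sketch; `batches` stands for any iterable of
`(x_batch, y_batch)` pairs):
```python
for x_batch, y_batch in batches:
  loss = model.train_on_batch(x_batch, y_batch)
```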
"""
if self._distribution_strategy:
raise NotImplementedError('`train_on_batch` is not supported for models '
'compiled with DistributionStrategy.')
# Validate and standardize user data.
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight, class_weight=class_weight)
if self.run_eagerly:
outputs = training_eager.train_on_batch(
self, x, y, sample_weights=sample_weights)
else:
if not isinstance(K.symbolic_learning_phase(), int):
ins = x + y + sample_weights + [True]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins) # pylint: disable=not-callable
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y=None, sample_weight=None):
"""Test the model on a single batch of samples.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset or a
dataset iterator, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile(). This argument is not
supported when `x` is a dataset or a dataset iterator.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
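Example (a sketch; `x_batch` and `y_batch` are an assumed held-out
batch):
```python
loss = model.test_on_batch(x_batch, y_batch)
```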
"""
if self._distribution_strategy:
raise NotImplementedError('`test_on_batch` is not supported for models '
'compiled with DistributionStrategy.')
# Validate and standardize user data.
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight)
if self.run_eagerly:
outputs = training_eager.test_on_batch(
self, x, y, sample_weights=sample_weights)
else:
inputs = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(inputs) # pylint: disable=not-callable
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset or a dataset iterator.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between given number of inputs and
expectations of the model.
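Example (a sketch; `x_batch` is an assumed batch of input samples):
```python
preds = model.predict_on_batch(x_batch)
```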
"""
if self._distribution_strategy:
raise NotImplementedError('`predict_on_batch` is not supported for '
'models compiled with DistributionStrategy.')
# Validate and standardize user data.
inputs, _, _ = self._standardize_user_data(x)
if self.run_eagerly:
if (isinstance(x, iterator_ops.EagerIterator) or
(isinstance(x, dataset_ops.Dataset))):
inputs = training_utils.cast_if_floating_dtype(inputs)
else:
inputs = [
ops.convert_to_tensor(val, dtype=K.floatx()) for val in inputs
]
return self(inputs) # pylint: disable=not-callable
self._make_predict_function()
outputs = self.predict_function(inputs)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
"""Fits the model on data yielded batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
The use of `keras.utils.Sequence` guarantees the ordering
and guarantees the single use of every input per epoch when
using `use_multiprocessing=True`.
Arguments:
generator: A generator or an instance of `Sequence`
(`keras.utils.Sequence`)
object in order to avoid duplicate data
when using multiprocessing.
The output of the generator must be either
- a tuple `(inputs, targets)`
- a tuple `(inputs, targets, sample_weights)`.
This tuple (a single output of the generator) makes a single batch.
Therefore, all arrays in this tuple must have the same length (equal
to the size of this batch). Different batches may have different
sizes.
            For example, the last batch of the epoch is commonly smaller than
            the others, if the size of the dataset is not divisible by the
            batch size.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
be equal to the number of samples of your dataset
divided by the batch size.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
epochs: Integer, total number of iterations on the data.
verbose: Verbosity mode, 0, 1, or 2.
callbacks: List of callbacks to be called during training.
validation_data: This can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
validation_steps: Only relevant if `validation_data`
is a generator. Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(validation_data)` as a number of steps.
class_weight: Dictionary mapping class indices to a weight
for the class.
max_queue_size: Integer. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
shuffle: Boolean. Whether to shuffle the order of the batches at
the beginning of each epoch. Only used with instances
of `Sequence` (`keras.utils.Sequence`).
Has no effect when `steps_per_epoch` is not `None`.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
Returns:
A `History` object.
Example:
```python
        def generate_arrays_from_file(path):
            while True:
                with open(path) as f:
                    for line in f:
                        # create numpy arrays of input data
                        # and labels, from each line in the file
                        x1, x2, y = process_line(line)
                        yield ({'input_1': x1, 'input_2': x2}, {'output': y})
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
Raises:
ValueError: In case the generator yields data in an invalid format.
"""
if self._distribution_strategy:
raise NotImplementedError('`fit_generator` is not supported for '
'models compiled with DistributionStrategy.')
return training_generator.fit_generator(
self,
generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
def evaluate_generator(self,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
Arguments:
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
or an instance of `keras.utils.Sequence`
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
max_queue_size: maximum size for the generator queue
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
verbose: Verbosity mode, 0 or 1.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
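        Example:
            A minimal sketch (assuming `model` is a compiled Keras model and
            `batch_generator()` is a hypothetical generator yielding
            `(inputs, targets)` tuples):
        ```python
        results = model.evaluate_generator(batch_generator(), steps=100)
        ```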
        Raises:
            ValueError: In case of invalid arguments, or if the generator
                yields data in an invalid format.
"""
if self._distribution_strategy:
raise NotImplementedError('`evaluate_generator` is not supported for '
'models compiled with DistributionStrategy.')
return training_generator.evaluate_generator(
self,
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
def predict_generator(self,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
Arguments:
generator: Generator yielding batches of input samples
or an instance of `keras.utils.Sequence` object in order to
avoid duplicate data when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
max_queue_size: Maximum size for the generator queue.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
verbose: verbosity mode, 0 or 1.
Returns:
Numpy array(s) of predictions.
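        Example:
            A minimal sketch (assuming `model` is a built Keras model and
            `sample_generator()` is a hypothetical generator yielding batches
            of input samples):
        ```python
        predictions = model.predict_generator(sample_generator(), steps=50)
        ```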
Raises:
ValueError: In case the generator yields data in an invalid format.
"""
if self._distribution_strategy:
raise NotImplementedError('`predict_generator` is not supported for '
'models compiled with DistributionStrategy.')
return training_generator.predict_generator(
self,
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
def _get_callback_model(self):
"""Returns the Callback Model for this Model."""
if hasattr(self, '_replicated_model') and self._replicated_model:
# When using training_distributed, we set the callback model
# to an instance of the `DistributedModel` that we create in
# the `compile` call. The `DistributedModel` is initialized
# with the first replicated model. We need to set the callback
# model to a DistributedModel to allow us to override saving
# and loading weights when we checkpoint the model during training.
return self._replicated_model
if hasattr(self, 'callback_model') and self.callback_model:
return self.callback_model
return self
def _make_callback_model(self, grouped_model):
first_replicated_model = self._distribution_strategy.unwrap(
grouped_model)[0]
# We initialize the callback model with the first replicated model.
self._replicated_model = DistributedCallbackModel(first_replicated_model)
self._replicated_model.set_original_model(self)
class DistributedCallbackModel(Model):
"""Model that is used for callbacks with DistributionStrategy."""
def __init__(self, model):
super(DistributedCallbackModel, self).__init__()
# TODO(anjalisridhar): Right now the only attributes set are the layer and
# weights. We may need to set additional attributes as needed since we have
# not called compile on this model.
def set_original_model(self, orig_model):
self._original_model = orig_model
def save_weights(self, filepath, overwrite=True, save_format=None):
self._replicated_model.save_weights(filepath, overwrite=overwrite,
save_format=save_format)
def save(self, filepath, overwrite=True, include_optimizer=True):
# save weights from the distributed model to the original model
distributed_model_weights = self.get_weights()
self._original_model.set_weights(distributed_model_weights)
# TODO(anjalisridhar): Do we need to save the original model here?
# Saving the first replicated model works as well.
self._original_model.save(filepath, overwrite=True, include_optimizer=False)
def load_weights(self, filepath, by_name=False):
self._original_model.load_weights(filepath, by_name=False)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = self._original_model.get_weights()
distributed_training_utils.set_weights(
self._original_model._distribution_strategy, self, # pylint: disable=protected-access
orig_model_weights)
def __getattr__(self, item):
    # Whitelisted attributes of the model that can be accessed by the user
# during a callback.
if item not in ['_setattr_tracking']:
logging.warning('You are accessing attribute ' + item + ' of the '
'DistributedCallbackModel that may not have been set '
'correctly.')
|
import logging
logger = logging.getLogger(__name__)
from django.core.exceptions import PermissionDenied
from django.http import JsonResponse, Http404, HttpResponseServerError, HttpResponseBadRequest
from fir_irma import api
from fir_irma.models import IrmaScan
from fir_irma.settings import settings
ERROR_NOT_FOUND = 1
ERROR_SERVER_ERROR = 2
ERROR_CLIENT_ERROR = 3
ERROR_WRONG_METHOD = 4
ERROR_UNAUTHORIZED = 5
def process_error(request, error=ERROR_SERVER_ERROR, message=None):
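    """Map an internal error code to an HTTP response.

    AJAX requests receive a JSON error payload with a matching status code;
    non-AJAX requests get the corresponding plain Django response or exception.
    """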
if request.is_ajax():
if error == ERROR_NOT_FOUND:
if message is None:
message = 'Object not found'
return JsonResponse({'type': 'invalid_request_error', 'message': message}, status=404)
elif error == ERROR_SERVER_ERROR:
if message is None:
message = 'Server error'
return JsonResponse({'type': 'api_error', 'message': message}, status=500)
elif error == ERROR_CLIENT_ERROR:
if message is None:
message = 'Invalid request'
return JsonResponse({'type': 'invalid_request_error', 'message': message}, status=400)
elif error == ERROR_WRONG_METHOD:
if message is None:
message = 'Invalid method'
return JsonResponse({'type': 'invalid_request_error', 'message': message}, status=405)
elif error == ERROR_UNAUTHORIZED:
if message is None:
message = 'Unauthorized'
return JsonResponse({'type': 'invalid_request_error', 'message': message}, status=403)
else:
if error == ERROR_NOT_FOUND:
raise Http404()
elif error == ERROR_SERVER_ERROR:
return HttpResponseServerError()
elif error == ERROR_CLIENT_ERROR or error == ERROR_WRONG_METHOD:
return HttpResponseBadRequest()
elif error == ERROR_UNAUTHORIZED:
raise PermissionDenied
def scan_file(file_object, user):
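    """Submit an uploaded file to IRMA, launch the scan and attach the file's
    hashes to the resulting IrmaScan as artifacts."""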
logger.debug("Scanning uploaded file %s", file_object.getfilename())
try:
from fir_artifacts.models import Artifact
from fir_artifacts import Hash
code, payload = api.new_scan()
scan_id = payload['id']
scan = IrmaScan.objects.create(irma_scan=scan_id, user=user)
api.upload_files(scan_id, files={'file': file_object.file})
force = user.has_perm('fir_irma.can_force_scan')
api.launch_scan(scan_id, force=force)
hashes = file_object.get_hashes()
for h in hashes:
try:
a = Artifact.objects.get(value=hashes[h])
a.save()
except Exception:
a = Artifact()
a.type = Hash.key
a.value = hashes[h]
a.save()
a.relations.add(scan)
except api.APIError as error:
logger.error("IRMA automatic scan error - %s - %s", error.type, error.message)
except Exception as error:
logger.error("IRMA automatic scan error - generic_error - %s", str(error))
def fir_files_postsave(sender, **kwargs):
from django.contrib.contenttypes.models import ContentType
from fir_irma.models import IrmaScan
irmascan_type = ContentType.objects.get_for_model(IrmaScan)
instance = kwargs.get('instance')
    if instance.content_type != irmascan_type and instance.hashes.count() > 0:
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User
related = instance.get_related()
for field in settings.IRMA_SCAN_FIR_FILES_USER_FIELDS:
user = getattr(related, field)
if user is not None and isinstance(user, User) and user.has_perm('fir_irma.scan_files'):
scan_file(instance, user)
return
logger.error("IRMA automatic scan error - user_error - No user able to scan")
|
from unittest.mock import patch, Mock
from django.conf import settings
from django.contrib.auth.models import Permission
from django.test import TestCase
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from bulk_sms.models import Broadcast, Batch
from bulk_sms.tests.factories import BroadcastFactory
from libya_elections.tests.utils import ResponseCheckerMixin
from libya_site.tests.factories import UserFactory
from register.tests.factories import RegistrationCenterFactory
from staff.tests.base import StaffUserMixin
class BroadcastHelper(StaffUserMixin):
permissions = ['add_broadcast', 'browse_broadcast', 'read_broadcast', 'approve_broadcast']
def setUp(self):
self.staff_user = self.create_staff_user()
self.login(self.staff_user)
def create_staff_user(self):
user = UserFactory(username=self.username, email=self.email,
password=self.password)
user.is_staff = True
user.save()
return user
@staticmethod
def add_permissions(user, permissions):
for perm in permissions:
user.user_permissions.add(Permission.objects.get(codename=perm))
@staticmethod
def remove_permissions(user, permissions):
for perm in permissions:
user.user_permissions.remove(Permission.objects.get(codename=perm))
class BroadcastBreadTest(ResponseCheckerMixin, BroadcastHelper, TestCase):
def setUp(self):
super(BroadcastBreadTest, self).setUp()
self.broadcast = BroadcastFactory(message='test')
self.add_via_simple_form_url = reverse('add_broadcast')
self.add_via_csv_upload = reverse('upload_broadcast')
self.approve_url = reverse('approve_reject_broadcast',
kwargs={'broadcast_id': self.broadcast.id})
def test_browse_broadcasts(self):
perms = ['browse_broadcast']
# user with browse_broadcast permission can browse
self.add_permissions(self.staff_user, perms)
response = self.client.get(reverse('browse_broadcasts'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, template_name='bulk_sms/broadcast_browse.html')
# users without the browse_broadcast permission can't get to that page
self.remove_permissions(self.staff_user, perms)
self.assertForbidden(self.client.get(reverse('browse_broadcasts')))
def test_read_broadcast(self):
broadcast = BroadcastFactory()
perms = ['read_broadcast']
# user with read_broadcast permission can browse
self.add_permissions(self.staff_user, perms)
response = self.client.get(reverse('read_broadcast', kwargs={'pk': broadcast.id}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
template_name='bulk_sms/broadcast_approve_reject.html')
# users without the read_broadcast permission can't get to that page
self.remove_permissions(self.staff_user, perms)
self.assertForbidden(self.client.get(reverse('read_broadcast',
kwargs={'pk': broadcast.id})))
def test_add_broadcast_via_simple_form(self):
perms = ['add_broadcast']
data = {'audience': 'staff', 'message': 'test broadcasting message'}
broadcast_count = Broadcast.objects.count()
# users with add_broadcast permission can view the add broadcast form
self.add_permissions(self.staff_user, perms)
response = self.client.get(self.add_via_simple_form_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
template_name="bulk_sms/broadcast_add_via_form.html")
# users with add_broadcast permission can create broadcasts
response = self.client.post(self.add_via_simple_form_url, data=data)
self.assertEqual(response.status_code, 302)
new_broadcast_count = Broadcast.objects.count()
self.assertEqual(broadcast_count + 1, new_broadcast_count)
# users without add_broadcast permission can't create broadcasts
self.remove_permissions(self.staff_user, perms)
self.assertForbidden(self.client.post(self.add_via_simple_form_url, data=data))
def test_add_broadcast_via_csv_upload(self):
perms = ['add_broadcast']
mock_file = Mock()
mock_file.read.return_value = "218911234567,the quick brown fox etc.\n"
mock_file.name = 'foo.csv'
data = {'name': 'test_batch', 'description': 'test broadcasting description',
'csv': mock_file}
broadcast_count = Broadcast.objects.count()
# users with add_broadcast permission can view the add broadcast form
self.add_permissions(self.staff_user, perms)
response = self.client.get(self.add_via_csv_upload)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, template_name="bulk_sms/broadcast_add_via_csv.html")
# users with add_broadcast permission can create broadcasts
response = self.client.post(self.add_via_csv_upload, data=data)
self.assertEqual(response.status_code, 302)
new_broadcast_count = Broadcast.objects.count()
self.assertEqual(broadcast_count + 1, new_broadcast_count)
# users without add_broadcast permission can't create broadcasts
self.remove_permissions(self.staff_user, perms)
self.assertForbidden(self.client.post(self.add_via_csv_upload, data=data))
def test_center_required(self):
# center is a required field when creating a broadcast for a single
# center.
perms = ['add_broadcast']
data = {'audience': 'single_center', 'message': 'test broadcasting message'}
broadcast_count = Broadcast.objects.count()
center = RegistrationCenterFactory()
# form will have errors if the center field was not filled.
self.add_permissions(self.staff_user, perms)
response = self.client.post(self.add_via_simple_form_url, data=data)
self.assertEqual(response.status_code, 400)
self.assertTemplateUsed(response,
template_name="bulk_sms/broadcast_add_via_form.html")
errors = response.context['form'].errors
self.assertEqual(len(errors), 1)
# submitting all required fields
data['center'] = center.id
response = self.client.post(self.add_via_simple_form_url, data=data)
self.assertEqual(response.status_code, 302)
new_broadcast_count = Broadcast.objects.count()
self.assertEqual(broadcast_count + 1, new_broadcast_count)
@patch('bulk_sms.tasks.approve_broadcast.delay', autospec=True)
def test_approve_broadcast(self, approve_task):
# broadcasts can be approved by users with the `approve_broadcast` permission.
# approving a broadcast fires up the `approve_broadcast` task.
perms = ['approve_broadcast']
data = {'approve': True}
# user does not have permission to approve broadcasts
response = self.client.post(self.approve_url, data=data)
self.assertEqual(response.status_code, 403)
# give user permission to approve broadcasts
self.add_permissions(self.staff_user, perms)
self.client.post(self.approve_url, data=data)
# approve_broadcast task was fired
approve_task.assert_called_once_with(self.broadcast.id)
def test_reject_broadcast(self):
# broadcasts can be rejected.
self.assertEqual(self.broadcast.batch.status, Batch.PENDING)
perms = ['approve_broadcast']
data = {'reject': True}
# user does not have permission to approve broadcasts
response = self.client.post(self.approve_url, data=data)
self.assertEqual(response.status_code, 403)
# give user permission to approve broadcasts
self.add_permissions(self.staff_user, perms)
self.client.post(self.approve_url, data=data)
broadcast = Broadcast.objects.get(pk=self.broadcast.id)
self.assertEqual(broadcast.batch.status, Batch.REJECTED)
self.assertEqual(broadcast.reviewed_by, self.staff_user)
# an approved message can be unapproved
broadcast.batch.status = Batch.APPROVED
broadcast.batch.save()
self.client.post(self.approve_url, data=data)
broadcast = Broadcast.objects.get(pk=self.broadcast.id)
self.assertEqual(broadcast.batch.status, Batch.REJECTED)
self.assertEqual(broadcast.reviewed_by, self.staff_user)
class BroadcastModelTest(BroadcastHelper, TestCase):
def setUp(self):
super(BroadcastModelTest, self).setUp()
self.broadcast = Broadcast.objects.create(created_by=self.staff_user, message='test')
def test_batch(self):
# a batch is created the first time you save an instance.
self.assertIsInstance(self.broadcast.batch, Batch)
def test_status(self):
# the status for a broadcast is the same as that of the batch associated with
# it.
self.assertEqual(self.broadcast.batch.status, Batch.PENDING)
self.assertEqual(self.broadcast.status, _("Pending Approval"))
self.broadcast.batch.status = Batch.APPROVED
self.assertEqual(self.broadcast.status, self.broadcast.batch.get_status_display())
def test_get_messages(self):
# get_messages() yields tuples (phone_number, message, shortcode) for each individual
# in the audience.
# broadcast directed to staff users only (audience defaults to STAFF_ONLY)
broadcast = self.broadcast
broadcasting_message = "Broadcast for staff"
broadcast.message = broadcasting_message
with patch.object(Broadcast, 'get_numbers_for_staff') as staff_numbers:
phone_numbers = ['1', '2', '3']
staff_numbers.return_value = phone_numbers
messages = [message for message in self.broadcast.get_messages()]
staff_numbers.assert_called_once_with()
for index, (phone_number, message, shortcode) in enumerate(messages):
self.assertEqual(phone_number, phone_numbers[index])
self.assertEqual(message, broadcasting_message)
# STAFF_ONLY message, so from_shortcode should be REPORTS_SHORT_CODE
self.assertEqual(shortcode, settings.REPORTS_SHORT_CODE)
# broadcast directed to all registrants
broadcasting_message = "Broadcast for all registrants"
broadcast.audience = Broadcast.ALL_REGISTRANTS
broadcast.message = broadcasting_message
with patch.object(Broadcast, 'get_numbers_for_all_centers') as all_registrants:
phone_numbers = ['1', '2', '3']
all_registrants.return_value = phone_numbers
messages = [message for message in self.broadcast.get_messages()]
all_registrants.assert_called_once_with()
for index, (phone_number, message, shortcode) in enumerate(messages):
self.assertEqual(phone_number, phone_numbers[index])
self.assertEqual(message, broadcasting_message)
# ALL_REGISTRANTS message, so from_shortcode should be None
# (which will trigger default shortcode to be used)
self.assertEqual(shortcode, None)
# broadcasting message for a single center
broadcasting_message = "Broadcast for single center"
broadcast.audience = Broadcast.SINGLE_CENTER
broadcast.message = broadcasting_message
with patch.object(Broadcast, 'get_numbers_for_single_center') as single_center:
phone_numbers = ['1', '2', '3']
single_center.return_value = phone_numbers
messages = [message for message in self.broadcast.get_messages()]
single_center.assert_called_once_with()
for index, (phone_number, message, shortcode) in enumerate(messages):
self.assertEqual(phone_number, phone_numbers[index])
self.assertEqual(message, broadcasting_message)
# SINGLE_CENTER message, so from_shortcode should be None
# (which will trigger default shortcode to be used)
self.assertEqual(shortcode, None)
|
from rest_framework import serializers
from grades.models import *
class GradeSerializer(serializers.ModelSerializer):
class Meta:
model = Grade
fields = ('semester_code', 'average_grade', 'passed', 'a', 'b', 'c', 'd', 'e', 'f')
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = ('norwegian_name', 'short_name', 'code', 'faculty_code', 'english_name', 'credit', 'study_level',
'taught_in_spring', 'taught_in_autumn', 'taught_from', 'taught_in_english', 'last_year_taught',
'content', 'learning_form', 'learning_goal')
class CourseIndexSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = ('code', 'norwegian_name', 'faculty_code')
class CourseTypeaheadSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = ('code', 'norwegian_name', 'english_name')
|
import os
import sys
import MySQLdb
from util.config import get_conf
reload(sys)
sys.setdefaultencoding("utf-8")
class DBBase:
db = None
cursor = None
def __init__(self, dbconfig):
self._conf = get_conf(dbconfig)
def _connect(self, key, ttype):
_host = self._conf.get(key, "host")
_port = self._conf.getint(key, "port")
_user = self._conf.get(key, "username")
_pass = self._conf.get(key, "password")
_db = self._conf.get(key, "db")
_connect_timeout = self._conf.getint(key, "connect_timeout", 10)
#_read_timeout = self._conf.get(key, "read_timeout")
#_write_timeout = self._conf.get(key, "write_timeout")
_charset = self._conf.get(key, "charset")
        configs = {
            "host": _host,
            "port": _port,
            "user": _user,
            "passwd": _pass,
            "db": _db,
            "charset": _charset,
            "connect_timeout": _connect_timeout,
            #"read_timeout": _read_timeout,
            #"write_timeout": _write_timeout,
        }
self.db = MySQLdb.connect(**configs)
self.db.autocommit(True)
if ttype == "tuple":
self.cursor = self.db.cursor()
else:
self.cursor = self.db.cursor(cursorclass=MySQLdb.cursors.DictCursor)
def __del__(self):
self._close(self.db, self.cursor)
def _close(self, db, cursor):
if db:
try:
db.close()
except:
pass
if cursor:
try:
cursor.close()
except:
pass
def valid_sql(func):
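    """Decorator that rejects calls whose SQL argument is empty."""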
def decorator(*args, **kwargs):
if not args[1]:
raise Exception("empty sql")
return func(*args, **kwargs)
return decorator
class MysqlHelper(DBBase):
def __init__(self, choice_db, confile, ttype='tuple'):
DBBase.__init__(self, confile)
self.ttype = ttype
self.choice_db = choice_db
self._connect(self.choice_db, self.ttype)
def escape_string(self, value):
return self.db.escape_string(value)
def check_alive(self):
try:
self.db.ping()
except Exception, e:
sys.stderr.write("%s\n" % repr(e))
sys.stderr.flush()
n = 5
while n > 0:
n -= 1
try:
self._close(self.db, self.cursor)
self._connect(self.choice_db, self.ttype)
return
except Exception, e:
sys.stderr.write("%s\n" % repr(e))
sys.stderr.flush()
continue
@valid_sql
def fetch_many(self, sql, size = 100):
self.check_alive()
self.cursor.execute(sql)
while True:
ret = self.cursor.fetchmany(size)
if ret:
yield ret
else:
break
@valid_sql
def fetchmany(self, sql, size = 100):
self.check_alive()
self.cursor.execute(sql)
while True:
ret = self.cursor.fetchmany(size)
if ret:
yield ret
else:
break
def fetch_all(self, sql):
self.check_alive()
self.cursor.execute(sql)
return self.cursor.fetchall()
def fetchall(self, sql):
self.check_alive()
self.cursor.execute(sql)
return self.cursor.fetchall()
def fetch_one(self, sql):
self.check_alive()
self.cursor.execute(sql)
return self.cursor.fetchone()
def fetchone(self, sql):
self.check_alive()
self.cursor.execute(sql)
return self.cursor.fetchone()
def execute(self, sql):
self.check_alive()
self.cursor.execute(sql)
#self.db.commit()
def execute_many(self, sql, values = []):
self.check_alive()
self.cursor.executemany(sql, values)
#self.db.commit()
def executemany(self, sql, values = []):
self.check_alive()
self.cursor.executemany(sql, values)
#self.db.commit()
def get_slave_db(db = "slave_db_12", ttype = "tuple", confile = "nice.cfg"):
    '''Get a MySQL helper connected to the statistics slave database.'''
return MysqlHelper(choice_db = db, ttype = ttype, confile=confile)
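# Usage sketch (hypothetical config section and SQL; assumes "slave_db_12" is
# defined in nice.cfg):
#     helper = get_slave_db(ttype="dict")
#     for rows in helper.fetch_many("SELECT id, name FROM users", size=500):
#         for row in rows:
#             print row["id"], row["name"]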
|
import re
from core.alive import alive
from core.twitterc import TwitterC
state = {'CHIS': 'Chiapas', 'NL': 'Nuevo Leon', 'VER': 'Veracruz',
'JAL': 'Jalisco', 'OAX': 'Oaxaca', 'GRO': 'Guerrero',
'BC': 'Baja California', 'SON': 'Sonora', 'RT': 'Retweet'}
class Seismology(object):
def __init__(self, voicesynthetizer):
self.modulename = 'Seismology'
self.twitterc = TwitterC('twython')
self.voicesynthetizer = voicesynthetizer
def SismologicoMX(self):
print '[NuupXe] Seismology'
message = 'Servicio Sismologico '
tstatus = self.twitterc.timeline_get('skyalertmx', 3)
sismo = 'False'
for status in tstatus:
if not status['text'].partition(' ')[0] == 'SISMO' or not status['text'].partition(' ')[0] == 'Preliminar:':
status['text'] = status['text']
status['text'] = re.sub(r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4} /)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))', '', status['text'])
URLless_string = re.sub(r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))', '', status['text'])
status['text'] = status['text'].replace("Loc", "Localizacion")
status['text'] = status['text'].replace("CD", "Ciudad")
status['text'] = status['text'].replace("Lat", "Latitud")
status['text'] = status['text'].replace("Lon", "Longitud")
status['text'] = status['text'].replace("Pf", "Profundidad")
status['text'] = status['text'].replace("SSN", "Servicio Sismologico Nacional")
pattern = re.compile(r'\b(' + '|'.join(state.keys()) + r')\b')
status['text'] = pattern.sub(lambda x: state[x.group()], status['text'])
try:
message = message + status['text']
except:
print 'Seismology Error'
sismo = 'True'
if sismo == 'False':
self.voicesynthetizer.speechit("No se encontraron sismos en las ultimas horas")
else:
self.voicesynthetizer.speechit(message)
alive(modulename=self.modulename, modulemessage=message)
|
import string
import random
from enum import Enum
import redis
import config
from config import SESSION_SALT, MYSQL, MYSQL_NAME
from flask import Flask, current_app, render_template,session
from flask_login import LoginManager
from flask_session import Session
from flask_session.sessions import RedisSessionInterface
from playhouse.pool import PooledMySQLDatabase
from website import blueprints
from flask_mail import Mail
from website.helper.sentry_helper import SentryHelper
from website.util.common_utils import is_dev_mode
from playhouse.flask_utils import FlaskDB
__author__ = 'walker_lee'
"""应用初始化入口以及配置"""
mail = Mail()
db_wrapper = None
db = None
login_manager = LoginManager()
login_manager.session_protection = None
login_manager.login_view = 'backend.login'
class Server(Enum):
all = -1
backend = 1
fronted = 2
def _init_db(app):
pooled_db = PooledMySQLDatabase(database=MYSQL_NAME, max_connections=32, stale_timeout=300, **MYSQL)
global db_wrapper
db_wrapper = FlaskDB(app,pooled_db)
global db
db = db_wrapper.database
def create_app(config=None, server=Server.all):
"""初始化flask相关配置"""
app = Flask('pyforum')
app.config.from_object(config)
_init_db(app)
init_blueprint(app, server)
_config_session(app)
_config_sitemap(app)
mail.init_app(app)
login_manager.init_app(app)
SentryHelper.init_app(app)
app.jinja_env.globals['csrf_token'] = _generate_csrf_token
_init_logger()
return app
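# Usage sketch (hypothetical entry point; assumes a `config` object carrying
# the settings expected by Flask and the extensions above):
#     app = create_app(config=config, server=Server.backend)
#     app.run(debug=is_dev_mode())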
def init_blueprint(app, server):
if Server.backend == server:
blueprints.init_backend(app, server=server)
elif server == Server.all:
blueprints.init_backend(app, server=server)
def _init_logger():
    # Enable peewee debug logging in development mode.
if is_dev_mode():
import logging
logger = logging.getLogger('peewee')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
def _config_session(app):
app.secret_key = SESSION_SALT
app.config['SESSION_TYPE'] = 'redis'
redis_session = redis.StrictRedis(host=config.REDIS_HOST,
port=config.REDIS_PORT,
db=config.REDIS_DB,
password=config.REDIS_PASSWORD)
app.config['SESSION_REDIS'] = redis_session
Session(app)
# app.session_interface = RedisSessionInterface()
def _generate_csrf_token():
if '_csrf_token' not in session:
session['_csrf_token'] = ''.join(random.sample(string.ascii_letters + string.digits, 8))
return session['_csrf_token']
def _config_sitemap(app):
"""
显示所有注册uri地址
:param app:
:return:
"""
@app.route('/site_map')
def site_map():
if 'RELEASE' in current_app.config:
return ''
links = []
for rule in app.url_map.iter_rules():
f = app.view_functions[rule.endpoint]
links.append({
'methods': rule.methods,
'rule': rule.rule,
'endpoint': rule.endpoint,
'function': f
})
links = sorted(links, key=lambda x: x['rule'])
return render_template('backend/site_map.html', site_map=links)
|
from pymongo import MongoClient
import datetime as dt
class MongoDbUtil:
@classmethod
def getDB(cls, user, password, host, port, dbname):
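        """Connect with SCRAM-SHA-1 authentication and return the default
        database named `dbname`."""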
uri = "mongodb://{}:{}@{}:{}/{}?authMechanism=SCRAM-SHA-1".format(user, password, host, port, dbname)
client = MongoClient(uri)
db = client.get_default_database()
return db
@staticmethod
def test(db):
# get the name list of collections in the given db
for name in db.collection_names():
print(name)
|
__author__ = 'maru'
__copyright__ = "Copyright 2013, ML Lab"
__version__ = "0.1"
__status__ = "Development"
import sys
import os
sys.path.append(os.path.abspath("."))
from experiment_utils import *
import argparse
import numpy as np
from sklearn.datasets.base import Bunch
from datautil.load_data import *
from sklearn import linear_model
import time
from sklearn import metrics
from collections import defaultdict
from datautil.textutils import StemTokenizer
from strategy import randomsampling
from expert import baseexpert
from sklearn.feature_extraction.text import CountVectorizer
import pickle
ap = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
ap.add_argument('--train',
metavar='TRAIN',
default="20news",
help='training data (libSVM format)')
ap.add_argument('--neutral-threshold',
metavar='NEUTRAL',
type=float,
default=.4,
help='neutrality threshold of uncertainty')
ap.add_argument('--expert-penalty',
metavar='EXPERT_PENALTY',
type=float,
default=0.3,
help='Expert penalty value for the classifier simulation')
ap.add_argument('--trials',
metavar='TRIALS',
type=int,
default=5,
help='number of trials')
ap.add_argument('--folds',
metavar='FOLDS',
type=int,
default=1,
help='number of folds')
ap.add_argument('--budget',
metavar='BUDGET',
type=int,
default=20000,
help='budget')
ap.add_argument('--step-size',
metavar='STEP_SIZE',
type=int,
default=10,
help='instances to acquire at every iteration')
ap.add_argument('--bootstrap',
metavar='BOOTSTRAP',
type=int,
default=50,
help='size of the initial labeled dataset')
ap.add_argument('--cost-function',
metavar='COST_FUNCTION',
type=str,
default="direct",
help='cost function of the x-axis [uniform|log|linear|direct]')
ap.add_argument('--cost-model',
metavar='COST_MODEL',
type=str,
default="[[10.0,5.7], [25.0,8.2], [50.1,10.9], [75,15.9], [100,16.7], [125,17.8], [150,22.7], [175,19.9], [200,17.4]]",
help='cost function parameters of the cost function')
ap.add_argument('--fixk',
metavar='FIXK',
type=int,
default=10,
help='fixed k number of words')
ap.add_argument('--maxiter',
metavar='MAXITER',
type=int,
default=5,
help='Max number of iterations')
ap.add_argument('--seed',
metavar='SEED',
type=int,
default=8765432,
                help='random seed')
ap.add_argument('--method',
metavar='METHOD',
type=str,
default="unc",
help='Sampling method [rnd|unc]')
ap.add_argument('--classifier',
metavar='CLASSIFIER',
type=str,
default="lr",
help='underlying classifier')
args = ap.parse_args()
rand = np.random.mtrand.RandomState(args.seed)
print args
print
def main():
accuracies = defaultdict(lambda: [])
aucs = defaultdict(lambda: [])
x_axis = defaultdict(lambda: [])
vct = CountVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=True, ngram_range=(1, 3),
token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer())
vct_analizer = vct.build_tokenizer()
print("Start loading ...")
# data fields: data, bow, file_names, target_names, target
########## NEWS GROUPS ###############
# easy to hard. see "Less is More" paper: http://axon.cs.byu.edu/~martinez/classes/678/Presentations/Clawson.pdf
categories = [['alt.atheism', 'talk.religion.misc'],
['comp.graphics', 'comp.windows.x'],
['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware'],
['rec.sport.baseball', 'sci.crypt']]
min_size = max(100, args.fixk)
if args.fixk < 0:
args.fixk = None
data, vct = load_from_file(args.train, categories, args.fixk, min_size, vct)
# data = load_dataset(args.train, args.fixk, categories[0], vct, min_size)
print("Data %s" % args.train)
print("Data size %s" % len(data.train.data))
parameters = parse_parameters_mat(args.cost_model)
print "Cost Parameters %s" % parameters
cost_model = set_cost_model(args.cost_function, parameters=parameters)
print "\nCost Model: %s" % cost_model.__class__.__name__
#### STUDENT CLASSIFIER
clf = set_classifier(args.classifier)
print "\nClassifier: %s" % clf
#### EXPERT CLASSIFIER
exp_clf = linear_model.LogisticRegression(penalty='l1', C=args.expert_penalty)
exp_clf.fit(data.test.bow, data.test.target)
expert = baseexpert.NeutralityExpert(exp_clf, threshold=args.neutral_threshold,
cost_function=cost_model.cost_function)
print "\nExpert: %s " % expert
#### ACTIVE LEARNING SETTINGS
step_size = args.step_size
bootstrap_size = args.bootstrap
evaluation_points = 200
print("\nExperiment: step={0}, BT={1}, plot points={2}, fixk:{3}, minsize:{4}".format(step_size, bootstrap_size,
evaluation_points, args.fixk,
min_size))
print ("Cheating experiment - use full uncertainty query k words")
t0 = time.time()
### experiment starts
tx =[]
tac = []
tau = []
for t in range(args.trials):
trial_accu =[]
trial_aucs = []
trial_x_axis = []
print "*" * 60
print "Trial: %s" % t
student = randomsampling.UncertaintyLearner(model=clf, accuracy_model=None, budget=args.budget, seed=t,
subpool=250)
print "\nStudent: %s " % student
train_indices = []
train_x = []
train_y = []
pool = Bunch()
pool.data = data.train.bow.tocsr() # full words, for training
pool.fixk = data.train.bowk.tocsr() # k words BOW for querying
pool.target = data.train.target
pool.predicted = []
pool.kwords = np.array(data.train.kwords) # k words
pool.remaining = set(range(pool.data.shape[0])) # indices of the pool
bootstrapped = False
current_cost = 0
iteration = 0
while 0 < student.budget and len(pool.remaining) > step_size and iteration <= args.maxiter:
if not bootstrapped:
## random from each bootstrap
bt = randomsampling.BootstrapFromEach(t * 10)
query_index = bt.bootstrap(pool=pool, k=bootstrap_size)
bootstrapped = True
print "Bootstrap: %s " % bt.__class__.__name__
print
else:
query_index = student.pick_next(pool=pool, k=step_size)
query = pool.fixk[query_index] # query with k words
query_size = [len(vct_analizer(x)) for x in pool.kwords[query_index]]
ground_truth = pool.target[query_index]
#labels, spent = expert.label(unlabeled=query, target=ground_truth)
if iteration == 0: ## bootstrap uses ground truth
labels = ground_truth
spent = [0] * len(ground_truth) ## bootstrap cost is ignored
else:
labels = expert.label_instances(query, ground_truth)
spent = expert.estimate_instances(query_size)
### accumulate the cost of the query
query_cost = np.array(spent).sum()
current_cost += query_cost
## add data recent acquired to train
useful_answers = np.array([[x, y] for x, y in zip(query_index, labels) if y is not None])
# train_indices.extend(query_index)
if useful_answers.shape[0] != 0:
train_indices.extend(useful_answers[:, 0])
# add labels to training
train_x = pool.data[train_indices] ## train with all the words
# update labels with the expert labels
#train_y = pool.target[train_indices]
train_y.extend(useful_answers[:, 1])
if train_x.shape[0] != len(train_y):
raise Exception("Training data corrupted!")
# remove labels from pool
pool.remaining.difference_update(query_index)
# retrain the model
current_model = student.train(train_x, train_y)
# evaluate and save results
y_probas = current_model.predict_proba(data.test.bow)
auc = metrics.roc_auc_score(data.test.target, y_probas[:, 1])
pred_y = current_model.classes_[np.argmax(y_probas, axis=1)]
accu = metrics.accuracy_score(data.test.target, pred_y)
print ("TS:{0}\tAccu:{1:.3f}\tAUC:{2:.3f}\tCost:{3:.2f}\tCumm:{4:.2f}\tSpent:{5}\tuseful:{6}".format(len(train_indices),
accu,
auc, query_cost,
current_cost, format_spent(spent), useful_answers.shape[0]))
## the results should be based on the cost of the labeling
if iteration > 0: # bootstrap iteration
student.budget -= query_cost ## Bootstrap doesn't count
x_axis_range = current_cost
x_axis[x_axis_range].append(current_cost)
## save results
accuracies[x_axis_range].append(accu)
aucs[x_axis_range].append(auc)
trial_accu.append([x_axis_range, accu])
trial_aucs.append([x_axis_range, auc])
iteration += 1
# end of budget loop
tac.append(trial_accu)
tau.append(trial_aucs)
#end trial loop
accuracies = extrapolate_trials(tac, cost_25=parameters[1][1], step_size=args.step_size)
aucs = extrapolate_trials(tau, cost_25=parameters[1][1], step_size=args.step_size)
print("Elapsed time %.3f" % (time.time() - t0))
print_extrapolated_results(accuracies, aucs)
if __name__ == '__main__':
main()
|
"""Tests for user-related one-off computations."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
import datetime
import re
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import rating_services
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import user_jobs_continuous
from core.domain import user_jobs_one_off
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
from google.appengine.ext import ndb
(user_models, feedback_models, exp_models) = models.Registry.import_models(
[models.NAMES.user, models.NAMES.feedback, models.NAMES.exploration])
taskqueue_services = models.Registry.import_taskqueue_services()
search_services = models.Registry.import_search_services()
class UserContributionsOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off dashboard subscriptions job."""
EXP_ID_1 = 'exp_id_1'
EXP_ID_2 = 'exp_id_2'
USER_A_EMAIL = 'a@example.com'
USER_A_USERNAME = 'a'
USER_B_EMAIL = 'b@example.com'
USER_B_USERNAME = 'b'
USER_C_EMAIL = 'c@example.com'
USER_C_USERNAME = 'c'
USER_D_EMAIL = 'd@example.com'
USER_D_USERNAME = 'd'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = user_jobs_one_off.UserContributionsOneOffJob.create_new()
user_jobs_one_off.UserContributionsOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
def setUp(self):
super(UserContributionsOneOffJobTests, self).setUp()
# User A has no created or edited explorations.
# User B has one created exploration.
# User C has one edited exploration.
# User D has created an exploration and then edited it.
# (This is used to check that there are no duplicate
# entries in the contribution lists).
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
self.user_a_id = self.get_user_id_from_email(self.USER_A_EMAIL)
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
self.user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
self.user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
self.signup(self.USER_D_EMAIL, self.USER_D_USERNAME)
self.user_d_id = self.get_user_id_from_email(self.USER_D_EMAIL)
self.save_new_valid_exploration(
self.EXP_ID_1, self.user_b_id, end_state_name='End')
exp_services.update_exploration(
self.user_c_id, self.EXP_ID_1, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
self.save_new_valid_exploration(
self.EXP_ID_2, self.user_d_id, end_state_name='End')
exp_services.update_exploration(
self.user_d_id, self.EXP_ID_2, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
def test_null_case(self):
"""Tests the case where user has no created or edited explorations."""
self._run_one_off_job()
user_a_contributions_model = user_models.UserContributionsModel.get(
self.user_a_id, strict=False)
self.assertEqual(user_a_contributions_model.created_exploration_ids, [])
self.assertEqual(user_a_contributions_model.edited_exploration_ids, [])
def test_created_exp(self):
"""Tests the case where user has created (and therefore edited)
an exploration.
"""
self._run_one_off_job()
user_b_contributions_model = user_models.UserContributionsModel.get(
self.user_b_id)
self.assertEqual(
user_b_contributions_model.created_exploration_ids, [self.EXP_ID_1])
self.assertEqual(
user_b_contributions_model.edited_exploration_ids, [self.EXP_ID_1])
def test_edited_exp(self):
"""Tests the case where user has an edited exploration."""
self._run_one_off_job()
user_c_contributions_model = user_models.UserContributionsModel.get(
self.user_c_id)
self.assertEqual(
user_c_contributions_model.created_exploration_ids, [])
self.assertEqual(
user_c_contributions_model.edited_exploration_ids, [self.EXP_ID_1])
def test_for_duplicates(self):
"""Tests the case where user has an edited exploration, and edits
it again making sure it is not duplicated.
"""
self._run_one_off_job()
user_d_contributions_model = user_models.UserContributionsModel.get(
self.user_d_id)
self.assertEqual(
user_d_contributions_model.edited_exploration_ids,
[self.EXP_ID_2])
self.assertEqual(
user_d_contributions_model.created_exploration_ids,
[self.EXP_ID_2])
def test_no_new_user_contributions_model_get_created_with_existing_model(
self):
model1 = exp_models.ExplorationSnapshotMetadataModel(
id='exp_id-1', committer_id=self.user_a_id, commit_type='create')
model1.put()
user_models.UserContributionsModel(
id=self.user_a_id,
created_exploration_ids=['exp_id']
).put()
user_contributions_model = user_models.UserContributionsModel.get(
self.user_a_id)
self.assertEqual(
user_contributions_model.created_exploration_ids,
['exp_id'])
self._run_one_off_job()
user_contributions_model = user_models.UserContributionsModel.get(
self.user_a_id)
self.assertEqual(
user_contributions_model.created_exploration_ids,
['exp_id'])
def test_user_contributions_get_created_after_running_the_job(self):
model1 = exp_models.ExplorationSnapshotMetadataModel(
id='exp_id-1', committer_id='new_user', commit_type='create')
model1.put()
user_contributions_model = user_models.UserContributionsModel.get(
'new_user', strict=False)
self.assertIsNone(user_contributions_model)
self._run_one_off_job()
user_contributions_model = user_models.UserContributionsModel.get(
'new_user', strict=False)
self.assertEqual(
user_contributions_model.created_exploration_ids,
['exp_id'])
class PopulateUserAuthDetailsModelOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off PopulateUserAuthDetailsModel migration job."""
USER_A_EMAIL = 'a@example.com'
USER_A_ID = 'uid_voxecidnxaqdvhmoilhxxgeixffkauxc'
USER_A_GAE_ID = 'user_a_gae_id'
USER_B_EMAIL = 'b@example.com'
USER_B_ID = 'uid_mjmohemylmjjdqredntquhfvcyuindem'
USER_B_GAE_ID = 'user_b_gae_id'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.PopulateUserAuthDetailsModelOneOffJob.
create_new()
)
user_jobs_one_off.PopulateUserAuthDetailsModelOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
stringified_output = (
user_jobs_one_off.PopulateUserAuthDetailsModelOneOffJob.get_output(
job_id))
output = {}
for stringified_distribution in stringified_output:
message_list = ast.literal_eval(stringified_distribution)
# The following is output:
# ['SUCCESS - Created UserAuthDetails model'] = number of users.
output[message_list[0]] = int(message_list[1])
return output
def setUp(self):
super(PopulateUserAuthDetailsModelOneOffJobTests, self).setUp()
user_models.UserSettingsModel.get_by_id(
self.get_user_id_from_email('tmpsuperadmin@example.com')).delete()
self.user_a_model = user_models.UserSettingsModel(
id=self.USER_A_ID,
gae_id=self.USER_A_GAE_ID,
email=self.USER_A_EMAIL,
)
self.user_b_model = user_models.UserSettingsModel(
id=self.USER_B_ID,
gae_id=self.USER_B_GAE_ID,
email=self.USER_B_EMAIL
)
self.user_a_model.put()
self.user_b_model.put()
def test_before_migration_old_users_do_not_exist_in_user_auth_model(self):
self.assertIsNone(user_models.UserAuthDetailsModel.get_by_id(
self.USER_A_ID))
self.assertIsNone(user_models.UserAuthDetailsModel.get_by_id(
self.USER_B_ID))
def test_one_off_job_migrates_old_users_successfully(self):
self.assertIsNone(user_models.UserAuthDetailsModel.get_by_id(
self.USER_A_ID))
self.assertIsNone(user_models.UserAuthDetailsModel.get_by_id(
self.USER_B_ID))
output = self._run_one_off_job()
expected_output = {
'SUCCESS - Created UserAuthDetails model': 2
}
self.assertEqual(output, expected_output)
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, self.USER_A_GAE_ID)
)
self.assertEqual(user_auth_details_model.id, self.USER_A_ID)
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, self.USER_B_GAE_ID)
)
self.assertEqual(user_auth_details_model.id, self.USER_B_ID)
def test_one_off_job_migrates_old_and_new_users_combined_successfully(self):
new_gae_id = 'new_gae_id'
new_email = 'new@example.com'
user_services.create_new_user(new_gae_id, new_email)
new_user_id = user_services.get_user_settings_by_gae_id(
new_gae_id).user_id
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, new_gae_id)
)
# To ensure that UserAuthDetailsModel was created for newly registered
# user.
self.assertEqual(user_auth_details_model.id, new_user_id)
self.assertIsNone(user_models.UserAuthDetailsModel.get_by_id(
self.USER_A_ID))
self.assertIsNone(user_models.UserAuthDetailsModel.get_by_id(
self.USER_B_ID))
output = self._run_one_off_job()
expected_output = {
'SUCCESS - Created UserAuthDetails model': 3
}
self.assertEqual(output, expected_output)
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, self.USER_A_GAE_ID)
)
self.assertEqual(user_auth_details_model.id, self.USER_A_ID)
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, self.USER_B_GAE_ID)
)
self.assertEqual(user_auth_details_model.id, self.USER_B_ID)
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, new_gae_id)
)
self.assertEqual(user_auth_details_model.id, new_user_id)
def test_one_off_job_run_multiple_times_fills_auth_model_correctly(self):
expected_output = {
'SUCCESS - Created UserAuthDetails model': 2
}
output = self._run_one_off_job()
self.assertEqual(output, expected_output)
output = self._run_one_off_job()
self.assertEqual(output, expected_output)
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, self.USER_A_GAE_ID)
)
self.assertEqual(user_auth_details_model.id, self.USER_A_ID)
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, self.USER_B_GAE_ID)
)
self.assertEqual(user_auth_details_model.id, self.USER_B_ID)
def test_mark_user_for_deletion_for_migrated_old_users_is_correct(self):
self._run_one_off_job()
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, self.USER_A_GAE_ID)
)
self.assertEqual(user_auth_details_model.id, self.USER_A_ID)
self.assertFalse(user_auth_details_model.deleted)
user_services.mark_user_for_deletion(self.USER_A_ID)
self._run_one_off_job()
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, self.USER_A_GAE_ID)
)
self.assertEqual(user_auth_details_model.id, self.USER_A_ID)
self.assertTrue(user_auth_details_model.deleted)
class UsernameLengthDistributionOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off username length distribution job."""
USER_A_EMAIL = 'a@example.com'
USER_A_USERNAME = 'a'
USER_B_EMAIL = 'ab@example.com'
USER_B_USERNAME = 'ab'
USER_C_EMAIL = 'bc@example.com'
USER_C_USERNAME = 'bc'
USER_D_EMAIL = 'bcd@example.com'
USER_D_USERNAME = 'bcd'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.UsernameLengthDistributionOneOffJob.create_new())
user_jobs_one_off.UsernameLengthDistributionOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
stringified_output = (
user_jobs_one_off.UsernameLengthDistributionOneOffJob.get_output(
job_id))
output = {}
for stringified_distribution in stringified_output:
value = re.findall(r'\d+', stringified_distribution)
# The following is output['username length'] = number of users.
output[value[0]] = int(value[1])
return output
def test_null_case(self):
"""Tests the case when there are no signed up users but there is one
default user having the username - 'tmpsuperadm1n'.
"""
output = self._run_one_off_job()
# Number of users = 1.
# length of usernames = 13 (tmpsuperadm1n).
self.assertEqual(output['13'], 1)
def test_single_user_case(self):
"""Tests the case when there is only one signed up user and a default
user - 'tmpsuperadm1n'.
"""
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
output = self._run_one_off_job()
# Number of users = 2.
# length of usernames = 13 (tmpsuperadm1n), 1 (a).
self.assertEqual(output['13'], 1)
self.assertEqual(output['1'], 1)
def test_multiple_users_case(self):
"""Tests the case when there are multiple signed up users and a
default user - 'tmpsuperadm1n'.
"""
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
output = self._run_one_off_job()
# Number of users = 3
# length of usernames = 13 (tmpsuperadm1n), 2 (ab), 1 (a).
self.assertEqual(output['13'], 1)
self.assertEqual(output['2'], 1)
self.assertEqual(output['1'], 1)
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
self.signup(self.USER_D_EMAIL, self.USER_D_USERNAME)
output = self._run_one_off_job()
# Number of users = 5
# length of usernames = 13 (tmpsuperadm1n), 3 (bcd), 2 (ab, bc), 1 (a).
self.assertEqual(output['13'], 1)
self.assertEqual(output['3'], 1)
self.assertEqual(output['2'], 2)
self.assertEqual(output['1'], 1)
class UsernameLengthAuditOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off username length limit job."""
USER_1_EMAIL = '1@example.com'
USER_1_USERNAME = '123456789123456789123'
USER_2_EMAIL = '2@example.com'
USER_2_USERNAME = '123456789123456789124'
USER_3_EMAIL = '3@example.com'
USER_3_USERNAME = 'a' * 30
USER_4_EMAIL = '4@example.com'
# Username 4 length is 20, so it shouldn't be in the output.
USER_4_USERNAME = '12345678912345678912'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.UsernameLengthAuditOneOffJob.create_new())
user_jobs_one_off.UsernameLengthAuditOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
return user_jobs_one_off.UsernameLengthAuditOneOffJob.get_output(job_id)
def test_username_length_limit(self):
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.signup(self.USER_3_EMAIL, self.USER_3_USERNAME)
expected_output = [u'[u\'Length: 21\', u"Usernames: [\'%s\', \'%s\']"]'
% (self.USER_1_USERNAME, self.USER_2_USERNAME),
u'[u\'Length: 30\', u"Usernames: [\'%s\']"]'
% self.USER_3_USERNAME]
actual_output = self._run_one_off_job()
self.assertEqual(actual_output, expected_output)
class LongUserBiosOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off long userbio length job."""
USER_A_EMAIL = 'a@example.com'
USER_A_USERNAME = 'a'
USER_A_BIO = 'I am less than 500'
USER_B_EMAIL = 'b@example.com'
USER_B_USERNAME = 'b'
USER_B_BIO = 'Long Bio' * 100
USER_C_EMAIL = 'c@example.com'
USER_C_USERNAME = 'c'
USER_C_BIO = 'Same Bio' * 100
USER_D_EMAIL = 'd@example.com'
USER_D_USERNAME = 'd'
USER_D_BIO = 'Diff Bio' * 300
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.LongUserBiosOneOffJob.create_new())
user_jobs_one_off.LongUserBiosOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
stringified_output = (
user_jobs_one_off.LongUserBiosOneOffJob.get_output(
job_id))
eval_output = [ast.literal_eval(stringified_item)
for stringified_item in stringified_output]
output = [[int(eval_item[0]), eval_item[1]]
for eval_item in eval_output]
return output
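# Illustrative note (not part of the original suite): the job emits items
# such as u"[800, [u'b']]", which ast.literal_eval() parses back into the
# list [800, ['b']]. A minimal sketch, assuming that output shape:
#
#     >>> import ast
#     >>> ast.literal_eval(u"[800, [u'b']]")
#     [800, ['b']]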
def test_no_userbio_returns_empty_list(self):
"""Tests the case when userbio is None."""
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
result = self._run_one_off_job()
self.assertEqual(result, [])
def test_short_userbio_returns_empty_list(self):
"""Tests the case where the userbio is less than 500 characters."""
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
user_id_a = self.get_user_id_from_email(self.USER_A_EMAIL)
user_services.update_user_bio(user_id_a, self.USER_A_BIO)
result = self._run_one_off_job()
self.assertEqual(result, [])
def test_long_userbio_length(self):
"""Tests the case where the userbio is more than 500 characters."""
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
user_id_b = self.get_user_id_from_email(self.USER_B_EMAIL)
user_services.update_user_bio(user_id_b, self.USER_B_BIO)
result = self._run_one_off_job()
expected_result = [[800, ['b']]]
self.assertEqual(result, expected_result)
def test_same_userbio_length(self):
"""Tests the case where two users have same userbio length."""
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
user_id_b = self.get_user_id_from_email(self.USER_B_EMAIL)
user_services.update_user_bio(user_id_b, self.USER_B_BIO)
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
user_id_c = self.get_user_id_from_email(self.USER_C_EMAIL)
user_services.update_user_bio(user_id_c, self.USER_C_BIO)
result = self._run_one_off_job()
result[0][1].sort()
expected_result = [[800, ['b', 'c']]]
self.assertEqual(result, expected_result)
def test_diff_userbio_length(self):
"""Tests the case where two users have different userbio lengths."""
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
user_id_c = self.get_user_id_from_email(self.USER_C_EMAIL)
user_services.update_user_bio(user_id_c, self.USER_C_BIO)
self.signup(self.USER_D_EMAIL, self.USER_D_USERNAME)
user_id_d = self.get_user_id_from_email(self.USER_D_EMAIL)
user_services.update_user_bio(user_id_d, self.USER_D_BIO)
result = sorted(self._run_one_off_job(), key=lambda x: x[0])
expected_result = [[800, ['c']], [2400, ['d']]]
self.assertEqual(result, expected_result)
def test_bio_length_for_users_with_no_bio(self):
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
user_id_a = self.get_user_id_from_email(self.USER_A_EMAIL)
model1 = user_models.UserSettingsModel(
id=user_id_a,
gae_id='gae_' + user_id_a,
email=self.USER_A_EMAIL)
model1.put()
result = self._run_one_off_job()
self.assertEqual(result, [])
class DashboardSubscriptionsOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off dashboard subscriptions job."""
EXP_ID_1 = 'exp_id_1'
EXP_ID_2 = 'exp_id_2'
COLLECTION_ID_1 = 'col_id_1'
COLLECTION_ID_2 = 'col_id_2'
EXP_ID_FOR_COLLECTION_1 = 'id_of_exp_in_collection_1'
USER_A_EMAIL = 'a@example.com'
USER_A_USERNAME = 'a'
USER_B_EMAIL = 'b@example.com'
USER_B_USERNAME = 'b'
USER_C_EMAIL = 'c@example.com'
USER_C_USERNAME = 'c'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = user_jobs_one_off.DashboardSubscriptionsOneOffJob.create_new()
user_jobs_one_off.DashboardSubscriptionsOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
def _null_fn(self, *args, **kwargs):
"""A mock for functions of the form subscribe_to_*() to represent
behavior prior to the implementation of subscriptions.
"""
pass
def setUp(self):
super(DashboardSubscriptionsOneOffJobTests, self).setUp()
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
self.user_a_id = self.get_user_id_from_email(self.USER_A_EMAIL)
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
self.user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
self.user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
self.user_a = user_services.UserActionsInfo(self.user_a_id)
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A creates and saves a new valid exploration.
self.save_new_valid_exploration(
self.EXP_ID_1, self.user_a_id, end_state_name='End')
def test_null_case(self):
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
self.assertEqual(user_b_subscriptions_model, None)
self._run_one_off_job()
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
self.assertEqual(user_b_subscriptions_model, None)
def test_feedback_thread_subscription(self):
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(user_b_subscriptions_model, None)
self.assertEqual(user_c_subscriptions_model, None)
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User B starts a feedback thread.
feedback_services.create_thread(
'exploration', self.EXP_ID_1, self.user_b_id, 'subject', 'text')
# User C adds to that thread.
thread_id = feedback_services.get_all_threads(
'exploration', self.EXP_ID_1, False)[0].id
feedback_services.create_message(
thread_id, self.user_c_id, None, None, 'more text')
self._run_one_off_job()
# Both users are subscribed to the feedback thread.
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id)
self.assertEqual(user_b_subscriptions_model.activity_ids, [])
self.assertEqual(user_c_subscriptions_model.activity_ids, [])
self.assertEqual(
user_b_subscriptions_model.general_feedback_thread_ids, [thread_id])
self.assertEqual(
user_c_subscriptions_model.general_feedback_thread_ids, [thread_id])
def test_exploration_subscription(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A adds user B as an editor to the exploration.
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID_1, self.user_b_id,
rights_domain.ROLE_EDITOR)
# User A adds user C as a viewer of the exploration.
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID_1, self.user_c_id,
rights_domain.ROLE_VIEWER)
self._run_one_off_job()
# Users A and B are subscribed to the exploration. User C is not.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(
user_a_subscriptions_model.activity_ids, [self.EXP_ID_1])
self.assertEqual(
user_b_subscriptions_model.activity_ids, [self.EXP_ID_1])
self.assertEqual(user_a_subscriptions_model.feedback_thread_ids, [])
self.assertEqual(user_b_subscriptions_model.feedback_thread_ids, [])
self.assertEqual(user_c_subscriptions_model, None)
def test_two_explorations(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A creates and saves another valid exploration.
self.save_new_valid_exploration(self.EXP_ID_2, self.user_a_id)
self._run_one_off_job()
# User A is subscribed to two explorations.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
self.assertEqual(
sorted(user_a_subscriptions_model.activity_ids),
sorted([self.EXP_ID_1, self.EXP_ID_2]))
def test_community_owned_exploration(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A adds user B as an editor to the exploration.
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID_1, self.user_b_id,
rights_domain.ROLE_EDITOR)
# The exploration becomes community-owned.
rights_manager.publish_exploration(self.user_a, self.EXP_ID_1)
rights_manager.release_ownership_of_exploration(
self.user_a, self.EXP_ID_1)
# User C edits the exploration.
exp_services.update_exploration(
self.user_c_id, self.EXP_ID_1, [], 'Update exploration')
self._run_one_off_job()
# User A and user B are subscribed to the exploration; user C is not.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(
user_a_subscriptions_model.activity_ids, [self.EXP_ID_1])
self.assertEqual(
user_b_subscriptions_model.activity_ids, [self.EXP_ID_1])
self.assertEqual(user_c_subscriptions_model, None)
def test_deleted_exploration(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A deletes the exploration.
exp_services.delete_exploration(self.user_a_id, self.EXP_ID_1)
self.process_and_flush_pending_tasks()
self._run_one_off_job()
# User A is not subscribed to the exploration.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id, strict=False)
self.assertEqual(user_a_subscriptions_model, None)
def test_collection_subscription(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_collection', self._null_fn
):
# User A creates and saves a new valid collection.
self.save_new_valid_collection(
self.COLLECTION_ID_1, self.user_a_id,
exploration_id=self.EXP_ID_FOR_COLLECTION_1)
# User A adds user B as an editor to the collection.
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID_1, self.user_b_id,
rights_domain.ROLE_EDITOR)
# User A adds user C as a viewer of the collection.
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID_1, self.user_c_id,
rights_domain.ROLE_VIEWER)
self._run_one_off_job()
# Users A and B are subscribed to the collection. User C is not.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(
user_a_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
# User A is also subscribed to the exploration within the collection
# because they created both.
self.assertEqual(
sorted(user_a_subscriptions_model.activity_ids), [
self.EXP_ID_1, self.EXP_ID_FOR_COLLECTION_1])
self.assertEqual(
user_b_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
self.assertEqual(user_a_subscriptions_model.feedback_thread_ids, [])
self.assertEqual(user_b_subscriptions_model.feedback_thread_ids, [])
self.assertEqual(user_c_subscriptions_model, None)
def test_two_collections(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_collection', self._null_fn
):
# User A creates and saves a new valid collection.
self.save_new_valid_collection(
self.COLLECTION_ID_1, self.user_a_id,
exploration_id=self.EXP_ID_FOR_COLLECTION_1)
# User A creates and saves another valid collection.
self.save_new_valid_collection(
self.COLLECTION_ID_2, self.user_a_id,
exploration_id=self.EXP_ID_FOR_COLLECTION_1)
self._run_one_off_job()
# User A is subscribed to two collections.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
self.assertEqual(
sorted(user_a_subscriptions_model.collection_ids),
sorted([self.COLLECTION_ID_1, self.COLLECTION_ID_2]))
def test_deleted_collection(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_collection', self._null_fn
):
# User A creates and saves a new collection.
self.save_new_default_collection(
self.COLLECTION_ID_1, self.user_a_id)
# User A deletes the collection.
collection_services.delete_collection(
self.user_a_id, self.COLLECTION_ID_1)
# User A deletes the exploration from earlier.
exp_services.delete_exploration(self.user_a_id, self.EXP_ID_1)
self.process_and_flush_pending_tasks()
self._run_one_off_job()
# User A is not subscribed to the collection.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id, strict=False)
self.assertEqual(user_a_subscriptions_model, None)
def test_adding_exploration_to_collection(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_collection', self._null_fn
):
# User B creates and saves a new collection.
self.save_new_default_collection(
self.COLLECTION_ID_1, self.user_b_id)
# User B adds the exploration created by user A to the collection.
collection_services.update_collection(
self.user_b_id, self.COLLECTION_ID_1, [{
'cmd': collection_domain.CMD_ADD_COLLECTION_NODE,
'exploration_id': self.EXP_ID_1
}], 'Add new exploration to collection.')
# Users A and B have no subscriptions (to either explorations or
# collections).
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id, strict=False)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
self.assertEqual(user_a_subscriptions_model, None)
self.assertEqual(user_b_subscriptions_model, None)
self._run_one_off_job()
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
# User B should be subscribed to the collection and user A to the
# exploration.
self.assertEqual(
user_a_subscriptions_model.activity_ids, [self.EXP_ID_1])
self.assertEqual(
user_a_subscriptions_model.collection_ids, [])
self.assertEqual(
user_b_subscriptions_model.activity_ids, [])
self.assertEqual(
user_b_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
def test_community_owned_collection(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_collection', self._null_fn
):
rights_manager.publish_exploration(self.user_a, self.EXP_ID_1)
# User A creates and saves a new valid collection.
self.save_new_valid_collection(
self.COLLECTION_ID_1, self.user_a_id,
exploration_id=self.EXP_ID_1)
# User A adds user B as an editor to the collection.
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID_1, self.user_b_id,
rights_domain.ROLE_EDITOR)
# The collection becomes community-owned.
rights_manager.publish_collection(self.user_a, self.COLLECTION_ID_1)
rights_manager.release_ownership_of_collection(
self.user_a, self.COLLECTION_ID_1)
# User C edits the collection.
collection_services.update_collection(
self.user_c_id, self.COLLECTION_ID_1, [{
'cmd': collection_domain.CMD_EDIT_COLLECTION_PROPERTY,
'property_name': (
collection_domain.COLLECTION_PROPERTY_TITLE),
'new_value': 'New title'
}], 'Changed title.')
self._run_one_off_job()
# User A and user B are subscribed to the collection; user C is not.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(
user_a_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
self.assertEqual(
user_b_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
self.assertEqual(user_c_subscriptions_model, None)
class MockUserStatsAggregator(
user_jobs_continuous.UserStatsAggregator):
"""A modified UserStatsAggregator that does not start a new
batch job when the previous one has finished.
"""
@classmethod
def _get_batch_job_manager_class(cls):
return MockUserStatsMRJobManager
@classmethod
def _kickoff_batch_job_after_previous_one_ends(cls):
pass
class MockUserStatsMRJobManager(
user_jobs_continuous.UserStatsMRJobManager):
@classmethod
def _get_continuous_computation_class(cls):
return MockUserStatsAggregator
class DashboardStatsOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off dashboard stats job."""
CURRENT_DATE_AS_STRING = user_services.get_current_date_as_string()
DATE_AFTER_ONE_WEEK = (
(datetime.datetime.utcnow() + datetime.timedelta(7)).strftime(
feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT))
USER_SESSION_ID = 'session1'
EXP_ID_1 = 'exp_id_1'
EXP_ID_2 = 'exp_id_2'
EXP_VERSION = 1
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = user_jobs_one_off.DashboardStatsOneOffJob.create_new()
user_jobs_one_off.DashboardStatsOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
def setUp(self):
super(DashboardStatsOneOffJobTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
def mock_get_current_date_as_string(self):
return self.CURRENT_DATE_AS_STRING
def _rate_exploration(self, user_id, exp_id, rating):
"""Assigns rating to the exploration corresponding to the given
exploration id.
Args:
user_id: str. The user id.
exp_id: str. The exploration id.
rating: int. The rating to be assigned to the given exploration.
"""
rating_services.assign_rating_to_exploration(user_id, exp_id, rating)
def _record_play(self, exp_id, state):
"""Calls StartExplorationEventHandler and records the 'play' event
corresponding to the given exploration id.
Args:
exp_id: str. The exploration id.
state: dict(str, *). The state of the exploration corresponding to
the given id.
"""
event_services.StartExplorationEventHandler.record(
exp_id, self.EXP_VERSION, state, self.USER_SESSION_ID, {},
feconf.PLAY_TYPE_NORMAL)
def test_weekly_stats_if_continuous_stats_job_has_not_been_run(self):
exploration = self.save_new_valid_exploration(
self.EXP_ID_1, self.owner_id)
exp_id = exploration.id
init_state_name = exploration.init_state_name
self._record_play(exp_id, init_state_name)
self._rate_exploration('user1', exp_id, 5)
weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
self.assertEqual(weekly_stats, None)
self.assertEqual(
user_services.get_last_week_dashboard_stats(self.owner_id), None)
with self.swap(
user_services,
'get_current_date_as_string',
self.mock_get_current_date_as_string):
self._run_one_off_job()
weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
expected_results_list = [{
self.mock_get_current_date_as_string(): {
'num_ratings': 0,
'average_ratings': None,
'total_plays': 0
}
}]
self.assertEqual(weekly_stats, expected_results_list)
self.assertEqual(
user_services.get_last_week_dashboard_stats(self.owner_id),
expected_results_list[0])
def test_weekly_stats_if_no_explorations(self):
MockUserStatsAggregator.start_computation()
self.process_and_flush_pending_tasks()
with self.swap(
user_services,
'get_current_date_as_string',
self.mock_get_current_date_as_string):
self._run_one_off_job()
weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
self.assertEqual(
weekly_stats, [{
self.mock_get_current_date_as_string(): {
'num_ratings': 0,
'average_ratings': None,
'total_plays': 0
}
}])
def test_weekly_stats_for_single_exploration(self):
exploration = self.save_new_valid_exploration(
self.EXP_ID_1, self.owner_id)
exp_id = exploration.id
init_state_name = exploration.init_state_name
self._record_play(exp_id, init_state_name)
self._rate_exploration('user1', exp_id, 5)
event_services.StatsEventsHandler.record(
self.EXP_ID_1, 1, {
'num_starts': 1,
'num_actual_starts': 0,
'num_completions': 0,
'state_stats_mapping': {}
})
MockUserStatsAggregator.start_computation()
self.process_and_flush_pending_tasks()
with self.swap(
user_services,
'get_current_date_as_string',
self.mock_get_current_date_as_string):
self._run_one_off_job()
weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
self.assertEqual(
weekly_stats, [{
self.mock_get_current_date_as_string(): {
'num_ratings': 1,
'average_ratings': 5.0,
'total_plays': 1
}
}])
def test_weekly_stats_for_multiple_explorations(self):
exploration_1 = self.save_new_valid_exploration(
self.EXP_ID_1, self.owner_id)
exp_id_1 = exploration_1.id
exploration_2 = self.save_new_valid_exploration(
self.EXP_ID_2, self.owner_id)
exp_id_2 = exploration_2.id
init_state_name_1 = exploration_1.init_state_name
self._record_play(exp_id_1, init_state_name_1)
self._rate_exploration('user1', exp_id_1, 5)
self._rate_exploration('user2', exp_id_2, 4)
event_services.StatsEventsHandler.record(
self.EXP_ID_1, 1, {
'num_starts': 1,
'num_actual_starts': 0,
'num_completions': 0,
'state_stats_mapping': {}
})
MockUserStatsAggregator.start_computation()
self.process_and_flush_pending_tasks()
with self.swap(
user_services,
'get_current_date_as_string',
self.mock_get_current_date_as_string):
self._run_one_off_job()
weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
self.assertEqual(
weekly_stats, [{
self.mock_get_current_date_as_string(): {
'num_ratings': 2,
'average_ratings': 4.5,
'total_plays': 1
}
}])
def test_stats_for_multiple_weeks(self):
exploration = self.save_new_valid_exploration(
self.EXP_ID_1, self.owner_id)
exp_id = exploration.id
init_state_name = exploration.init_state_name
self._rate_exploration('user1', exp_id, 4)
self._record_play(exp_id, init_state_name)
self._record_play(exp_id, init_state_name)
event_services.StatsEventsHandler.record(
self.EXP_ID_1, 1, {
'num_starts': 2,
'num_actual_starts': 0,
'num_completions': 0,
'state_stats_mapping': {}
})
MockUserStatsAggregator.start_computation()
self.process_and_flush_pending_tasks()
with self.swap(
user_services,
'get_current_date_as_string',
self.mock_get_current_date_as_string):
self._run_one_off_job()
weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
self.assertEqual(
weekly_stats, [{
self.mock_get_current_date_as_string(): {
'num_ratings': 1,
'average_ratings': 4.0,
'total_plays': 2
}
}])
MockUserStatsAggregator.stop_computation(self.owner_id)
self.process_and_flush_pending_tasks()
self._rate_exploration('user2', exp_id, 2)
MockUserStatsAggregator.start_computation()
self.process_and_flush_pending_tasks()
def _mock_get_date_after_one_week():
"""Returns the date of the next week."""
return self.DATE_AFTER_ONE_WEEK
with self.swap(
user_services,
'get_current_date_as_string',
_mock_get_date_after_one_week):
self._run_one_off_job()
expected_results_list = [
{
self.mock_get_current_date_as_string(): {
'num_ratings': 1,
'average_ratings': 4.0,
'total_plays': 2
}
},
{
_mock_get_date_after_one_week(): {
'num_ratings': 2,
'average_ratings': 3.0,
'total_plays': 2
}
}
]
weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
self.assertEqual(weekly_stats, expected_results_list)
self.assertEqual(
user_services.get_last_week_dashboard_stats(self.owner_id),
expected_results_list[1])
class UserFirstContributionMsecOneOffJobTests(test_utils.GenericTestBase):
EXP_ID = 'test_exp'
def setUp(self):
super(UserFirstContributionMsecOneOffJobTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.admin = user_services.UserActionsInfo(self.admin_id)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.owner = user_services.UserActionsInfo(self.owner_id)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
def test_contribution_msec_updates_on_published_explorations(self):
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.admin_id, end_state_name='End')
init_state_name = exploration.init_state_name
# Test that no contribution time is set.
job_id = (
user_jobs_one_off.UserFirstContributionMsecOneOffJob.create_new())
user_jobs_one_off.UserFirstContributionMsecOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
self.assertIsNone(
user_services.get_user_settings(
self.admin_id).first_contribution_msec)
# Test all owners and editors of exploration after publication have
# updated times.
exp_services.publish_exploration_and_update_user_profiles(
self.admin, self.EXP_ID)
rights_manager.release_ownership_of_exploration(
self.admin, self.EXP_ID)
exp_services.update_exploration(
self.editor_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': init_state_name,
'property_name': 'widget_id',
'new_value': 'MultipleChoiceInput'
}), exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': init_state_name,
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [{
'content_id': 'ca_choices_0',
'html': '<p>Choice 1</p>'
}]
},
'showChoicesInShuffledOrder': {'value': True}
}
})], 'commit')
job_id = (
user_jobs_one_off.UserFirstContributionMsecOneOffJob.create_new())
user_jobs_one_off.UserFirstContributionMsecOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
self.assertIsNotNone(user_services.get_user_settings(
self.admin_id).first_contribution_msec)
self.assertIsNotNone(user_services.get_user_settings(
self.editor_id).first_contribution_msec)
def test_contribution_msec_does_not_update_on_unpublished_explorations(
self):
self.save_new_valid_exploration(
self.EXP_ID, self.owner_id, end_state_name='End')
exp_services.publish_exploration_and_update_user_profiles(
self.owner, self.EXP_ID)
# We now manually reset the user's first_contribution_msec to None.
# This is to test that the one off job skips over the unpublished
# exploration and does not reset the user's first_contribution_msec.
user_models.UserSettingsModel(
id=self.owner_id,
gae_id='gae_id',
email='email@email.com',
username='username',
first_contribution_msec=None
).put()
rights_manager.unpublish_exploration(self.admin, self.EXP_ID)
# Test that first contribution time is not set for unpublished
# explorations.
job_id = (
user_jobs_one_off.UserFirstContributionMsecOneOffJob.create_new())
user_jobs_one_off.UserFirstContributionMsecOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
self.assertIsNone(user_services.get_user_settings(
self.owner_id).first_contribution_msec)
def test_contribution_msec_is_not_generated_if_exploration_not_created(
self):
model1 = exp_models.ExplorationRightsSnapshotMetadataModel(
id='exp_id-1', committer_id=self.owner_id, commit_type='create')
model1.put()
self.assertIsNone(user_services.get_user_settings(
self.owner_id).first_contribution_msec)
job_id = (
user_jobs_one_off.UserFirstContributionMsecOneOffJob.create_new())
user_jobs_one_off.UserFirstContributionMsecOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
self.assertIsNone(user_services.get_user_settings(
self.owner_id).first_contribution_msec)
class UserLastExplorationActivityOneOffJobTests(test_utils.GenericTestBase):
def setUp(self):
super(UserLastExplorationActivityOneOffJobTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.exp_id = 'exp'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.UserLastExplorationActivityOneOffJob.create_new())
user_jobs_one_off.UserLastExplorationActivityOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
def test_that_last_created_time_is_updated(self):
self.login(self.OWNER_EMAIL)
self.save_new_valid_exploration(
self.exp_id, self.owner_id, end_state_name='End')
self.logout()
user_models.UserSettingsModel(
id=self.owner_id,
gae_id='gae_' + self.owner_id,
email=self.OWNER_EMAIL,
last_created_an_exploration=None
).put()
owner_settings = user_services.get_user_settings(self.owner_id)
self.assertIsNone(owner_settings.last_created_an_exploration)
self.assertIsNone(owner_settings.last_edited_an_exploration)
self._run_one_off_job()
owner_settings = user_services.get_user_settings(self.owner_id)
self.assertIsNotNone(owner_settings.last_created_an_exploration)
self.assertIsNotNone(owner_settings.last_edited_an_exploration)
def test_that_last_edited_time_is_updated(self):
self.login(self.OWNER_EMAIL)
self.save_new_valid_exploration(
self.exp_id, self.owner_id, end_state_name='End')
self.logout()
self.login(self.EDITOR_EMAIL)
exp_services.update_exploration(
self.editor_id, self.exp_id, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
self.logout()
user_models.UserSettingsModel(
id=self.editor_id,
gae_id='gae_' + self.editor_id,
email=self.EDITOR_EMAIL,
last_edited_an_exploration=None
).put()
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertIsNone(editor_settings.last_created_an_exploration)
self.assertIsNone(editor_settings.last_edited_an_exploration)
self._run_one_off_job()
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertIsNotNone(editor_settings.last_edited_an_exploration)
self.assertIsNone(editor_settings.last_created_an_exploration)
def test_that_last_edited_and_created_time_both_updated(self):
self.login(self.OWNER_EMAIL)
self.save_new_valid_exploration(
self.exp_id, self.owner_id, end_state_name='End')
exp_services.update_exploration(
self.owner_id, self.exp_id, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
self.logout()
self.login(self.EDITOR_EMAIL)
exp_services.update_exploration(
self.editor_id, self.exp_id, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'new objective'
})], 'Test edit new')
self.logout()
user_models.UserSettingsModel(
id=self.owner_id,
gae_id='gae_' + self.owner_id,
email=self.OWNER_EMAIL,
last_created_an_exploration=None,
last_edited_an_exploration=None
).put()
user_models.UserSettingsModel(
id=self.editor_id,
gae_id='gae_' + self.editor_id,
email=self.EDITOR_EMAIL,
last_edited_an_exploration=None
).put()
owner_settings = user_services.get_user_settings(self.owner_id)
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertIsNone(owner_settings.last_created_an_exploration)
self.assertIsNone(owner_settings.last_edited_an_exploration)
self.assertIsNone(editor_settings.last_created_an_exploration)
self.assertIsNone(editor_settings.last_edited_an_exploration)
self._run_one_off_job()
owner_settings = user_services.get_user_settings(self.owner_id)
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertIsNotNone(owner_settings.last_edited_an_exploration)
self.assertIsNotNone(owner_settings.last_created_an_exploration)
self.assertIsNotNone(editor_settings.last_edited_an_exploration)
self.assertIsNone(editor_settings.last_created_an_exploration)
def test_that_last_edited_and_created_time_are_not_updated(self):
user_models.UserSettingsModel(
id=self.owner_id,
gae_id='gae_' + self.owner_id,
email=self.OWNER_EMAIL,
last_created_an_exploration=None,
last_edited_an_exploration=None
).put()
owner_settings = user_services.get_user_settings(self.owner_id)
self.assertIsNone(owner_settings.last_created_an_exploration)
self.assertIsNone(owner_settings.last_edited_an_exploration)
self._run_one_off_job()
owner_settings = user_services.get_user_settings(self.owner_id)
self.assertIsNone(owner_settings.last_created_an_exploration)
self.assertIsNone(owner_settings.last_edited_an_exploration)
class CleanupUserSubscriptionsModelUnitTests(test_utils.GenericTestBase):
def setUp(self):
super(CleanupUserSubscriptionsModelUnitTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup('user@email', 'user')
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.user_id = self.get_user_id_from_email('user@email')
self.owner = user_services.UserActionsInfo(self.owner_id)
explorations = [exp_domain.Exploration.create_default_exploration(
'%s' % i,
title='title %d' % i,
category='category%d' % i
) for i in python_utils.RANGE(3)]
for exp in explorations:
exp_services.save_new_exploration(self.owner_id, exp)
rights_manager.publish_exploration(self.owner, exp.id)
for exp in explorations:
subscription_services.subscribe_to_exploration(
self.user_id, exp.id)
self.process_and_flush_pending_tasks()
def test_standard_operation(self):
for exp_id in python_utils.RANGE(3):
exp_models.ExplorationModel.get('%s' % exp_id).delete(
self.owner_id, 'deleted exploration')
owner_subscription_model = user_models.UserSubscriptionsModel.get(
self.owner_id)
self.assertEqual(len(owner_subscription_model.activity_ids), 3)
user_subscription_model = user_models.UserSubscriptionsModel.get(
self.user_id)
self.assertEqual(len(user_subscription_model.activity_ids), 3)
job = user_jobs_one_off.CleanupActivityIdsFromUserSubscriptionsModelOneOffJob # pylint: disable=line-too-long
job_id = job.create_new()
job.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
owner_subscription_model = user_models.UserSubscriptionsModel.get(
self.owner_id)
self.assertEqual(len(owner_subscription_model.activity_ids), 0)
user_subscription_model = user_models.UserSubscriptionsModel.get(
self.user_id)
self.assertEqual(len(user_subscription_model.activity_ids), 0)
actual_output = job.get_output(job_id)
expected_output = [
u'[u\'Successfully cleaned up UserSubscriptionsModel %s and '
'removed explorations 0, 1, 2\', 1]' %
self.owner_id,
u'[u\'Successfully cleaned up UserSubscriptionsModel %s and '
'removed explorations 0, 1, 2\', 1]' %
self.user_id]
self.assertEqual(sorted(actual_output), sorted(expected_output))
class MockUserSettingsModel(user_models.UserSettingsModel):
"""Mock UserSettingsModel so that it allows to set `gae_user_id`."""
gae_user_id = ndb.StringProperty(indexed=False, required=False)
class RemoveGaeUserIdOneOffJobTests(test_utils.GenericTestBase):
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.RemoveGaeUserIdOneOffJob.create_new())
user_jobs_one_off.RemoveGaeUserIdOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
stringified_output = (
user_jobs_one_off.RemoveGaeUserIdOneOffJob
.get_output(job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
return eval_output
def test_one_setting_model_with_gae_user_id(self):
with self.swap(user_models, 'UserSettingsModel', MockUserSettingsModel):
original_setting_model = (
user_models.UserSettingsModel(
id='id',
gae_id='gae_id',
email='test@email.com',
gae_user_id='gae_user_id'
)
)
original_setting_model.put()
self.assertIsNotNone(original_setting_model.gae_user_id)
self.assertIn('gae_user_id', original_setting_model._values) # pylint: disable=protected-access
self.assertIn('gae_user_id', original_setting_model._properties) # pylint: disable=protected-access
output = self._run_one_off_job()
self.assertItemsEqual(
[['SUCCESS_REMOVED - UserSettingsModel', 1]], output)
migrated_setting_model = (
user_models.UserSettingsModel.get_by_id('id'))
self.assertNotIn('gae_user_id', migrated_setting_model._values) # pylint: disable=protected-access
self.assertNotIn('gae_user_id', migrated_setting_model._properties) # pylint: disable=protected-access
self.assertEqual(
original_setting_model.last_updated,
migrated_setting_model.last_updated)
def test_one_setting_model_without_gae_user_id(self):
original_setting_model = (
user_models.UserSettingsModel(
id='id',
gae_id='gae_id',
email='test@email.com',
)
)
original_setting_model.put()
self.assertNotIn('gae_user_id', original_setting_model._values) # pylint: disable=protected-access
self.assertNotIn('gae_user_id', original_setting_model._properties) # pylint: disable=protected-access
output = self._run_one_off_job()
# A UserSettingsModel already exists because one is created by
# test_utils, so the expected count here is 2.
self.assertItemsEqual(
[['SUCCESS_ALREADY_REMOVED - UserSettingsModel', 2]], output)
migrated_setting_model = user_models.UserSettingsModel.get_by_id('id')
self.assertNotIn('gae_user_id', migrated_setting_model._values) # pylint: disable=protected-access
self.assertNotIn('gae_user_id', migrated_setting_model._properties) # pylint: disable=protected-access
self.assertEqual(
original_setting_model.last_updated,
migrated_setting_model.last_updated)
"""Code for more fancy file handles.
Classes:
- UndoHandle File object decorator with support for undo-like operations.
Additional private classes used in Bio.SeqIO and Bio.SearchIO for indexing
files are also defined under Bio.File but these are not intended for direct
use.
"""
from __future__ import print_function
import codecs
import os
import sys
import contextlib
import itertools
from Bio._py3k import basestring
try:
from collections import UserDict as _dict_base
except ImportError:
from UserDict import DictMixin as _dict_base
try:
from sqlite3 import dbapi2 as _sqlite
from sqlite3 import IntegrityError as _IntegrityError
from sqlite3 import OperationalError as _OperationalError
except ImportError:
# Not present on Jython, but should be included in Python 2.5
# or later (unless compiled from source without its dependencies)
# Still want to offer in-memory indexing.
_sqlite = None
pass
__docformat__ = "restructuredtext en"
@contextlib.contextmanager
def as_handle(handleish, mode='r', **kwargs):
r"""Context manager to ensure we are using a handle.
Context manager for arguments that can be passed to
SeqIO and AlignIO read, write, and parse methods: either file objects or strings.
When given a string, returns a file handle open to handleish with the
provided mode, which will be closed when the manager exits.
All other inputs are returned unchanged, and are *not* closed.
- handleish - Either a string or file handle
- mode - Mode to open handleish (used only if handleish is a string)
- kwargs - Further arguments to pass to open(...)
Example:
>>> with as_handle('seqs.fasta', 'w') as fp:
... fp.write('>test\nACGT')
>>> fp.closed
True
>>> handle = open('seqs.fasta', 'w')
>>> with as_handle(handle) as fp:
... fp.write('>test\nACGT')
>>> fp.closed
False
>>> fp.close()
Note that if the mode argument includes U (for universal newlines)
this will be removed under Python 3 where it is redundant and has
been deprecated (this happens automatically in text mode).
"""
if isinstance(handleish, basestring):
if sys.version_info[0] >= 3 and "U" in mode:
mode = mode.replace("U", "")
if 'encoding' in kwargs:
with codecs.open(handleish, mode, **kwargs) as fp:
yield fp
else:
with open(handleish, mode, **kwargs) as fp:
yield fp
else:
yield handleish
def _open_for_random_access(filename):
"""Open a file in binary mode, spot if it is BGZF format etc (PRIVATE).
This functionality is used by the Bio.SeqIO and Bio.SearchIO index
and index_db functions.
"""
handle = open(filename, "rb")
from . import bgzf
try:
return bgzf.BgzfReader(mode="rb", fileobj=handle)
except ValueError as e:
assert "BGZF" in str(e)
# Not a BGZF file after all, rewind to start:
handle.seek(0)
return handle
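# Illustrative usage sketch (not part of the original module): the returned
# handle supports seek/read uniformly whether or not the underlying file is
# BGZF compressed, so index code does not need to special-case it. Assuming
# a local file "example.fasta" exists:
#
#     >>> handle = _open_for_random_access("example.fasta")
#     >>> handle.seek(0)
#     >>> first_bytes = handle.read(10)
#     >>> handle.close()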
class UndoHandle(object):
"""A Python handle that adds functionality for saving lines.
Saves lines in a LIFO fashion.
Added methods:
- saveline Save a line to be returned next time.
- peekline Peek at the next line without consuming it.
"""
def __init__(self, handle):
self._handle = handle
self._saved = []
try:
# If wrapping an online handle, this is nice to have:
self.url = handle.url
except AttributeError:
pass
def __iter__(self):
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
if sys.version_info[0] < 3:
def next(self):
"""Python 2 style alias for Python 3 style __next__ method."""
return self.__next__()
def readlines(self, *args, **keywds):
lines = self._saved + self._handle.readlines(*args, **keywds)
self._saved = []
return lines
def readline(self, *args, **keywds):
if self._saved:
line = self._saved.pop(0)
else:
line = self._handle.readline(*args, **keywds)
return line
def read(self, size=-1):
if size == -1:
saved = "".join(self._saved)
self._saved[:] = []
else:
saved = ''
while size > 0 and self._saved:
if len(self._saved[0]) <= size:
size = size - len(self._saved[0])
saved = saved + self._saved.pop(0)
else:
saved = saved + self._saved[0][:size]
self._saved[0] = self._saved[0][size:]
size = 0
return saved + self._handle.read(size)
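# Illustrative worked example (not in the original source): if
# self._saved == ['abc', 'def'], then read(4) consumes 'abc' plus the
# 'd' of 'def', leaves 'ef' saved, and asks the handle for 0 further
# bytes, returning 'abcd'.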
def saveline(self, line):
if line:
self._saved = [line] + self._saved
def peekline(self):
if self._saved:
line = self._saved[0]
else:
line = self._handle.readline()
self.saveline(line)
return line
def tell(self):
return self._handle.tell() - sum(len(line) for line in self._saved)
def seek(self, *args):
self._saved = []
self._handle.seek(*args)
def __getattr__(self, attr):
return getattr(self._handle, attr)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._handle.close()
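# Illustrative usage sketch (not part of the original module): UndoHandle
# lets a parser peek at the next line and push lines back, which is how
# format sniffers typically use it. A minimal example wrapping an
# in-memory handle (Python 3 reprs shown):
#
#     >>> from io import StringIO
#     >>> h = UndoHandle(StringIO("line one\nline two\n"))
#     >>> h.peekline()
#     'line one\n'
#     >>> h.readline()
#     'line one\n'
#     >>> h.saveline('extra\n')
#     >>> h.readline()
#     'extra\n'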
class _IndexedSeqFileProxy(object):
"""Base class for file format specific random access (PRIVATE).
This is subclassed in both Bio.SeqIO for indexing as SeqRecord
objects, and in Bio.SearchIO for indexing QueryResult objects.
Subclasses for each file format should define '__iter__', 'get'
and optionally 'get_raw' methods.
"""
def __iter__(self):
"""Returns (identifier, offset, length in bytes) tuples.
The length can be zero where it is not implemented or not
possible for a particular file format.
"""
raise NotImplementedError("Subclass should implement this")
def get(self, offset):
"""Returns parsed object for this entry."""
# Most file formats with self-contained records can be handled by
# parsing StringIO(_bytes_to_string(self.get_raw(offset)))
raise NotImplementedError("Subclass should implement this")
def get_raw(self, offset):
"""Returns bytes string (if implemented for this file format)."""
# Should be done by each sub-class (if possible)
raise NotImplementedError("Not available for this file format.")
class _IndexedSeqFileDict(_dict_base):
"""Read only dictionary interface to a sequential record file.
This code is used in both Bio.SeqIO for indexing as SeqRecord
objects, and in Bio.SearchIO for indexing QueryResult objects.
Keeps the keys and associated file offsets in memory, reads the file
to access entries as objects, parsing them on demand. This approach
is memory limited, but will work even with millions of records.
Note duplicate keys are not allowed. If this happens, a ValueError
exception is raised.
As used in Bio.SeqIO, by default the SeqRecord's id string is used
as the dictionary key. In Bio.SearchIO, the query's id string is
used. This can be changed by supplying an optional key_function,
a callback function which will be given the record id and must
return the desired key. For example, this allows you to parse
NCBI style FASTA identifiers, and extract the GI number to use
as the dictionary key.
Note that this dictionary is essentially read only. You cannot
add or change values, pop values, nor clear the dictionary.
"""
def __init__(self, random_access_proxy, key_function,
repr, obj_repr):
# Use key_function=None for default value
self._proxy = random_access_proxy
self._key_function = key_function
self._repr = repr
self._obj_repr = obj_repr
if key_function:
offset_iter = (
(key_function(k), o, l) for (k, o, l) in random_access_proxy)
else:
offset_iter = random_access_proxy
offsets = {}
for key, offset, length in offset_iter:
# Note - we don't store the length because I want to minimise the
# memory requirements. With the SQLite backend the length is kept
# and is used to speed up the get_raw method (by about 3 times).
# The length should be provided by all the current backends except
# SFF where there is an existing Roche index we can reuse (very fast
# but lacks the record lengths)
# assert length or format in ["sff", "sff-trim"], \
# "%s at offset %i given length %r (%s format %s)" \
# % (key, offset, length, filename, format)
if key in offsets:
self._proxy._handle.close()
raise ValueError("Duplicate key '%s'" % key)
else:
offsets[key] = offset
self._offsets = offsets
def __repr__(self):
return self._repr
def __str__(self):
# TODO - How best to handle the __str__ for SeqIO and SearchIO?
if self:
return "{%r : %s(...), ...}" % (list(self.keys())[0], self._obj_repr)
else:
return "{}"
def __contains__(self, key):
return key in self._offsets
def __len__(self):
"""How many records are there?"""
return len(self._offsets)
def items(self):
"""Iterate over the (key, SeqRecord) items.
This tries to act like a Python 3 dictionary, and does not return
a list of (key, value) pairs due to memory concerns.
"""
for key in self.__iter__():
yield key, self.__getitem__(key)
def values(self):
"""Iterate over the SeqRecord items.
This tries to act like a Python 3 dictionary, and does not return
a list of values due to memory concerns.
"""
for key in self.__iter__():
yield self.__getitem__(key)
def keys(self):
"""Iterate over the keys.
This tries to act like a Python 3 dictionary, and does not return
a list of keys due to memory concerns.
"""
return self.__iter__()
if hasattr(dict, "iteritems"):
# Python 2, also define iteritems etc
def itervalues(self):
"""Iterate over the SeqRecord) items."""
for key in self.__iter__():
yield self.__getitem__(key)
def iteritems(self):
"""Iterate over the (key, SeqRecord) items."""
for key in self.__iter__():
yield key, self.__getitem__(key)
def iterkeys(self):
"""Iterate over the keys."""
return self.__iter__()
def __iter__(self):
"""Iterate over the keys."""
return iter(self._offsets)
def __getitem__(self, key):
"""x.__getitem__(y) <==> x[y]"""
# Pass the offset to the proxy
record = self._proxy.get(self._offsets[key])
if self._key_function:
key2 = self._key_function(record.id)
else:
key2 = record.id
if key != key2:
raise ValueError("Key did not match (%s vs %s)" % (key, key2))
return record
def get(self, k, d=None):
"""D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."""
try:
return self.__getitem__(k)
except KeyError:
return d
def get_raw(self, key):
"""Similar to the get method, but returns the record as a raw string.
If the key is not found, a KeyError exception is raised.
Note that on Python 3 a bytes string is returned, not a typical
unicode string.
NOTE - This functionality is not supported for every file format.
"""
# Pass the offset to the proxy
return self._proxy.get_raw(self._offsets[key])
def __setitem__(self, key, value):
"""Would allow setting or replacing records, but not implemented."""
raise NotImplementedError("An indexed a sequence file is read only.")
def update(self, *args, **kwargs):
"""Would allow adding more values, but not implemented."""
raise NotImplementedError("An indexed a sequence file is read only.")
def pop(self, key, default=None):
"""Would remove specified record, but not implemented."""
raise NotImplementedError("An indexed a sequence file is read only.")
def popitem(self):
"""Would remove and return a SeqRecord, but not implemented."""
raise NotImplementedError("An indexed a sequence file is read only.")
def clear(self):
"""Would clear dictionary, but not implemented."""
raise NotImplementedError("An indexed a sequence file is read only.")
def fromkeys(self, keys, value=None):
"""A dictionary method which we don't implement."""
raise NotImplementedError("An indexed a sequence file doesn't "
"support this.")
def copy(self):
"""A dictionary method which we don't implement."""
raise NotImplementedError("An indexed a sequence file doesn't "
"support this.")
def close(self):
"""Close the file handle being used to read the data.
Once called, further use of the index won't work. The sole purpose
of this method is to allow explicit handle closure - for example
if you wish to delete the file, on Windows you must first close
all open handles to that file.
"""
self._proxy._handle.close()
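# Illustrative usage sketch (not part of this module): _IndexedSeqFileDict
# is the object behind Bio.SeqIO.index(), so typical use looks like the
# following, assuming a FASTA file "example.fasta" containing a record
# with id "seq1":
#
#     >>> from Bio import SeqIO
#     >>> records = SeqIO.index("example.fasta", "fasta")
#     >>> record = records["seq1"]
#     >>> raw = records.get_raw("seq1")  # bytes under Python 3
#     >>> records.close()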
class _SQLiteManySeqFilesDict(_IndexedSeqFileDict):
"""Read only dictionary interface to many sequential record files.
This code is used in both Bio.SeqIO for indexing as SeqRecord
objects, and in Bio.SearchIO for indexing QueryResult objects.
Keeps the keys, file-numbers and offsets in an SQLite database. To access
a record by key, reads from the offset in the appropriate file and then
parses the record into an object.
There are OS limits on the number of files that can be open at once,
so a pool is kept. If a record is required from a file that is not
currently open, one of the open handles is closed first.
"""
def __init__(self, index_filename, filenames,
proxy_factory, format,
key_function, repr, max_open=10):
"""Loads or creates an SQLite based index."""
# TODO? - Don't keep filename list in memory (just in DB)?
# Should save a chunk of memory if dealing with 1000s of files.
# Furthermore could compare a generator to the DB on reloading
# (no need to turn it into a list)
if not _sqlite:
# Hack for Jython (or if Python is compiled without it)
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError("Requires sqlite3, which is "
"included in Python 2.5+")
if filenames is not None:
filenames = list(filenames) # In case it was a generator
# Cache the arguments as private variables
self._index_filename = index_filename
self._filenames = filenames
self._format = format
self._key_function = key_function
self._proxy_factory = proxy_factory
self._repr = repr
self._max_open = max_open
self._proxies = {}
# Note that if the index filename uses the SQLite ":memory:" trick, this
# will give $PWD as the relative path (which is fine).
self._relative_path = os.path.abspath(os.path.dirname(index_filename))
if os.path.isfile(index_filename):
self._load_index()
else:
self._build_index()
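# Illustrative usage sketch (not part of this module): this class backs
# Bio.SeqIO.index_db(), so a typical construction path is, assuming two
# local FASTA files:
#
#     >>> from Bio import SeqIO
#     >>> d = SeqIO.index_db("my.idx", ["one.fasta", "two.fasta"], "fasta")
#     >>> len(d)  # total number of records across both files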
def _load_index(self):
"""Called from __init__ to re-use an existing index (PRIVATE)."""
index_filename = self._index_filename
relative_path = self._relative_path
filenames = self._filenames
format = self._format
proxy_factory = self._proxy_factory
con = _sqlite.connect(index_filename)
self._con = con
# Check the count...
try:
count, = con.execute(
"SELECT value FROM meta_data WHERE key=?;",
("count",)).fetchone()
self._length = int(count)
if self._length == -1:
con.close()
raise ValueError("Unfinished/partial database")
count, = con.execute(
"SELECT COUNT(key) FROM offset_data;").fetchone()
if self._length != int(count):
con.close()
raise ValueError("Corrupt database? %i entries not %i"
% (int(count), self._length))
self._format, = con.execute(
"SELECT value FROM meta_data WHERE key=?;",
("format",)).fetchone()
if format and format != self._format:
con.close()
raise ValueError("Index file says format %s, not %s"
% (self._format, format))
try:
filenames_relative_to_index, = con.execute(
"SELECT value FROM meta_data WHERE key=?;",
("filenames_relative_to_index",)).fetchone()
filenames_relative_to_index = (filenames_relative_to_index.upper() == "TRUE")
except TypeError:
# Original behaviour: assume False if the meta_data entry is missing.
filenames_relative_to_index = False
self._filenames = [row[0] for row in
con.execute("SELECT name FROM file_data "
"ORDER BY file_number;").fetchall()]
if filenames_relative_to_index:
# Not implicitly relative to $PWD, explicitly relative to index file
relative_path = os.path.abspath(os.path.dirname(index_filename))
tmp = []
for f in self._filenames:
if os.path.isabs(f):
tmp.append(f)
else:
# Would be stored with Unix / path separator, so convert
# it to the local OS path separator here:
tmp.append(os.path.join(relative_path, f.replace("/", os.path.sep)))
self._filenames = tmp
del tmp
if filenames and len(filenames) != len(self._filenames):
con.close()
raise ValueError("Index file says %i files, not %i"
% (len(self._filenames), len(filenames)))
if filenames and filenames != self._filenames:
for old, new in zip(self._filenames, filenames):
# Want exact match (after making relative to the index above)
if os.path.abspath(old) != os.path.abspath(new):
con.close()
if filenames_relative_to_index:
raise ValueError("Index file has different filenames, e.g. %r != %r"
% (os.path.abspath(old), os.path.abspath(new)))
else:
raise ValueError("Index file has different filenames "
"[This is an old index where any relative paths "
"were relative to the original working directory]. "
"e.g. %r != %r"
% (os.path.abspath(old), os.path.abspath(new)))
# Filenames are equal (after imposing abspath)
except _OperationalError as err:
con.close()
raise ValueError("Not a Biopython index database? %s" % err)
# Now we have the format (from the DB if not given to us),
if not proxy_factory(self._format):
con.close()
raise ValueError("Unsupported format '%s'" % self._format)
def _build_index(self):
"""Called from __init__ to create a new index (PRIVATE)."""
index_filename = self._index_filename
relative_path = self._relative_path
filenames = self._filenames
format = self._format
key_function = self._key_function
proxy_factory = self._proxy_factory
max_open = self._max_open
random_access_proxies = self._proxies
if not format or not filenames:
raise ValueError("Filenames to index and format required to build %r" % index_filename)
if not proxy_factory(format):
raise ValueError("Unsupported format '%s'" % format)
# Create the index
con = _sqlite.connect(index_filename)
self._con = con
# print("Creating index")
# Sqlite PRAGMA settings for speed
con.execute("PRAGMA synchronous=OFF")
con.execute("PRAGMA locking_mode=EXCLUSIVE")
# Don't index the key column until the end (faster)
# con.execute("CREATE TABLE offset_data (key TEXT PRIMARY KEY, "
# "offset INTEGER);")
con.execute("CREATE TABLE meta_data (key TEXT, value TEXT);")
con.execute("INSERT INTO meta_data (key, value) VALUES (?,?);",
("count", -1))
con.execute("INSERT INTO meta_data (key, value) VALUES (?,?);",
("format", format))
con.execute("INSERT INTO meta_data (key, value) VALUES (?,?);",
("filenames_relative_to_index", "True"))
# TODO - Record the alphabet?
# TODO - Record the file size and modified date?
con.execute(
"CREATE TABLE file_data (file_number INTEGER, name TEXT);")
con.execute("CREATE TABLE offset_data (key TEXT, file_number INTEGER, offset INTEGER, length INTEGER);")
count = 0
for i, filename in enumerate(filenames):
# Default to storing as an absolute path,
f = os.path.abspath(filename)
if not os.path.isabs(filename) and not os.path.isabs(index_filename):
# Since the user gave BOTH the filename & the index as relative paths,
# we will store this relative to the index file even though it may
# now start with ../ (meaning up a level).
# Note for cross platform use (e.g. shared data drive over SAMBA),
# convert any Windows slash into Unix style / for relative paths.
f = os.path.relpath(filename, relative_path).replace(os.path.sep, "/")
elif (os.path.dirname(os.path.abspath(filename)) + os.path.sep).startswith(relative_path + os.path.sep):
# Since sequence file is in same directory or sub directory,
# might as well make this into a relative path:
f = os.path.relpath(filename, relative_path).replace(os.path.sep, "/")
assert not f.startswith("../"), f
# print("DEBUG - storing %r as [%r] %r" % (filename, relative_path, f))
con.execute(
"INSERT INTO file_data (file_number, name) VALUES (?,?);",
(i, f))
random_access_proxy = proxy_factory(format, filename)
if key_function:
offset_iter = ((key_function(k), i, o, l)
for (k, o, l) in random_access_proxy)
else:
offset_iter = ((k, i, o, l)
for (k, o, l) in random_access_proxy)
while True:
batch = list(itertools.islice(offset_iter, 100))
if not batch:
break
# print("Inserting batch of %i offsets, %s ... %s"
# % (len(batch), batch[0][0], batch[-1][0]))
con.executemany(
"INSERT INTO offset_data (key,file_number,offset,length) VALUES (?,?,?,?);",
batch)
con.commit()
count += len(batch)
if len(random_access_proxies) < max_open:
random_access_proxies[i] = random_access_proxy
else:
random_access_proxy._handle.close()
self._length = count
# print("About to index %i entries" % count)
try:
con.execute("CREATE UNIQUE INDEX IF NOT EXISTS "
"key_index ON offset_data(key);")
except _IntegrityError as err:
self._proxies = random_access_proxies
self.close()
con.close()
raise ValueError("Duplicate key? %s" % err)
con.execute("PRAGMA locking_mode=NORMAL")
con.execute("UPDATE meta_data SET value = ? WHERE key = ?;",
(count, "count"))
con.commit()
# print("Index created")
def __repr__(self):
return self._repr
def __contains__(self, key):
return bool(
self._con.execute("SELECT key FROM offset_data WHERE key=?;",
(key,)).fetchone())
def __len__(self):
"""How many records are there?"""
return self._length
# return self._con.execute("SELECT COUNT(key) FROM offset_data;").fetchone()[0]
def __iter__(self):
"""Iterate over the keys."""
for row in self._con.execute("SELECT key FROM offset_data;"):
yield str(row[0])
if hasattr(dict, "iteritems"):
# Python 2, use iteritems but not items etc
# Just need to override this...
def keys(self):
"""Return a list of all the keys (SeqRecord identifiers)."""
return [str(row[0]) for row in
self._con.execute("SELECT key FROM offset_data;").fetchall()]
def __getitem__(self, key):
"""x.__getitem__(y) <==> x[y]"""
# Pass the offset to the proxy
row = self._con.execute(
"SELECT file_number, offset FROM offset_data WHERE key=?;",
(key,)).fetchone()
if not row:
raise KeyError
file_number, offset = row
proxies = self._proxies
if file_number in proxies:
record = proxies[file_number].get(offset)
else:
if len(proxies) >= self._max_open:
# Close an old handle...
proxies.popitem()[1]._handle.close()
# Open a new handle...
proxy = self._proxy_factory(self._format, self._filenames[file_number])
record = proxy.get(offset)
proxies[file_number] = proxy
if self._key_function:
key2 = self._key_function(record.id)
else:
key2 = record.id
if key != key2:
raise ValueError("Key did not match (%s vs %s)" % (key, key2))
return record
def get(self, k, d=None):
"""D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."""
try:
return self.__getitem__(k)
except KeyError:
return d
def get_raw(self, key):
"""Similar to the get method, but returns the record as a raw string.
If the key is not found, a KeyError exception is raised.
Note that on Python 3 a bytes string is returned, not a typical
unicode string.
**NOTE** - This functionality is not supported for every file format.
"""
# Pass the offset to the proxy
row = self._con.execute(
"SELECT file_number, offset, length FROM offset_data WHERE key=?;",
(key,)).fetchone()
if not row:
raise KeyError
file_number, offset, length = row
proxies = self._proxies
if file_number in proxies:
if length:
# Shortcut if we have the length
h = proxies[file_number]._handle
h.seek(offset)
return h.read(length)
else:
return proxies[file_number].get_raw(offset)
else:
# This code is duplicated from __getitem__ to avoid a function call
if len(proxies) >= self._max_open:
# Close an old handle...
proxies.popitem()[1]._handle.close()
# Open a new handle...
proxy = self._proxy_factory(self._format, self._filenames[file_number])
proxies[file_number] = proxy
if length:
# Shortcut if we have the length
h = proxy._handle
h.seek(offset)
return h.read(length)
else:
return proxy.get_raw(offset)
def close(self):
"""Close any open file handles."""
proxies = self._proxies
while proxies:
proxies.popitem()[1]._handle.close()
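# A hedged usage sketch for the dict-like SQLite index class above: such a
# class typically backs Bio.SeqIO.index_db(). The filenames ("example.idx",
# "example.fasta") and the record id below are illustrative assumptions.
#
#     from Bio import SeqIO
#     records = SeqIO.index_db("example.idx", "example.fasta", "fasta")
#     print(len(records))                  # __len__, i.e. the cached count
#     record = records["record_id"]        # __getitem__ via a file proxy
#     raw = records.get_raw("record_id")   # bytes on Python 3
#     records.close()                      # close any open file handles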
|
import glob
import os
import re
import time
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.shellutil as shellutil
from azurelinuxagent.common.rdma import RDMAHandler
class CentOSRDMAHandler(RDMAHandler):
rdma_user_mode_package_name = 'microsoft-hyper-v-rdma'
rdma_kernel_mode_package_name = 'kmod-microsoft-hyper-v-rdma'
rdma_wrapper_package_name = 'msft-rdma-drivers'
hyper_v_package_name = "hypervkvpd"
hyper_v_package_name_new = "microsoft-hyper-v"
version_major = None
version_minor = None
def __init__(self, distro_version):
v = distro_version.split('.')
if len(v) < 2:
            raise Exception('Unexpected CentOS version: %s' % distro_version)
self.version_major, self.version_minor = v[0], v[1]
def install_driver(self):
"""
Install the KVP daemon and the appropriate RDMA driver package for the
RDMA firmware.
"""
        # Check for and install the KVP daemon if it is not running
        time.sleep(10)  # give some time for the hv_kvp_daemon to start up.
kvpd_running = RDMAHandler.is_kvp_daemon_running()
logger.info('RDMA: kvp daemon running: %s' % kvpd_running)
if not kvpd_running:
self.check_or_install_kvp_daemon()
time.sleep(10) # wait for post-install reboot or kvp to come up
# Find out RDMA firmware version and see if the existing package needs
# updating or if the package is missing altogether (and install it)
fw_version = RDMAHandler.get_rdma_version()
if not fw_version:
raise Exception('Cannot determine RDMA firmware version')
logger.info("RDMA: found firmware version: {0}".format(fw_version))
fw_version = self.get_int_rdma_version(fw_version)
installed_pkg = self.get_rdma_package_info()
if installed_pkg:
logger.info(
'RDMA: driver package present: {0}'.format(installed_pkg))
if self.is_rdma_package_up_to_date(installed_pkg, fw_version):
logger.info('RDMA: driver package is up-to-date')
return
else:
logger.info('RDMA: driver package needs updating')
self.update_rdma_package(fw_version)
else:
logger.info('RDMA: driver package is NOT installed')
self.update_rdma_package(fw_version)
def is_rdma_package_up_to_date(self, pkg, fw_version):
        # Example match (pkg name, '-', three or more dot-terminated numeric
        # segments, fw_version, '-'):
        # - pkg=microsoft-hyper-v-rdma-4.1.0.142-20160323.x86_64
        # - fw_version=142
        pattern = r'{0}-(\d+\.){{3,}}({1})-'.format(self.rdma_user_mode_package_name, fw_version)
return re.match(pattern, pkg)
@staticmethod
def get_int_rdma_version(version):
s = version.split('.')
if len(s) == 0:
raise Exception('Unexpected RDMA firmware version: "%s"' % version)
return s[0]
def get_rdma_package_info(self):
"""
Returns the installed rdma package name or None
"""
ret, output = shellutil.run_get_output(
'rpm -q %s' % self.rdma_user_mode_package_name, chk_err=False)
if ret != 0:
return None
return output
def update_rdma_package(self, fw_version):
logger.info("RDMA: updating RDMA packages")
self.refresh_repos()
self.force_install_package(self.rdma_wrapper_package_name)
self.install_rdma_drivers(fw_version)
def force_install_package(self, pkg_name):
"""
Attempts to remove existing package and installs the package
"""
logger.info('RDMA: Force installing package: %s' % pkg_name)
if self.uninstall_package(pkg_name) != 0:
logger.info('RDMA: Erasing package failed but will continue')
if self.install_package(pkg_name) != 0:
raise Exception('Failed to install package "{0}"'.format(pkg_name))
logger.info('RDMA: installation completed: %s' % pkg_name)
@staticmethod
def uninstall_package(pkg_name):
return shellutil.run('yum erase -y -q {0}'.format(pkg_name))
@staticmethod
def install_package(pkg_name):
return shellutil.run('yum install -y -q {0}'.format(pkg_name))
def refresh_repos(self):
logger.info("RDMA: refreshing yum repos")
if shellutil.run('yum clean all') != 0:
raise Exception('Cleaning yum repositories failed')
if shellutil.run('yum updateinfo') != 0:
raise Exception('Failed to act on yum repo update information')
logger.info("RDMA: repositories refreshed")
def install_rdma_drivers(self, fw_version):
"""
Installs the drivers from /opt/rdma/rhel[Major][Minor] directory,
particularly the microsoft-hyper-v-rdma-* kmod-* and (no debuginfo or
src). Tries to uninstall them first.
"""
pkg_dir = '/opt/microsoft/rdma/rhel{0}{1}'.format(
self.version_major, self.version_minor)
logger.info('RDMA: pkgs dir: {0}'.format(pkg_dir))
if not os.path.isdir(pkg_dir):
raise Exception('RDMA packages directory %s is missing' % pkg_dir)
pkgs = os.listdir(pkg_dir)
logger.info('RDMA: found %d files in package directory' % len(pkgs))
        # Uninstall the KVP daemon first (if it exists)
self.uninstall_kvp_driver_package_if_exists()
# Install kernel mode driver (kmod-microsoft-hyper-v-rdma-*)
kmod_pkg = self.get_file_by_pattern(
pkgs, "%s-(\d+\.){3,}(%s)-\d{8}\.x86_64.rpm" % (self.rdma_kernel_mode_package_name, fw_version))
if not kmod_pkg:
raise Exception("RDMA kernel mode package not found")
kmod_pkg_path = os.path.join(pkg_dir, kmod_pkg)
self.uninstall_pkg_and_install_from(
'kernel mode', self.rdma_kernel_mode_package_name, kmod_pkg_path)
# Install user mode driver (microsoft-hyper-v-rdma-*)
umod_pkg = self.get_file_by_pattern(
pkgs, "%s-(\d+\.){3,}(%s)-\d{8}\.x86_64.rpm" % (self.rdma_user_mode_package_name, fw_version))
if not umod_pkg:
raise Exception("RDMA user mode package not found")
umod_pkg_path = os.path.join(pkg_dir, umod_pkg)
self.uninstall_pkg_and_install_from(
'user mode', self.rdma_user_mode_package_name, umod_pkg_path)
logger.info("RDMA: driver packages installed")
if not self.load_driver_module() or not self.is_driver_loaded():
logger.info("RDMA: driver module is not loaded; reboot required")
self.reboot_system()
else:
logger.info("RDMA: kernel module is loaded")
@staticmethod
    def get_file_by_pattern(file_list, pattern):
        for file_name in file_list:
            if re.match(pattern, file_name):
                return file_name
        return None
def uninstall_pkg_and_install_from(self, pkg_type, pkg_name, pkg_path):
logger.info(
"RDMA: Processing {0} driver: {1}".format(pkg_type, pkg_path))
logger.info("RDMA: Try to uninstall existing version: %s" % pkg_name)
if self.uninstall_package(pkg_name) == 0:
logger.info("RDMA: Successfully uninstaled %s" % pkg_name)
logger.info(
"RDMA: Installing {0} package from {1}".format(pkg_type, pkg_path))
if self.install_package(pkg_path) != 0:
raise Exception(
"Failed to install RDMA {0} package".format(pkg_type))
@staticmethod
def is_package_installed(pkg):
"""Runs rpm -q and checks return code to find out if a package
is installed"""
return shellutil.run("rpm -q %s" % pkg, chk_err=False) == 0
def uninstall_kvp_driver_package_if_exists(self):
logger.info('RDMA: deleting existing kvp driver packages')
kvp_pkgs = [self.hyper_v_package_name,
self.hyper_v_package_name_new]
for kvp_pkg in kvp_pkgs:
if not self.is_package_installed(kvp_pkg):
logger.info(
"RDMA: kvp package %s does not exist, skipping" % kvp_pkg)
else:
logger.info('RDMA: erasing kvp package "%s"' % kvp_pkg)
if shellutil.run("yum erase -q -y %s" % kvp_pkg, chk_err=False) == 0:
logger.info("RDMA: successfully erased package")
else:
logger.error("RDMA: failed to erase package")
def check_or_install_kvp_daemon(self):
"""Checks if kvp daemon package is installed, if not installs the
package and reboots the machine.
"""
logger.info("RDMA: Checking kvp daemon packages.")
kvp_pkgs = [self.hyper_v_package_name,
self.hyper_v_package_name_new]
for pkg in kvp_pkgs:
logger.info("RDMA: Checking if package %s installed" % pkg)
installed = self.is_package_installed(pkg)
if installed:
raise Exception('RDMA: package %s is installed, but the kvp daemon is not running' % pkg)
kvp_pkg_to_install=self.hyper_v_package_name
logger.info("RDMA: no kvp drivers installed, will install '%s'" % kvp_pkg_to_install)
logger.info("RDMA: trying to install kvp package '%s'" % kvp_pkg_to_install)
if self.install_package(kvp_pkg_to_install) != 0:
raise Exception("RDMA: failed to install kvp daemon package '%s'" % kvp_pkg_to_install)
logger.info("RDMA: package '%s' successfully installed" % kvp_pkg_to_install)
logger.info("RDMA: Machine will now be rebooted.")
self.reboot_system()
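# A minimal usage sketch (hedged): the distro version string "7.2" below is
# an illustrative assumption; callers would pass the detected CentOS version.
#
#     handler = CentOSRDMAHandler("7.2")
#     handler.install_driver()  # may reboot the machine after installing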
|
from csrv.model import actions
from csrv.model import events
from csrv.model import timing_phases
from csrv.model.actions.subroutines import trace
from csrv.model.cards import card_info
from csrv.model.cards import ice
class TraceForPowerCounter(trace.Trace):
DESCRIPTION = 'Trace 3 - if successful, place 1 power counter on Card01088'
def on_success(self, corp_total, runner_total):
self.card.power_counters += 1
class GiveATag(actions.Action):
DESCRIPTION = 'Give the runner 1 tag'
def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
self.card.power_counters -= 1
self.game.insert_next_phase(
timing_phases.TakeTags(self.game, self.game.runner, 1))
class TakeATag(actions.Action):
DESCRIPTION = 'Take a tag'
def __init__(self, game, player, card=None):
actions.Action.__init__(self, game, player, card=card)
self._is_mandatory = True
self._choice_made = False
def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
self.game.runner.tags += 1
self.card.log('The runner takes a tag from Card01088')
self.card.choice_made()
def choice_made(self):
self._choice_made = True
def is_usable(self):
return not self._choice_made
class JackOut(actions.JackOut):
def __init__(self, game, player, run, card=None):
actions.JackOut.__init__(self, game, player, run, card=card)
self._choice_made = False
def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
actions.JackOut.resolve(
self,
ignore_clicks=ignore_clicks,
ignore_all_costs=ignore_all_costs)
self.card.choice_made()
def choice_made(self):
self._choice_made = True
def is_usable(self):
return not self._choice_made
class Card01088(ice.Ice):
NAME = u'Card01088'
SET = card_info.CORE
NUMBER = 88
SIDE = card_info.CORP
FACTION = card_info.NEWSCORP
INFLUENCE = 2
UNIQUE = False
KEYWORDS = set([
card_info.OBSERVER,
card_info.SENTRY,
card_info.TRACER,
])
COST = 4
IMAGE_SRC = '01088.png'
STRENGTH = 4
WHEN_REZZED_PROVIDES_CHOICES_FOR = {
timing_phases.EncounterIce_3_1: 'encounter_actions',
timing_phases.CorpUseAbilities: 'card01088_abilities',
}
WHEN_INSTALLED_LISTENS = [
events.BeginEncounterIce_3_1
]
def __init__(self, game, player):
ice.Ice.__init__(self, game, player)
self.subroutines = [
TraceForPowerCounter(self.game, self.player, card=self)
]
self._encounter_actions = []
def on_begin_encounter_ice_3_1(self, sender, event):
self._encounter_actions = [
TakeATag(self.game, self.game.runner, card=self),
JackOut(self.game, self.game.runner, self.game.run, card=self)
]
def encounter_actions(self):
if self.game.run and self.game.run.current_ice() == self:
return self._encounter_actions
else:
return []
def choice_made(self):
for action in self._encounter_actions:
action.choice_made()
def card01088_abilities(self):
actions = []
if self.power_counters:
actions.append(GiveATag(self.game, self.player, card=self))
return actions
|
"""
This module implements the Lowess function for nonparametric regression.
Functions:
lowess Fit a smooth nonparametric regression curve to a scatterplot.
For more information, see
William S. Cleveland: "Robust locally weighted regression and smoothing
scatterplots", Journal of the American Statistical Association, December 1979,
volume 74, number 368, pp. 829-836.
William S. Cleveland and Susan J. Devlin: "Locally weighted regression: An
approach to regression analysis by local fitting", Journal of the American
Statistical Association, September 1988, volume 83, number 403, pp. 596-610.
"""
try:
from Numeric import *
from LinearAlgebra import solve_linear_equations
except ImportError, x:
raise ImportError, "This module requires Numeric (precursor to NumPy) with the LinearAlgebra and MLab libraries"
try:
from Bio.Cluster import median
# The function median in Bio.Cluster is faster than the function median
# in Numeric's MLab, as it does not require a full sort.
except ImportError, x:
# Use the median function in Numeric's MLab if Bio.Cluster is not available
try:
from MLab import median
except ImportError, x:
raise ImportError, "This module requires Numeric (precursor to NumPy) with the LinearAlgebra and MLab libraries"
def lowess(x, y, f=2./3., iter=3):
"""lowess(x, y, f=2./3., iter=3) -> yest
Lowess smoother: Robust locally weighted regression.
The lowess function fits a nonparametric regression curve to a scatterplot.
The arrays x and y contain an equal number of elements; each pair
(x[i], y[i]) defines a data point in the scatterplot. The function returns
the estimated (smooth) values of y.
The smoothing span is given by f. A larger value for f will result in a
smoother curve. The number of robustifying iterations is given by iter. The
function will run faster with a smaller number of iterations."""
n = len(x)
r = int(ceil(f*n))
h = [sort(abs(x-x[i]))[r] for i in range(n)]
w = clip(abs(([x]-transpose([x]))/h),0.0,1.0)
w = 1-w*w*w
w = w*w*w
yest = zeros(n,'d')
delta = ones(n,'d')
for iteration in range(iter):
for i in range(n):
weights = delta * w[:,i]
b = array([sum(weights*y), sum(weights*y*x)])
A = array([[sum(weights), sum(weights*x)],
[sum(weights*x), sum(weights*x*x)]])
beta = solve_linear_equations(A,b)
yest[i] = beta[0] + beta[1]*x[i]
residuals = y-yest
s = median(abs(residuals))
delta = clip(residuals/(6*s),-1,1)
delta = 1-delta*delta
delta = delta*delta
return yest
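if __name__ == "__main__":
    # A minimal demo sketch; it assumes the (Python 2 only) Numeric package
    # required by this module is installed. The data points are illustrative.
    x = array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0], 'd')
    y = array([1.2, 0.9, 1.8, 2.1, 2.6, 2.4], 'd')
    # Smooth with a span of two thirds and three robustifying iterations.
    print lowess(x, y, f=2./3., iter=3)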
|
"""Storage Execution Classes."""
class StorageExecution:
def __init__(self):
"""Initalize and run."""
import sys
import os
import json
# print "os.getcwd(): %s" % os.getcwd()
self.storage_cmd = self.get_args()
sys.path.append(self.storage_cmd.HOME_FOLDER)
# from pprint import pprint
# pprint(self.storage_cmd)
# Load storage_templates.json
template_path = os.path.join(
self.storage_cmd.HOME_FOLDER,
self.storage_cmd.DEFAULT_TEMPLATE_FILE
)
with open(template_path, 'r') as f:
self.templates = json.load(f)
# pprint(self.templates)
if self.storage_cmd.ACTION.lower() == "upload":
self.upload_content()
elif self.storage_cmd.ACTION.lower() == "remove":
self.remove_content()
elif self.storage_cmd.ACTION.lower() == "list":
self.list_content()
elif self.storage_cmd.ACTION.lower() == "size":
self.check_size_content()
else:
print "ACTION %s is not defined." % self.storage_cmd.ACTION
print "Available actions are UPLOAD, REMOVE, LIST, SIZE"
exit(1)
def get_args(self):
"""Get arguments to script."""
import argparse
parser_object = argparse.ArgumentParser()
parser_object.add_argument(
'-o', '--OBJECTIVES', '--TARGETS',
type=str,
help='Objectives to encrypt',
required=False
)
parser_object.add_argument(
'-w', '--WORK_FOLDER',
type=str,
help='Backup working directory.',
required=False
)
parser_object.add_argument(
'-D', '--DESTINATION',
type=str,
help='Backup destination: local, s3, oss, etc',
required=True)
parser_object.add_argument(
'-A', '--ACTION',
type=str,
help='Storage Action: upload, list or remove.',
required=False,
default='Upload')
parser_object.add_argument(
'-H', '--HOME_FOLDER',
type=str,
help='Path to the nc-backup-py folder',
required=True)
parser_object.add_argument(
'-hn', '--HOSTNAME',
type=str,
help='Server name (client Host Name) e.g: nc-backup-kr',
required=True)
parser_object.add_argument(
'-u', '--UPLOAD_COMMAND_TEMPLATE',
type=str,
help='Upload command template.',
required=False)
parser_object.add_argument(
'-l', '--LS_COMMAND_TEMPLATE',
type=str,
help='List Command Template',
required=False)
parser_object.add_argument(
'-r', '--RM_COMMAND_TEMPLATE',
type=str,
            help='Remove Command Template',
required=False)
parser_object.add_argument(
'-R', '--REMOVE_OBJECTIVES',
'--REMOVE_TARGETS',
type=str,
help='Remove Encrypted files and folder after execution',
required=False)
parser_object.add_argument(
'--ARGS_DICT',
type=str,
help='ARGS Dictionary',
required=False,
default="{}")
parser_object.add_argument(
'--DEFAULT_TEMPLATE_FILE',
type=str,
help='Default Templates File',
required=False,
default="storage/storage_templates.json")
args_list, unknown = parser_object.parse_known_args()
return args_list
    def iterate_result(self, uploads_to_cloud):
        count_file = 1
        print uploads_to_cloud
        for upload_to_cloud in uploads_to_cloud:
            if upload_to_cloud[0] != 0:
                print 'upload of file number ' + str(count_file) + ' failed.'
                exit(1)
            count_file = count_file + 1
def execute(self):
print "calling %s command to upload." % self.storage_cmd.DESTINATION
from storages import Storage
command = Storage(self.storage_cmd)
command_output = command.execute()
        if command_output:
            self.iterate_result(command_output)
        else:
            print 'Executing %s returned a None result' % self.storage_cmd.DESTINATION
def list_content(self):
"""Set defaults for list if not present."""
print "Listing directory content"
def upload_content(self):
"""Set defaults for upload_command if not present."""
if not self.storage_cmd.REMOVE_OBJECTIVES:
self.storage_cmd.REMOVE_OBJECTIVES = 'True'
print "WARNING: REMOVE_OBJECTIVES is missing, default is true."
if not self.storage_cmd.OBJECTIVES:
self.storage_cmd.OBJECTIVES = '/opt/backup/encrypted'
print 'Storage Destination: ' + self.storage_cmd.DESTINATION
if self.storage_cmd.UPLOAD_COMMAND_TEMPLATE:
print "Using template %s " % self.storage_cmd.UPLOAD_COMMAND_TEMPLATE
else:
print "Trying to set default %s storage upload functions" % self.storage_cmd.DESTINATION
if str(self.storage_cmd.DESTINATION) not in ['s3', 'oss', 'ssh', 'local']:
                print ('No upload command template available for storage destination: %s' % self.storage_cmd.DESTINATION)
print 'You can use your own template for the upload command. See:'
print '<add link to docs>'
exit(1)
else:
self.storage_cmd.UPLOAD_COMMAND_TEMPLATE = self.templates[self.storage_cmd.DESTINATION]['UPLOAD_COMMAND_TEMPLATE']
print "Sucessfully set Upload command template for %s." % self.storage_cmd.DESTINATION
def remove_content(self):
"""Set defaults for remove if not present."""
print 'General: removing files from storage'
def check_size_content(self):
"""Set defaults for size if not present."""
print 'checking the files size'
if __name__ == "__main__":
executor = StorageExecution()
executor.execute()
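# A hedged invocation sketch; the script name, paths and hostname below are
# illustrative assumptions, not values shipped with the tool:
#
#     python storage_execution.py -A upload -D s3 \
#         -H /opt/nc-backup-py -hn backup-host-01 \
#         -o /opt/backup/encrypted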
|
"""Tests for the flow."""
import time
from grr.lib import server_plugins
from grr.client import actions
from grr.client import vfs
from grr.lib import access_control
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flags
from grr.lib import flow
from grr.lib import flow_runner
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import type_info
from grr.lib import utils
from grr.proto import tests_pb2
class FlowResponseSerialization(flow.GRRFlow):
"""Demonstrate saving responses in the flow."""
@flow.StateHandler(next_state="Response1")
def Start(self, unused_message=None):
self.CallClient("ReturnBlob",
rdfvalue.EchoRequest(data="test"),
next_state="Response1")
@flow.StateHandler(next_state="Response2")
def Response1(self, messages):
"""Record the message id for testing."""
self.state.Register("messages", messages)
self.CallClient("ReturnBlob",
rdfvalue.EchoRequest(data="test"),
next_state="Response2")
@flow.StateHandler()
def Response2(self, messages):
# We need to receive one response and it must be the same as that stored in
# the previous state.
if (len(list(messages)) != 1 or
messages.status.status != rdfvalue.GrrStatus.ReturnedStatus.OK or
list(messages) != list(self.state.messages)):
raise RuntimeError("Messages not serialized")
class NoRequestChildFlow(flow.GRRFlow):
"""This flow just returns and does not generate any requests."""
@flow.StateHandler()
def Start(self, unused_message):
return
class CallClientChildFlow(flow.GRRFlow):
"""This flow just returns and does not generate any requests."""
@flow.StateHandler()
def Start(self, unused_message):
self.CallClient("GetClientStats", next_state="End")
class NoRequestParentFlow(flow.GRRFlow):
child_flow = "NoRequestChildFlow"
@flow.StateHandler(next_state="End")
def Start(self, unused_message):
self.CallFlow(self.child_flow, next_state="End")
@flow.StateHandler()
def End(self, unused_message):
pass
class CallClientParentFlow(NoRequestParentFlow):
child_flow = "CallClientChildFlow"
class AdminOnlyChildFlow(CallClientChildFlow):
AUTHORIZED_LABELS = ["admin"]
class AdminOnlyParentFlow(NoRequestParentFlow):
child_flow = "AdminOnlyChildFlow"
class BasicFlowTest(test_lib.FlowTestsBaseclass):
pass
class FlowCreationTest(BasicFlowTest):
"""Test flow creation."""
def testInvalidClientId(self):
"""Should raise if the client_id is invalid."""
self.assertRaises(ValueError, flow.GRRFlow.StartFlow,
client_id="hello", flow_name="FlowOrderTest",
token=self.token)
def testUnknownArg(self):
"""Check that flows reject unknown args."""
self.assertRaises(type_info.UnknownArg, flow.GRRFlow.StartFlow,
client_id=self.client_id, flow_name="FlowOrderTest",
token=self.token, foobar=1)
def testTypeAttributeIsNotAppendedWhenFlowIsClosed(self):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
age=aff4.ALL_TIMES, mode="rw",
token=self.token)
flow_obj.Close()
flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
age=aff4.ALL_TIMES, token=self.token)
types = list(flow_obj.GetValuesForAttribute(flow_obj.Schema.TYPE))
self.assertEqual(len(types), 1)
def testFlowSerialization(self):
"""Check that we can unpickle flows."""
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
age=aff4.ALL_TIMES, token=self.token)
self.assertEqual(flow_obj.__class__, test_lib.FlowOrderTest)
def testFlowSerialization2(self):
"""Check that we can unpickle flows."""
class TestClientMock(object):
in_rdfvalue = rdfvalue.EchoRequest
out_rdfvalue = rdfvalue.DataBlob
def __init__(self):
# Register us as an action plugin.
actions.ActionPlugin.classes["ReturnBlob"] = self
def ReturnBlob(self, unused_args):
return [rdfvalue.DataBlob(integer=100)]
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper("FlowResponseSerialization",
TestClientMock(), token=self.token,
client_id=self.client_id):
pass
def testTerminate(self):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
flow.GRRFlow.TerminateFlow(session_id, token=self.token)
flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
age=aff4.ALL_TIMES, token=self.token)
runner = flow_obj.GetRunner()
self.assertEqual(runner.IsRunning(), False)
self.assertEqual(runner.context.state,
rdfvalue.Flow.State.ERROR)
reason = "no reason"
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
flow.GRRFlow.TerminateFlow(session_id, reason=reason, token=self.token)
flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
age=aff4.ALL_TIMES, token=self.token)
runner = flow_obj.GetRunner()
self.assertEqual(runner.IsRunning(), False)
self.assertEqual(runner.context.state,
rdfvalue.Flow.State.ERROR)
self.assertTrue(reason in runner.context.status)
def testChildTermination(self):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="CallClientParentFlow",
token=self.token)
# The child URN should be contained within the parent session_id URN.
flow_obj = aff4.FACTORY.Open(session_id, token=self.token)
children = list(flow_obj.ListChildren())
self.assertEqual(len(children), 1)
reason = "just so"
flow.GRRFlow.TerminateFlow(session_id, reason=reason, token=self.token)
flow_obj = aff4.FACTORY.Open(session_id,
aff4_type="CallClientParentFlow",
token=self.token)
runner = flow_obj.GetRunner()
self.assertEqual(runner.IsRunning(), False)
self.assertEqual(runner.context.state,
rdfvalue.Flow.State.ERROR)
self.assertTrue("user test" in runner.context.status)
self.assertTrue(reason in runner.context.status)
child = aff4.FACTORY.Open(children[0],
aff4_type="CallClientChildFlow",
token=self.token)
runner = child.GetRunner()
self.assertEqual(runner.IsRunning(), False)
self.assertEqual(runner.context.state,
rdfvalue.Flow.State.ERROR)
self.assertTrue("user test" in runner.context.status)
self.assertTrue("Parent flow terminated." in runner.context.status)
def testNotification(self):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
with aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
age=aff4.ALL_TIMES, mode="rw",
token=self.token) as flow_obj:
msg = "Flow terminated due to error"
flow_obj.GetRunner().Notify("FlowStatus", session_id, msg)
user_fd = aff4.FACTORY.Open(rdfvalue.RDFURN("aff4:/users").Add(
self.token.username), mode="r", token=self.token)
notifications = user_fd.ShowNotifications(reset=False)
self.assertEqual(len(notifications), 1)
for notification in notifications:
self.assertTrue(notification.message.endswith(": " + msg))
self.assertEqual(notification.subject, rdfvalue.RDFURN(session_id))
def testFormatstringNotification(self):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
with aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
age=aff4.ALL_TIMES, mode="rw",
token=self.token) as flow_obj:
runner = flow_obj.GetRunner()
      # msg contains a % token to test format string handling.
msg = "Flow reading %system% terminated due to error"
runner.Notify("FlowStatus", session_id, msg)
runner.Status(msg)
def testSendRepliesAttribute(self):
# Run the flow in the simulated way. Child's send_replies is set to False.
# Parent flow will raise if number of responses is > 0.
for _ in test_lib.TestFlowHelper(
"ParentFlowWithoutResponses", ClientMock(), client_id=self.client_id,
check_flow_errors=False, token=self.token,):
pass
self.assertEqual(ParentFlowWithoutResponses.success, True)
notifications = {}
def CollectNotifications(self, queue, notifications, **kwargs):
now = time.time()
for notification in notifications:
self.notifications.setdefault(notification.session_id, []).append(now)
self.old_notify(queue, notifications, **kwargs)
def testNoRequestChildFlowRace(self):
manager = queue_manager.QueueManager(token=self.token)
self.old_notify = manager._MultiNotifyQueue
with utils.Stubber(queue_manager.QueueManager, "_MultiNotifyQueue",
self.CollectNotifications):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="NoRequestParentFlow",
token=self.token)
self.assertIn(session_id, self.notifications)
f = aff4.FACTORY.Open(session_id, token=self.token)
# Check that the first notification came in after the flow was created.
self.assertLess(int(f.Get(f.Schema.TYPE).age),
1e6 * min(self.notifications[session_id]),
"There was a notification for a flow before "
"the flow was created.")
def testCallClientChildFlowRace(self):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="CallClientParentFlow",
token=self.token)
client_requests = data_store.DB.ResolveRegex(
self.client_id.Queue(), "task:.*", token=self.token)
self.assertEqual(len(client_requests), 1)
f = aff4.FACTORY.Open(session_id, token=self.token)
for (_, _, timestamp) in client_requests:
# Check that the client request was written after the flow was created.
self.assertLess(int(f.Get(f.Schema.TYPE).age), timestamp,
"The client request was issued before "
"the flow was created.")
def testFlowLogging(self):
"""Check that flows log correctly."""
flow_urn = None
for session_id in test_lib.TestFlowHelper("DummyLogFlow",
action_mocks.ActionMock(),
token=self.token,
client_id=self.client_id):
flow_urn = session_id
with aff4.FACTORY.Open(flow_urn.Add("Logs"), age=aff4.ALL_TIMES,
token=self.token) as log_collection:
count = 0
# Can't use len with PackedVersionCollection
for log in log_collection:
self.assertEqual(log.client_id, self.client_id)
self.assertTrue(log.log_message in ["First", "Second", "Third",
"Fourth", "Uno", "Dos", "Tres",
"Cuatro"])
self.assertTrue(log.flow_name in ["DummyLogFlow",
"DummyLogFlowChild"])
self.assertTrue(str(flow_urn) in str(log.urn))
count += 1
self.assertEqual(count, 8)
class FlowTest(BasicFlowTest):
"""Tests the Flow."""
def testBrokenFlow(self):
"""Check that flows which call to incorrect states raise."""
client_mock = action_mocks.ActionMock("ReadBuffer")
with self.assertRaises(RuntimeError):
for _ in test_lib.TestFlowHelper(
"BrokenFlow", client_mock, client_id=self.client_id,
check_flow_errors=True, token=self.token):
pass
def SendMessages(self, response_ids, session_id, authenticated=True):
"""Send messages to the flow."""
for response_id in response_ids:
message = rdfvalue.GrrMessage(
request_id=1,
response_id=response_id,
session_id=session_id)
if authenticated:
auth_state = rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED
message.auth_state = auth_state
self.SendMessage(message)
def SendMessage(self, message):
# Now messages are set in the data store
with queue_manager.QueueManager(token=self.token) as manager:
manager.QueueResponse(message.session_id, message)
def SendOKStatus(self, response_id, session_id):
"""Send a message to the flow."""
message = rdfvalue.GrrMessage(
request_id=1,
response_id=response_id,
session_id=session_id,
type=rdfvalue.GrrMessage.Type.STATUS,
auth_state=rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED)
status = rdfvalue.GrrStatus(status=rdfvalue.GrrStatus.ReturnedStatus.OK)
message.payload = status
self.SendMessage(message)
# Now also set the state on the RequestState
request_state, _ = data_store.DB.Resolve(
message.session_id.Add("state"),
queue_manager.QueueManager.FLOW_REQUEST_TEMPLATE % message.request_id,
token=self.token)
request_state = rdfvalue.RequestState(request_state)
request_state.status = status
data_store.DB.Set(
message.session_id.Add("state"),
queue_manager.QueueManager.FLOW_REQUEST_TEMPLATE % message.request_id,
request_state, token=self.token)
return message
def testReordering(self):
"""Check that out of order client messages are reordered."""
flow_obj = self.FlowSetup("FlowOrderTest")
    # Simulate processing messages arriving in random order
message_ids = [2, 1, 4, 3, 5]
self.SendMessages(message_ids, flow_obj.session_id)
# Send the status message
message = self.SendOKStatus(6, flow_obj.session_id)
runner = flow_runner.FlowRunner(flow_obj)
notification = rdfvalue.Notification(timestamp=rdfvalue.RDFDatetime().Now())
runner.ProcessCompletedRequests(notification, [message])
# Check that the messages were processed in order
self.assertEqual(flow_obj.messages, [1, 2, 3, 4, 5])
def testCallClient(self):
"""Flows can send client messages using CallClient()."""
flow_obj = self.FlowSetup("FlowOrderTest")
# Check that a message went out to the client
manager = queue_manager.QueueManager(token=self.token)
tasks = manager.Query(self.client_id, limit=100)
self.assertEqual(len(tasks), 1)
message = tasks[0]
self.assertEqual(message.session_id, flow_obj.session_id)
self.assertEqual(message.request_id, 1)
self.assertEqual(message.name, "Test")
def testCallClientWellKnown(self):
"""Well known flows can also call the client."""
cls = flow.GRRFlow.classes["GetClientStatsAuto"]
flow_obj = cls(cls.well_known_session_id, mode="rw", token=self.token)
flow_obj.CallClient(self.client_id, "GetClientStats")
# Check that a message went out to the client
manager = queue_manager.QueueManager(token=self.token)
tasks = manager.Query(self.client_id, limit=100)
self.assertEqual(len(tasks), 1)
message = tasks[0]
# If we don't specify where to send the replies, they go to the devnull flow
devnull = flow.GRRFlow.classes["IgnoreResponses"]
self.assertEqual(message.session_id, devnull.well_known_session_id)
self.assertEqual(message.request_id, 0)
self.assertEqual(message.name, "GetClientStats")
messages = []
def StoreMessage(_, msg):
messages.append(msg)
with utils.Stubber(devnull, "ProcessMessage", StoreMessage):
client_mock = action_mocks.ActionMock("GetClientStats")
for _ in test_lib.TestFlowHelper(
"ClientActionRunner", client_mock, client_id=self.client_id,
action="GetClientStats", token=self.token):
pass
# Make sure the messages arrived.
self.assertEqual(len(messages), 1)
def testAuthentication1(self):
"""Test that flows refuse to processes unauthenticated messages."""
flow_obj = self.FlowSetup("FlowOrderTest")
    # Simulate processing messages arriving in random order
message_ids = [2, 1, 4, 3, 5]
self.SendMessages(message_ids, flow_obj.session_id,
authenticated=False)
# Send the status message
message = self.SendOKStatus(6, flow_obj.session_id)
runner = flow_runner.FlowRunner(flow_obj)
notification = rdfvalue.Notification(timestamp=rdfvalue.RDFDatetime().Now())
runner.ProcessCompletedRequests(notification, [message])
    # The unauthenticated messages should not have been processed
self.assertEqual(flow_obj.messages, [])
def testAuthentication2(self):
"""Test that flows refuse to processes unauthenticated messages.
Here we try to simulate an attacker injecting unauthenticated
messages midstream.
The current implementation actually fails to process the entire
flow since the injected messages displace the real ones if they
arrive earlier. This can be an effective DoS against legitimate
clients but would require attackers to guess session ids.
"""
flow_obj = self.FlowSetup("FlowOrderTest")
    # Simulate processing messages arriving in random order
message_ids = [1, 2]
self.SendMessages(message_ids, flow_obj.session_id,
authenticated=True)
# Now suppose some of the messages are spoofed
message_ids = [3, 4, 5]
self.SendMessages(message_ids, flow_obj.session_id,
authenticated=False)
# And now our real messages arrive
message_ids = [5, 6]
self.SendMessages(message_ids, flow_obj.session_id,
authenticated=True)
# Send the status message
message = self.SendOKStatus(7, flow_obj.session_id)
runner = flow_runner.FlowRunner(flow_obj)
notification = rdfvalue.Notification(timestamp=rdfvalue.RDFDatetime().Now())
runner.ProcessCompletedRequests(notification, [message])
# Some messages should actually be processed
self.assertEqual(flow_obj.messages, [1, 2, 5, 6])
def testWellKnownFlows(self):
"""Test the well known flows."""
test_flow = self.FlowSetup("WellKnownSessionTest")
# Make sure the session ID is well known
self.assertEqual(test_flow.session_id,
test_lib.WellKnownSessionTest.well_known_session_id)
# Messages to Well Known flows can be unauthenticated
messages = [rdfvalue.GrrMessage(args=str(i)) for i in range(10)]
for message in messages:
test_flow.ProcessMessage(message)
# The messages might be processed in arbitrary order
test_flow.messages.sort()
# Make sure that messages were processed even without a status
# message to complete the transaction (Well known flows do not
# have transactions or states - all messages always get to the
# ProcessMessage method):
self.assertEqual(test_flow.messages, range(10))
def testArgParsing(self):
"""Test that arguments can be extracted and annotated successfully."""
# Should raise on parsing default.
self.assertRaises(type_info.TypeValueError, flow.GRRFlow.StartFlow,
client_id=self.client_id, flow_name="BadArgsFlow1",
arg1=False, token=self.token)
# Should not raise now if we provide the correct type.
flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="BadArgsFlow1",
arg1=rdfvalue.PathSpec(), token=self.token)
class NoClientListener(flow.EventListener): # pylint: disable=unused-variable
well_known_session_id = rdfvalue.SessionID(flow_name="test2")
EVENTS = ["TestEvent"]
received_events = []
@flow.EventHandler(auth_required=True)
def ProcessMessage(self, message=None, event=None):
# Store the results for later inspection.
self.__class__.received_events.append((message, event))
class ClientListener(flow.EventListener):
well_known_session_id = rdfvalue.SessionID(flow_name="test3")
EVENTS = ["TestEvent"]
received_events = []
@flow.EventHandler(auth_required=True, allow_client_access=True)
def ProcessMessage(self, message=None, event=None):
# Store the results for later inspection.
self.__class__.received_events.append((message, event))
class FlowDoneListener(flow.EventListener):
well_known_session_id = rdfvalue.SessionID(queue=rdfvalue.RDFURN("EV"),
flow_name="FlowDone")
EVENTS = ["Not used"]
received_events = []
@flow.EventHandler(auth_required=True)
def ProcessMessage(self, message=None, event=None):
_ = event
# Store the results for later inspection.
FlowDoneListener.received_events.append(message)
class GeneralFlowsTest(BasicFlowTest):
"""Tests some flows."""
def testCallState(self):
"""Test the ability to chain flows."""
CallStateFlow.success = False
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper("CallStateFlow", ClientMock(),
client_id=self.client_id,
token=self.token):
pass
self.assertEqual(CallStateFlow.success, True)
def Work(self, client_mock, worker_mock):
while True:
client_processed = client_mock.Next()
flows_run = []
for flow_run in worker_mock.Next():
flows_run.append(flow_run)
if client_processed == 0 and not flows_run:
break
def testDelayedCallState(self):
"""Tests the ability to delay a CallState invocation."""
with test_lib.FakeTime(10000):
client_mock = ClientMock()
client_mock = test_lib.MockClient(self.client_id, client_mock,
token=self.token)
worker_mock = test_lib.MockWorker(check_flow_errors=True,
token=self.token)
flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="DelayedCallStateFlow",
token=self.token)
self.Work(client_mock, worker_mock)
# We should have done the first CallState so far.
self.assertEqual(DelayedCallStateFlow.flow_ran, 1)
with test_lib.FakeTime(10050):
# 50 seconds more is not enough.
self.Work(client_mock, worker_mock)
self.assertEqual(DelayedCallStateFlow.flow_ran, 1)
with test_lib.FakeTime(10100):
# But 100 is.
self.Work(client_mock, worker_mock)
self.assertEqual(DelayedCallStateFlow.flow_ran, 2)
def testChainedFlow(self):
"""Test the ability to chain flows."""
ParentFlow.success = False
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper("ParentFlow", ClientMock(),
client_id=self.client_id,
token=self.token):
pass
self.assertEqual(ParentFlow.success, True)
def testCreatorPropagation(self):
# Instantiate the flow using one username.
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="ParentFlow", sync=False,
token=access_control.ACLToken(username="original_user",
reason="testing"))
# Run the flow using another user ("test").
for _ in test_lib.TestFlowHelper(session_id, ClientMock(),
client_id=self.client_id,
token=self.token):
pass
self.assertEqual(ParentFlow.success, True)
subflows = list(aff4.FACTORY.Open(
session_id, token=self.token).ListChildren())
self.assertEqual(len(subflows), 1)
child_flow = aff4.FACTORY.Open(subflows[0], token=self.token)
self.assertEqual(child_flow.GetRunner().context.creator, "original_user")
def testFlowLabelChecking(self):
self.CreateUser("noadmin")
noadmin_token = access_control.ACLToken(username="noadmin",
reason="testing")
with self.assertRaises(access_control.UnauthorizedAccess):
for _ in test_lib.TestFlowHelper("AdminOnlyChildFlow", ClientMock(),
client_id=self.client_id,
token=noadmin_token, sync=False):
pass
with self.assertRaises(RuntimeError):
for _ in test_lib.TestFlowHelper("AdminOnlyParentFlow", ClientMock(),
client_id=self.client_id,
token=noadmin_token, sync=False):
pass
self.CreateAdminUser("adminuser")
admin_token = access_control.ACLToken(username="adminuser",
reason="testing")
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="AdminOnlyChildFlow", sync=False,
token=admin_token)
for _ in test_lib.TestFlowHelper(session_id, ClientMock(),
client_id=self.client_id,
token=noadmin_token):
pass
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="AdminOnlyParentFlow", sync=False,
token=admin_token)
for _ in test_lib.TestFlowHelper(session_id, ClientMock(),
client_id=self.client_id,
token=noadmin_token):
pass
def testBrokenChainedFlow(self):
"""Test that exceptions are properly handled in chain flows."""
BrokenParentFlow.success = False
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper(
"BrokenParentFlow", ClientMock(), client_id=self.client_id,
check_flow_errors=False, token=self.token):
pass
self.assertEqual(BrokenParentFlow.success, True)
def testIteratedDirectoryListing(self):
"""Test that the client iterator works."""
# Install the mock
vfs.VFS_HANDLERS[rdfvalue.PathSpec.PathType.OS] = MockVFSHandler
path = "/"
# Run the flow in the simulated way
client_mock = action_mocks.ActionMock("IteratedListDirectory")
for _ in test_lib.TestFlowHelper(
"IteratedListDirectory", client_mock, client_id=self.client_id,
pathspec=rdfvalue.PathSpec(path="/",
pathtype=rdfvalue.PathSpec.PathType.OS),
token=self.token):
pass
fd = aff4.FACTORY.Open(self.client_id.Add("fs/os").Add(path),
token=self.token)
directory = [ch for ch in fd.OpenChildren()]
pb = rdfvalue.PathSpec(path=path,
pathtype=rdfvalue.PathSpec.PathType.OS)
directory2 = list(vfs.VFSOpen(pb).ListFiles())
directory.sort()
result = [x.Get(x.Schema.STAT) for x in directory]
# Make sure that the resulting directory is what it should be
for x, y in zip(result, directory2):
x.aff4path = None
self.assertEqual(x.st_mode, y.st_mode)
self.assertProtoEqual(x, y)
def testClientEventNotification(self):
"""Make sure that client events handled securely."""
ClientListener.received_events = []
NoClientListener.received_events = []
event = rdfvalue.GrrMessage(
source="C.1395c448a443c7d9",
auth_state=rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED)
event.payload = rdfvalue.PathSpec(path="foobar")
flow.Events.PublishEvent("TestEvent", event, token=self.token)
test_lib.MockWorker(token=self.token).Simulate()
# The same event should be sent to both listeners, but only the listener
# which accepts client messages should register it.
self.assertProtoEqual(ClientListener.received_events[0][0].payload,
event.payload)
self.assertEqual(NoClientListener.received_events, [])
def testFlowNotification(self):
FlowDoneListener.received_events = []
# Install the mock
vfs.VFS_HANDLERS[rdfvalue.PathSpec.PathType.OS] = MockVFSHandler
path = rdfvalue.PathSpec(path="/",
pathtype=rdfvalue.PathSpec.PathType.OS)
# Run the flow in the simulated way
client_mock = action_mocks.ActionMock("IteratedListDirectory")
for _ in test_lib.TestFlowHelper(
"IteratedListDirectory", client_mock, client_id=self.client_id,
notification_urn=rdfvalue.SessionID(queue=rdfvalue.RDFURN("EV"),
flow_name="FlowDone"),
pathspec=path, token=self.token):
pass
# The event goes to an external queue so we need another worker.
worker = test_lib.MockWorker(queues=[rdfvalue.RDFURN("EV")],
token=self.token)
worker.Simulate()
self.assertEqual(len(FlowDoneListener.received_events), 1)
flow_event = FlowDoneListener.received_events[0].payload
self.assertEqual(flow_event.flow_name, "IteratedListDirectory")
self.assertEqual(flow_event.client_id, "aff4:/C.1000000000000000")
self.assertEqual(flow_event.status, rdfvalue.FlowNotification.Status.OK)
def testEventNotification(self):
"""Test that events are sent to listeners."""
NoClientListener.received_events = []
worker = test_lib.MockWorker(token=self.token)
event = rdfvalue.GrrMessage(
session_id=rdfvalue.SessionID(flow_name="SomeFlow"),
name="test message",
payload=rdfvalue.PathSpec(path="foobar", pathtype="TSK"),
source="aff4:/C.0000000000000001", auth_state="AUTHENTICATED")
    # Not allowed to publish a message from a client.
flow.Events.PublishEvent("TestEvent", event, token=self.token)
worker.Simulate()
self.assertEqual(NoClientListener.received_events, [])
event.source = "Source"
# First make the message unauthenticated.
event.auth_state = rdfvalue.GrrMessage.AuthorizationState.UNAUTHENTICATED
# Publish the event.
flow.Events.PublishEvent("TestEvent", event, token=self.token)
worker.Simulate()
# This should not work - the unauthenticated message is dropped.
self.assertEqual(NoClientListener.received_events, [])
# Now make the message authenticated.
event.auth_state = rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED
# Publish the event.
flow.Events.PublishEvent("TestEvent", event, token=self.token)
worker.Simulate()
# This should now work:
self.assertEqual(len(NoClientListener.received_events), 1)
# Make sure the source is correctly propagated.
self.assertEqual(NoClientListener.received_events[0][0].source,
"aff4:/Source")
self.assertEqual(NoClientListener.received_events[0][1].path, "foobar")
NoClientListener.received_events = []
# Now schedule ten events at the same time.
for i in xrange(10):
event.source = "Source%d" % i
flow.Events.PublishEvent("TestEvent", event, token=self.token)
worker.Simulate()
self.assertEqual(len(NoClientListener.received_events), 10)
# Events do not have to be delivered in order so we sort them here for
# comparison.
NoClientListener.received_events.sort(key=lambda x: x[0].source)
for i in range(10):
self.assertEqual(NoClientListener.received_events[i][0].source,
"aff4:/Source%d" % i)
self.assertEqual(NoClientListener.received_events[i][1].path, "foobar")
def testClientPrioritization(self):
"""Test that flow priorities work on the client side."""
result = []
client_mock = PriorityClientMock(result)
client_mock = test_lib.MockClient(self.client_id, client_mock,
token=self.token)
worker_mock = test_lib.MockWorker(check_flow_errors=True,
token=self.token)
# Start some flows with different priorities.
args = [(rdfvalue.GrrMessage.Priority.LOW_PRIORITY, "low priority"),
(rdfvalue.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority"),
(rdfvalue.GrrMessage.Priority.LOW_PRIORITY, "low priority2"),
(rdfvalue.GrrMessage.Priority.HIGH_PRIORITY, "high priority"),
(rdfvalue.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority2")]
for (priority, msg) in args:
flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="PriorityFlow", msg=msg,
priority=priority, token=self.token)
while True:
client_processed = client_mock.Next()
flows_run = []
for flow_run in worker_mock.Next():
flows_run.append(flow_run)
if client_processed == 0 and not flows_run:
break
# The flows should be run in order of priority.
self.assertEqual(result[0:1],
[u"high priority"])
self.assertEqual(sorted(result[1:3]),
[u"medium priority", u"medium priority2"])
self.assertEqual(sorted(result[3:5]),
[u"low priority", u"low priority2"])
def testWorkerPrioritization(self):
"""Test that flow priorities work on the worker side."""
result = []
client_mock = PriorityClientMock(result)
client_mock = test_lib.MockClient(self.client_id, client_mock,
token=self.token)
worker_mock = test_lib.MockWorker(check_flow_errors=True,
token=self.token)
# Start some flows with different priorities.
args = [(rdfvalue.GrrMessage.Priority.LOW_PRIORITY, "low priority"),
(rdfvalue.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority"),
(rdfvalue.GrrMessage.Priority.LOW_PRIORITY, "low priority2"),
(rdfvalue.GrrMessage.Priority.HIGH_PRIORITY, "high priority"),
(rdfvalue.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority2")]
server_result = []
PriorityFlow.storage = server_result
for (priority, msg) in args:
flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="PriorityFlow", msg=msg,
priority=priority, token=self.token)
while True:
# Run all the clients first so workers have messages to choose from.
client_processed = 1
while client_processed:
client_processed = client_mock.Next()
# Now process the results, this should happen in the correct order.
flows_run = []
for flow_run in worker_mock.Next():
flows_run.append(flow_run)
if not flows_run:
break
# The flows should be run in order of priority.
self.assertEqual(server_result[0:1],
[u"high priority"])
self.assertEqual(sorted(server_result[1:3]),
[u"medium priority", u"medium priority2"])
self.assertEqual(sorted(server_result[3:5]),
[u"low priority", u"low priority2"])
class ResourcedWorker(test_lib.MockWorker):
USER_CPU = [1, 20, 5, 16]
SYSTEM_CPU = [4, 20, 2, 8]
NETWORK_BYTES = [180, 1000, 580, 2000]
class FlowLimitTests(BasicFlowTest):
def RunFlow(self, flow_name, **kwargs):
result = {}
client_mock = CPULimitClientMock(result)
client_mock = test_lib.MockClient(self.client_id, client_mock,
token=self.token)
worker_mock = ResourcedWorker(check_flow_errors=True,
token=self.token)
flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name=flow_name,
token=self.token, **kwargs)
while True:
client_processed = client_mock.Next()
flows_run = []
for flow_run in worker_mock.Next():
flows_run.append(flow_run)
if client_processed == 0 and not flows_run:
break
return result
def testNetworkLimit(self):
"""Tests that the network limit works."""
result = self.RunFlow("NetworkLimitFlow", network_bytes_limit=10000)
self.assertEqual(result["networklimit"], [10000, 9820, 8820, 8240])
def testCPULimit(self):
"""Tests that the cpu limit works."""
result = self.RunFlow("CPULimitFlow", cpu_limit=300)
self.assertEqual(result["cpulimit"], [300, 295, 255])
class MockVFSHandler(vfs.VFSHandler):
"""A mock VFS handler with fake files."""
children = []
for x in range(10):
child = rdfvalue.StatEntry(pathspec=rdfvalue.PathSpec(
path="Foo%s" % x, pathtype=rdfvalue.PathSpec.PathType.OS))
children.append(child)
supported_pathtype = rdfvalue.PathSpec.PathType.OS
def __init__(self, base_fd, pathspec=None, progress_callback=None):
super(MockVFSHandler, self).__init__(
base_fd, pathspec=pathspec, progress_callback=progress_callback)
self.pathspec.Append(pathspec)
def ListFiles(self):
return self.children
def IsDirectory(self):
return self.pathspec.path == "/"
class PriorityClientMock(object):
in_rdfvalue = rdfvalue.DataBlob
def __init__(self, storage):
# Register us as an action plugin.
actions.ActionPlugin.classes["Store"] = self
self.storage = storage
def Store(self, data):
self.storage.append(self.in_rdfvalue(data).string)
return [rdfvalue.DataBlob(string="Hello World")]
class PriorityFlowArgs(rdfvalue.RDFProtoStruct):
protobuf = tests_pb2.PriorityFlowArgs
class PriorityFlow(flow.GRRFlow):
"""This flow is used to test priorities."""
args_type = PriorityFlowArgs
storage = []
@flow.StateHandler(next_state="Done")
def Start(self):
self.CallClient("Store", string=self.args.msg, next_state="Done")
@flow.StateHandler()
def Done(self, responses):
_ = responses
self.storage.append(self.args.msg)
class CPULimitClientMock(object):
in_rdfvalue = rdfvalue.DataBlob
def __init__(self, storage):
# Register us as an action plugin.
actions.ActionPlugin.classes["Store"] = self
self.storage = storage
def HandleMessage(self, message):
self.storage.setdefault("cpulimit", []).append(message.cpu_limit)
self.storage.setdefault("networklimit",
[]).append(message.network_bytes_limit)
class CPULimitFlow(flow.GRRFlow):
"""This flow is used to test the cpu limit."""
@flow.StateHandler(next_state="State1")
def Start(self):
self.CallClient("Store", string="Hey!", next_state="State1")
@flow.StateHandler(next_state="State2")
def State1(self):
self.CallClient("Store", string="Hey!", next_state="State2")
@flow.StateHandler(next_state="Done")
def State2(self):
self.CallClient("Store", string="Hey!", next_state="Done")
@flow.StateHandler()
def Done(self, responses):
pass
class NetworkLimitFlow(flow.GRRFlow):
"""This flow is used to test the network bytes limit."""
@flow.StateHandler(next_state="State1")
def Start(self):
self.CallClient("Store", next_state="State1")
@flow.StateHandler(next_state="State2")
def State1(self):
# The mock worker doesn't track usage so we add it here.
self.CallClient("Store", next_state="State2")
@flow.StateHandler(next_state="State3")
def State2(self):
self.CallClient("Store", next_state="State3")
@flow.StateHandler(next_state="Done")
def State3(self):
self.CallClient("Store", next_state="Done")
@flow.StateHandler()
def Done(self, responses):
pass
class ClientMock(object):
"""Mock of client actions."""
in_rdfvalue = None
out_rdfvalue = rdfvalue.RDFString
def __init__(self):
# Register us as an action plugin.
actions.ActionPlugin.classes["ReturnHello"] = self
def ReturnHello(self, _):
return [rdfvalue.RDFString("Hello World")]
class ChildFlow(flow.GRRFlow):
"""This flow will be called by our parent."""
@flow.StateHandler(next_state="ReceiveHello")
def Start(self):
self.CallClient("ReturnHello", next_state="ReceiveHello")
@flow.StateHandler()
def ReceiveHello(self, responses):
# Relay the client's message to our parent
for response in responses:
self.SendReply(rdfvalue.RDFString("Child received"))
self.SendReply(response)
class BrokenChildFlow(ChildFlow):
"""A broken flow which raises."""
@flow.StateHandler()
def ReceiveHello(self, responses):
raise IOError("Boo")
class ParentFlow(flow.GRRFlow):
"""This flow will launch a child flow."""
# This is a global flag which will be set when the flow runs.
success = False
@flow.StateHandler(next_state="ParentReceiveHello")
def Start(self):
# Call the child flow.
self.CallFlow("ChildFlow",
next_state="ParentReceiveHello")
@flow.StateHandler()
def ParentReceiveHello(self, responses):
responses = list(responses)
if (len(responses) != 2 or "Child" not in unicode(responses[0]) or
"Hello" not in unicode(responses[1])):
raise RuntimeError("Messages not passed to parent")
ParentFlow.success = True
class ParentFlowWithoutResponses(flow.GRRFlow):
"""This flow will launch a child flow."""
success = False
@flow.StateHandler(next_state="ParentReceiveHello")
def Start(self):
# Call the child flow.
self.CallFlow("ChildFlow",
send_replies=False,
next_state="ParentReceiveHello")
@flow.StateHandler()
def ParentReceiveHello(self, responses):
if responses:
raise RuntimeError("Messages are not expected to be passed to parent")
ParentFlowWithoutResponses.success = True
class BrokenParentFlow(flow.GRRFlow):
"""This flow will launch a broken child flow."""
# This is a global flag which will be set when the flow runs.
success = False
@flow.StateHandler(next_state="ReceiveHello")
def Start(self):
# Call the child flow.
self.CallFlow("BrokenChildFlow",
next_state="ReceiveHello")
@flow.StateHandler()
def ReceiveHello(self, responses):
if (responses or
responses.status.status == rdfvalue.GrrStatus.ReturnedStatus.OK):
raise RuntimeError("Error not propagated to parent")
BrokenParentFlow.success = True
class CallStateFlow(flow.GRRFlow):
"""A flow that calls one of its own states."""
# This is a global flag which will be set when the flow runs.
success = False
@flow.StateHandler(next_state="ReceiveHello")
def Start(self):
# Call the receive state.
self.CallState([rdfvalue.RDFString("Hello")],
next_state="ReceiveHello",
request_data={"test_req_data": 2})
@flow.StateHandler()
def ReceiveHello(self, responses):
if responses.First() != "Hello":
raise RuntimeError("Did not receive hello.")
if responses.request_data["test_req_data"] != 2:
raise RuntimeError("request_data did not propagate.")
CallStateFlow.success = True
class DelayedCallStateFlow(flow.GRRFlow):
"""A flow that calls one of its own states with a delay."""
# This is a global flag which will be set when the flow runs.
flow_ran = 0
@flow.StateHandler(next_state="ReceiveHello")
def Start(self):
# Call the child flow.
self.CallState([rdfvalue.RDFString("Hello")],
next_state="ReceiveHello")
@flow.StateHandler(next_state="DelayedHello")
def ReceiveHello(self, responses):
if responses.First() != "Hello":
raise RuntimeError("Did not receive hello.")
DelayedCallStateFlow.flow_ran = 1
# Call the child flow.
self.CallState([rdfvalue.RDFString("Hello")],
next_state="DelayedHello",
start_time=rdfvalue.RDFDatetime().Now() + 100)
@flow.StateHandler()
def DelayedHello(self, responses):
if responses.First() != "Hello":
raise RuntimeError("Did not receive hello.")
DelayedCallStateFlow.flow_ran = 2
class BadArgsFlow1Args(rdfvalue.RDFProtoStruct):
protobuf = tests_pb2.BadArgsFlow1Args
class BadArgsFlow1(flow.GRRFlow):
"""A flow that has args that mismatch type info."""
args_type = BadArgsFlow1Args
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import ConfigParser
import importlib
import inspect
import logging
import os
import sys
from logging.config import fileConfig
from kafka_utils.kafka_cluster_manager.cluster_info.cluster_balancer \
import ClusterBalancer
from kafka_utils.kafka_cluster_manager.cluster_info.partition_count_balancer \
import PartitionCountBalancer
from kafka_utils.kafka_cluster_manager.cluster_info.partition_measurer \
import PartitionMeasurer
from kafka_utils.kafka_cluster_manager.cluster_info.partition_measurer \
import UniformPartitionMeasurer
from kafka_utils.kafka_cluster_manager.cluster_info.replication_group_parser \
import DefaultReplicationGroupParser
from kafka_utils.kafka_cluster_manager.cluster_info.replication_group_parser \
import ReplicationGroupParser
from kafka_utils.kafka_cluster_manager.cmds.decommission import DecommissionCmd
from kafka_utils.kafka_cluster_manager.cmds.rebalance import RebalanceCmd
from kafka_utils.kafka_cluster_manager.cmds.replace import ReplaceBrokerCmd
from kafka_utils.kafka_cluster_manager.cmds.set_replication_factor import SetReplicationFactorCmd
from kafka_utils.kafka_cluster_manager.cmds.stats import StatsCmd
from kafka_utils.kafka_cluster_manager.cmds.store_assignments \
import StoreAssignmentsCmd
from kafka_utils.util import config
_log = logging.getLogger()
GENETIC_BALANCER_MODULE = \
"kafka_utils.kafka_cluster_manager.cluster_info.genetic_balancer"
PARTITION_COUNT_BALANCER_MODULE = \
"kafka_utils.kafka_cluster_manager.cluster_info.partition_count_balancer"
def get_module(module_full_name):
if ':' in module_full_name:
path, module_name = module_full_name.rsplit(':', 1)
if not os.path.isdir(path):
print("{0} is not a valid directory".format(path), file=sys.stderr)
sys.exit(1)
sys.path.append(path)
return importlib.import_module(module_name)
else:
return importlib.import_module(module_full_name)
def dynamic_import(module_full_name, base_class):
module = get_module(module_full_name)
for _, class_type in inspect.getmembers(module, inspect.isclass):
if (issubclass(class_type, base_class) and
class_type is not base_class):
return class_type
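# Illustrative sketch only (hypothetical path and module name): the --group-parser,
# --partition-measurer and --cluster-balancer options below accept either a plain
# importable module name or a "path_to_add_to_py_path:module" specifier. For
# example, dynamic_import("/opt/kafka_plugins:my_balancer", ClusterBalancer) would
# append /opt/kafka_plugins to sys.path, import my_balancer and return a class
# from it that subclasses ClusterBalancer (the base class itself is skipped).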
def parse_args():
"""Parse the arguments."""
parser = argparse.ArgumentParser(
description='Manage and describe partition layout over brokers of'
' a cluster.',
)
parser.add_argument(
'--cluster-type',
'-t',
dest='cluster_type',
help='Type of the cluster.',
type=str,
required=True,
)
parser.add_argument(
'--cluster-name',
'-c',
dest='cluster_name',
        help='Name of the cluster (defaults to the local cluster).',
)
parser.add_argument(
'--discovery-base-path',
dest='discovery_base_path',
type=str,
help='Path of the directory containing the <cluster_type>.yaml config',
)
parser.add_argument(
'--logconf',
type=str,
help='Path to logging configuration file. Default: log to console.',
)
parser.add_argument(
'--apply',
action='store_true',
        help='Proposed plan will be executed upon confirmation.',
)
parser.add_argument(
'--no-confirm',
action='store_true',
        help='Proposed plan will be executed without confirmation.'
        ' The --apply flag is also required.',
)
parser.add_argument(
'--write-to-file',
dest='proposed_plan_file',
metavar='<reassignment-plan-file-path>',
type=str,
help='Write the partition reassignment plan '
'to a json file.',
)
parser.add_argument(
'--group-parser',
type=str,
help='Module containing an implementation of ReplicationGroupParser. '
'The module should be specified as path_to_include_to_py_path:module. '
'Ex: "/module/path:module.parser". '
'If not specified the default replication group parser will create '
'only one group for all brokers.',
)
parser.add_argument(
'--partition-measurer',
type=str,
help='Module containing an implementation of PartitionMeasurer. '
'The module should be specified as path_to_include_to_py_path:module. '
'Default: Assign each partition a weight and size of 1.'
)
parser.add_argument(
'--measurer-args',
type=str,
action='append',
default=[],
help='Argument list that is passed to the chosen PartitionMeasurer. '
'Ex: --measurer-args "--n 10" will pass ["--n", "10"] to the '
'PartitionMeasurer\'s parse_args method.'
)
parser.add_argument(
'--cluster-balancer',
type=str,
help='Module containing an implementation of ClusterBalancer. '
'The module should be specified as path_to_include_to_py_path:module. '
'Default: PartitionCountBalancer.',
)
parser.add_argument(
'--balancer-args',
type=str,
action='append',
default=[],
help='Argument list that is passed to the chosen ClusterBalancer. '
'Ex: --balancer-args "--n 10" will pass ["--n", "10"] to the '
'ClusterBalancer\'s parse_args method.'
)
parser.add_argument(
'--partition-count-balancer',
action='store_const',
const=PARTITION_COUNT_BALANCER_MODULE,
dest='cluster_balancer',
help='Use the number of partitions on each broker to balance the '
'cluster.',
)
parser.add_argument(
'--genetic-balancer',
action='store_const',
const=GENETIC_BALANCER_MODULE,
dest='cluster_balancer',
help='Use partition metrics and a genetic algorithm to balance the '
'cluster.',
)
subparsers = parser.add_subparsers()
RebalanceCmd().add_subparser(subparsers)
DecommissionCmd().add_subparser(subparsers)
StatsCmd().add_subparser(subparsers)
StoreAssignmentsCmd().add_subparser(subparsers)
ReplaceBrokerCmd().add_subparser(subparsers)
SetReplicationFactorCmd().add_subparser(subparsers)
return parser.parse_args()
def configure_logging(log_conf=None):
if log_conf:
try:
fileConfig(log_conf, disable_existing_loggers=False)
except ConfigParser.NoSectionError:
logging.basicConfig(level=logging.INFO)
_log.error(
'Failed to load {logconf} file.'
.format(logconf=log_conf),
)
else:
logging.basicConfig(level=logging.INFO)
def run():
args = parse_args()
configure_logging(args.logconf)
cluster_config = config.get_cluster_config(
args.cluster_type,
args.cluster_name,
args.discovery_base_path,
)
if args.group_parser:
rg_parser = dynamic_import(args.group_parser, ReplicationGroupParser)()
else:
rg_parser = DefaultReplicationGroupParser()
if args.partition_measurer:
partition_measurer = dynamic_import(
args.partition_measurer,
PartitionMeasurer
)
else:
partition_measurer = UniformPartitionMeasurer
if args.cluster_balancer:
cluster_balancer = dynamic_import(
args.cluster_balancer,
ClusterBalancer
)
else:
cluster_balancer = PartitionCountBalancer
args.command(
cluster_config,
rg_parser,
partition_measurer,
cluster_balancer,
args,
)
|
import os
import uuid
try:
from ipalib import api
from ipalib import errors
from ipapython.ipautil import kinit_keytab
ipalib_imported = True
except ImportError:
# ipalib/ipapython are not available in PyPy yet, don't make it
# a showstopper for the tests.
ipalib_imported = False
from oslo_config import cfg
from oslo_log import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class IPANovaJoinBase(object):
def __init__(self):
try:
self.ntries = CONF.connect_retries
except cfg.NoSuchOptError:
self.ntries = 1
if not ipalib_imported:
return
self.ccache = "MEMORY:" + str(uuid.uuid4())
os.environ['KRB5CCNAME'] = self.ccache
if self._ipa_client_configured() and not api.isdone('finalize'):
api.bootstrap(context='novajoin')
api.finalize()
def __get_connection(self):
"""Make a connection to IPA or raise an error."""
tries = 0
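        # Attempt to connect up to self.ntries + 1 times, re-kinit'ing with the
        # nova service keytab whenever the credential cache is missing or the
        # ticket has expired; any other exception propagates. If every attempt
        # fails, the method returns without a connection and the next IPA
        # command invocation will surface the error.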
while tries <= self.ntries:
try:
api.Backend.rpcclient.connect()
except (errors.CCacheError, errors.TicketExpired) as e:
LOG.debug("kinit again: %s", e)
# pylint: disable=no-member
kinit_keytab(str('nova/%s@%s' %
(api.env.host, api.env.realm)),
CONF.keytab,
self.ccache)
tries += 1
else:
return
def _call_ipa(self, command, *args, **kw):
"""Make an IPA call.
Try twice to run the command. One execution may fail if we
previously had a connection but the ticket expired.
"""
if not api.Backend.rpcclient.isconnected():
self.__get_connection()
if 'version' not in kw:
kw['version'] = u'2.146' # IPA v4.2.0 for compatibility
try:
api.Command[command](*args, **kw)
except (errors.CCacheError, errors.TicketExpired):
LOG.debug("Refresh authentication")
api.Backend.rpcclient.disconnect()
self.__get_connection()
api.Command[command](*args, **kw)
def _ipa_client_configured(self):
"""Determine if the machine is an enrolled IPA client.
Return boolean indicating whether this machine is enrolled
in IPA. This is a rather weak detection method but better
than nothing.
"""
return os.path.exists('/etc/ipa/default.conf')
class IPAClient(IPANovaJoinBase):
def add_host(self, hostname, ipaotp, metadata=None, image_metadata=None):
"""Add a host to IPA.
If requested in the metadata, add a host to IPA. The assumption
is that hostname is already fully-qualified.
Because this is triggered by a metadata request, which can happen
multiple times, first we try to update the OTP in the host entry
and if that fails due to NotFound the host is added.
"""
LOG.debug('In IPABuildInstance')
if not self._ipa_client_configured():
LOG.debug('IPA is not configured')
return False
if metadata is None:
metadata = {}
if image_metadata is None:
image_metadata = {}
params = [hostname]
hostclass = metadata.get('ipa_hostclass', '')
location = metadata.get('ipa_host_location', '')
osdistro = image_metadata.get('os_distro', '')
osver = image_metadata.get('os_version', '')
hostargs = {
'description': u'IPA host for OpenStack',
'userpassword': ipaotp.decode('UTF-8'),
'force': True # we don't have an ip addr yet so
# use force to add anyway
}
if hostclass:
hostargs['userclass'] = hostclass
if osdistro or osver:
hostargs['nsosversion'] = '%s %s' % (osdistro, osver)
hostargs['nsosversion'] = hostargs['nsosversion'].strip()
if location:
hostargs['nshostlocation'] = location
modargs = {
'userpassword': ipaotp.decode('UTF-8'),
}
if not ipalib_imported:
return True
try:
self._call_ipa('host_mod', *params, **modargs)
except errors.NotFound:
try:
self._call_ipa('host_add', *params, **hostargs)
except (errors.DuplicateEntry, errors.ValidationError,
errors.DNSNotARecordError):
pass
except errors.ValidationError:
# Updating the OTP on an enrolled-host is not allowed
# in IPA and really a no-op.
return False
return True
def delete_host(self, hostname, metadata=None):
"""Delete a host from IPA and remove all related DNS entries."""
LOG.debug('In IPADeleteInstance')
if not self._ipa_client_configured():
LOG.debug('IPA is not configured')
return
if metadata is None:
metadata = {}
# TODO(rcrit): lookup instance in nova to get metadata to see if
# the host was enrolled. For now assume yes.
params = [hostname]
kw = {
'updatedns': True,
}
try:
self._call_ipa('host_del', *params, **kw)
except errors.NotFound:
pass
def add_ip(self, hostname, floating_ip):
"""Add a floating IP to a given hostname."""
LOG.debug('In add_ip')
if not self._ipa_client_configured():
LOG.debug('IPA is not configured')
return
params = [{"__dns_name__": CONF.domain + "."},
{"__dns_name__": hostname}]
kw = {'a_part_ip_address': floating_ip}
try:
self._call_ipa('dnsrecord_add', *params, **kw)
except (errors.DuplicateEntry, errors.ValidationError):
pass
def remove_ip(self, hostname, floating_ip):
"""Remove a floating IP from a given hostname."""
LOG.debug('In remove_ip')
if not self._ipa_client_configured():
LOG.debug('IPA is not configured')
return
        LOG.debug('Currently a no-op')
|
"""Tests for the Windows Shortcut (LNK) parser."""
import unittest
from plaso.lib import definitions
from plaso.parsers import winlnk
from tests.parsers import test_lib
class WinLnkParserTest(test_lib.ParserTestCase):
"""Tests for the Windows Shortcut (LNK) parser."""
def testParse(self):
"""Tests the Parse function."""
parser = winlnk.WinLnkParser()
storage_writer = self._ParseFile(['example.lnk'], parser)
# Link information:
# Creation time : Jul 13, 2009 23:29:02.849131000 UTC
# Modification time : Jul 14, 2009 01:39:18.220000000 UTC
# Access time : Jul 13, 2009 23:29:02.849131000 UTC
# Description : @%windir%\system32\migwiz\wet.dll,-590
# Relative path : .\migwiz\migwiz.exe
# Working directory : %windir%\system32\migwiz
# Icon location : %windir%\system32\migwiz\migwiz.exe
# Environment variables location : %windir%\system32\migwiz\migwiz.exe
self.assertEqual(storage_writer.number_of_events, 5)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
# A shortcut last accessed event.
expected_event_values = {
'date_time': '2009-07-13 23:29:02.8491310',
'data_type': 'windows:lnk:link',
'description': '@%windir%\\system32\\migwiz\\wet.dll,-590',
'env_var_location': '%windir%\\system32\\migwiz\\migwiz.exe',
'file_attribute_flags': 0x00000020,
'file_size': 544768,
'icon_location': '%windir%\\system32\\migwiz\\migwiz.exe',
'relative_path': '.\\migwiz\\migwiz.exe',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS,
'working_directory': '%windir%\\system32\\migwiz'}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# A shortcut creation event.
expected_event_values = {
'date_time': '2009-07-13 23:29:02.8491310',
'data_type': 'windows:lnk:link',
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
# A shortcut last modification event.
expected_event_values = {
'date_time': '2009-07-14 01:39:18.2200000',
'data_type': 'windows:lnk:link',
'description': '@%windir%\\system32\\migwiz\\wet.dll,-590',
'env_var_location': '%windir%\\system32\\migwiz\\migwiz.exe',
'file_attribute_flags': 0x00000020,
'file_size': 544768,
'icon_location': '%windir%\\system32\\migwiz\\migwiz.exe',
'relative_path': '.\\migwiz\\migwiz.exe',
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
'working_directory': '%windir%\\system32\\migwiz'}
self.CheckEventValues(storage_writer, events[2], expected_event_values)
# A distributed link tracking creation event.
expected_event_values = {
'date_time': '2009-07-14 05:45:20.5000123',
'data_type': 'windows:distributed_link_tracking:creation',
'mac_address': '00:1d:09:fa:5a:1c',
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION,
'uuid': '846ee3bb-7039-11de-9d20-001d09fa5a1c'}
self.CheckEventValues(storage_writer, events[4], expected_event_values)
def testParseLinkTargetIdentifier(self):
"""Tests the Parse function on an LNK with a link target identifier."""
parser = winlnk.WinLnkParser()
storage_writer = self._ParseFile(['NeroInfoTool.lnk'], parser)
self.assertEqual(storage_writer.number_of_events, 20)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
# A shortcut creation event.
expected_event_values = {
'date_time': '2009-06-05 20:13:20.0000000',
'data_type': 'windows:lnk:link',
'description': (
'Nero InfoTool provides you with information about the most '
'important features of installed drives, inserted discs, installed '
'software and much more. With Nero InfoTool you can find out all '
'about your drive and your system configuration.'),
'drive_serial_number': 0x70ecfa33,
'drive_type': 3,
'file_attribute_flags': 0x00000020,
'file_size': 4635160,
'icon_location': (
'%ProgramFiles%\\Nero\\Nero 9\\Nero InfoTool\\InfoTool.exe'),
'local_path': (
'C:\\Program Files (x86)\\Nero\\Nero 9\\Nero InfoTool\\'
'InfoTool.exe'),
'relative_path': (
'..\\..\\..\\..\\..\\..\\..\\..\\Program Files (x86)\\'
'Nero\\Nero 9\\Nero InfoTool\\InfoTool.exe'),
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION,
'volume_label': 'OS',
'working_directory': (
'C:\\Program Files (x86)\\Nero\\Nero 9\\Nero InfoTool')}
self.CheckEventValues(storage_writer, events[16], expected_event_values)
# A shell item event.
expected_event_values = {
'date_time': '2009-06-05 20:13:20',
'data_type': 'windows:shell_item:file_entry',
'file_reference': '81349-1',
'long_name': 'InfoTool.exe',
'name': 'InfoTool.exe',
'origin': 'NeroInfoTool.lnk',
'shell_item_path': (
'<My Computer> C:\\Program Files (x86)\\Nero\\Nero 9\\'
'Nero InfoTool\\InfoTool.exe'),
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION}
self.CheckEventValues(storage_writer, events[12], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
from webservice.NexusHandler import NexusHandler as BaseHandler
from webservice.webmodel import StatsComputeOptions
from webservice.NexusHandler import nexus_handler
from webservice.NexusHandler import DEFAULT_PARAMETERS_SPEC
from webservice.webmodel import NexusResults, NexusProcessingException
import BaseDomsHandler
import datafetch
@nexus_handler
class DomsStatsQueryHandler(BaseDomsHandler.BaseDomsQueryHandler):
name = "DOMS In-Situ Stats Lookup"
path = "/domsstats"
description = ""
params = {}
singleton = True
def __init__(self):
BaseHandler.__init__(self)
def calc(self, computeOptions, **args):
source = computeOptions.get_argument("source", None)
startTime = computeOptions.get_argument("s", None)
endTime = computeOptions.get_argument("e", None)
bbox = computeOptions.get_argument("b", None)
timeTolerance = computeOptions.get_float_arg("tt")
depth_min = computeOptions.get_float_arg("depthMin", default=None)
depth_max = computeOptions.get_float_arg("depthMax", default=None)
radiusTolerance = computeOptions.get_float_arg("rt")
platforms = computeOptions.get_argument("platforms", None)
source1 = self.getDataSourceByName(source)
if source1 is None:
raise Exception("Source '%s' not found"%source)
count, bounds = datafetch.getCount(source1, startTime, endTime, bbox, depth_min, depth_max, platforms)
args = {
"source": source,
"startTime": startTime,
"endTime": endTime,
"bbox": bbox,
"timeTolerance": timeTolerance,
"depthMin": depth_min,
"depthMax": depth_max,
"radiusTolerance": radiusTolerance,
"platforms": platforms
}
return BaseDomsHandler.DomsQueryResults(results={}, args=args, details={}, bounds=bounds, count=count, computeOptions=None)
|
__author__ = 'Ahmed G. Ali'
import settings
db = settings.BIOSTUDIES_DB
|
"""Tests for TracingReducer."""
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.experimental.mcmc.internal import test_fixtures
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class TracingReducerTest(test_util.TestCase):
@test_util.jax_disable_test_missing_functionality('dynamic-size TensorArray')
def test_tf_while(self):
def trace_fn(sample, pkr):
return sample, (sample, pkr), {'one': sample, 'two': pkr}
tracer = tfp.experimental.mcmc.TracingReducer(trace_fn=trace_fn)
state = tracer.initialize(tf.zeros(()), tf.zeros(()))
def _body(sample, pkr, state):
new_state = tracer.one_step(sample, state, pkr)
return (sample + 1, pkr + 2, new_state)
_, _, state = tf.while_loop(
cond=lambda i, _, __: i < 3,
body=_body,
loop_vars=(1., 2., state))
final_trace = self.evaluate(tracer.finalize(state))
self.assertEqual(3, len(final_trace))
self.assertAllEqual([1, 2], final_trace[0])
self.assertAllEqual(([1, 2], [2, 4]), final_trace[1])
self.assertAllEqualNested(final_trace[2], ({'one': [1, 2], 'two': [2, 4]}))
@test_util.jax_disable_test_missing_functionality('dynamic-size TensorArray')
def test_in_sample_fold(self):
tracer = tfp.experimental.mcmc.TracingReducer()
fake_kernel = test_fixtures.TestTransitionKernel()
trace, final_state, kernel_results = tfp.experimental.mcmc.sample_fold(
num_steps=3,
current_state=0.,
kernel=fake_kernel,
reducer=tracer,
seed=test_util.test_seed())
trace, final_state, kernel_results = self.evaluate([
trace,
final_state,
kernel_results])
self.assertAllEqual([1, 2, 3], trace[0])
self.assertAllEqual([1, 2, 3], trace[1].counter_1)
self.assertAllEqual([2, 4, 6], trace[1].counter_2)
self.assertEqual(3, final_state)
self.assertEqual(3, kernel_results.counter_1)
self.assertEqual(6, kernel_results.counter_2)
def test_known_size(self):
tracer = tfp.experimental.mcmc.TracingReducer(size=3)
self.assertEqual(tracer.size, 3)
state = tracer.initialize(tf.zeros(()), tf.zeros(()))
for sample in range(3):
state = tracer.one_step(sample, state, sample)
all_states, final_trace = tracer.finalize(state)
self.assertAllEqual([3], tensorshape_util.as_list(all_states.shape))
self.assertAllEqual([3], tensorshape_util.as_list(final_trace.shape))
all_states, final_trace = self.evaluate([all_states, final_trace])
self.assertAllEqual([0, 1, 2], all_states)
self.assertAllEqual([0, 1, 2], final_trace)
if __name__ == '__main__':
test_util.main()
|
import sys
from src.base.solution import Solution
from src.tests.part1.q076_test_min_win_substr import MinWinSubStrTestCases
class MinWinSubStr(Solution):
def verify_output(self, test_output, output):
return test_output == output
def print_output(self, output):
print(output)
def run_test(self, input):
return self.minWindow(input[0], input[1])
def gen_test_cases(self):
return MinWinSubStrTestCases()
def minWindow(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
min_win, sl, cnt, head, si, ei = sys.maxint, len(s), len(t), 0, 0, 0
lkp = {chr(x):0 for x in xrange(128)}
for ch in t: lkp[ch] += 1
# print(lkp)
while ei < sl:
# print((si, ei, min_win, cnt, s[si], s[ei]))
if lkp[s[ei]] > 0: cnt -= 1
lkp[s[ei]] -= 1
ei += 1
while cnt == 0:
# print(('inner', si, ei, cnt))
if min_win > ei - si:
min_win = ei - si
head = si
if lkp[s[si]] == 0: cnt += 1
lkp[s[si]] += 1
si += 1
return s[head: head + min_win] if min_win != sys.maxint else ''
if __name__ == '__main__':
sol = MinWinSubStr()
sol.run_tests()
|
import copy
import json
from nova.openstack.common import log as logging
from oslo.config import cfg
from powervc_nova import _
from powervc_nova.network.powerkvm import agent
from powervc_nova.network.powerkvm.agent import micro_op_builder as mob,\
commandlet
from powervc_nova.network.powerkvm.agent import service_network_restart \
as snr
from powervc_nova.network.powerkvm.agent.common import exception
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class OVSOperationRunner():
"""
Takes in a current DOM and a desired DOM, and determines the set of micro
operations that need to be executed to make the current DOM match the
desired DOM.
"""
def __init__(self, current_dom, desired_dom, force_flag, rollback):
"""
:param current_dom: A HostOVSNetworkConfig object that represents the
current state of the host.
:param desired_dom: A HostOVSNetworkConfig object that represents the
desired state of the host.
:param force_flag: If true, run execute and ignore warnings (errors
are never ignored).
:param rollback: Whether the operation should be rolled back before
completion, often to test the rollback mechanism.
"""
self.current_dom = current_dom
self.desired_dom = desired_dom
self.force_flag = force_flag
self.rollback = rollback
def update_host_ovs(self, context):
"""update host ovs data on current host"""
LOG.info(_('Updating Open vSwitch host data...'))
LOG.debug("Current DOM: %s" % self.current_dom.to_dict())
LOG.debug("Requested DOM: %s" % self.desired_dom.to_dict())
builder = mob.MicroOperationBuilder(context,
self.current_dom,
self.desired_dom,
self.rollback)
mo_list = builder.get_micro_ops_for_update()
# run validation
return self._run_micro_op_list(mo_list)
def _run_micro_op_list(self, mo_list):
"""
Run validation and, if force_flag is true or no warnings, run
execute. Any errors encountered during execute will result in
an undo call, in reverse order, on all previously executed
micro ops.
:param mo_list: A list of all micro ops needed to run the
desired operation. This list should be
generated by the micro ops builder.
:returns: A dictionary containing all warnings and errors
encountered during the running of the micro ops.
"""
current_dom = copy.deepcopy(self.current_dom)
warning_list = []
return_dict = {}
ifcfgs = ''
ovsvsctl_show = {}
# always run validation, even if force flag is
# set to True to make sure there are no errors
# encountered.
ops_ran_list = []
for micro_op in mo_list:
try:
ops_ran_list.append(micro_op.__class__.__name__)
LOG.debug("running micro op %s with DOM %s" %
(micro_op.__class__,
current_dom))
current_dom, curr_warning_list = \
micro_op.validate(current_dom)
warning_list.extend(curr_warning_list)
for warning in curr_warning_list:
LOG.warn(_('Warning "%(warn_name)s" occurred during '
'validation of operation %(oper)s: %(warn)s') %
{'warn_name': warning.name,
'oper': micro_op.__class__.__name__,
'warn': warning})
except Exception as exc:
LOG.exception(exc)
LOG.error(_("List of operations run: %s" % ops_ran_list))
return_dict[agent.ERRORS_KEY] = [{'message': '%s' % exc}]
break
# if the force flag is set we can ignore warnings but
# we cannot avoid errors, so check to be sure there
# were no errors
ops_ran_list = []
if((self.force_flag or len(warning_list) == 0)
and agent.ERRORS_KEY not in return_dict):
# in case of error, last_index is used to determine
# where to start undo from
last_index = -1
# execute micro op list
for i in range(0, len(mo_list)):
try:
ops_ran_list.append(mo_list[i].__class__.__name__)
mo_list[i].execute()
except Exception as exc:
LOG.exception(exc)
LOG.error(_("List of operations run: %s" % ops_ran_list))
return_dict[agent.ERRORS_KEY] = [{'message': "%s" % exc}]
last_index = i
break
# do we need to undo because of error?
if last_index != -1:
LOG.error(_("Error during operation execution, undoing "
"operations..."))
# Get the current state of the ifcfg files and ovs to log later
try:
ifcfgs = commandlet.CommandExecutor.\
get_all_ifcfg_files_for_logging()
ovsvsctl_show = \
commandlet.CommandExecutor.send_vsctl_command()
except Exception as e:
LOG.exception(e)
undo_list = []
# yes, undo needed; undo in reverse order
reversed_list = self._reorder_ops_for_undo(mo_list,
last_index)
for op in reversed_list:
try:
op.undo()
undo_list.append(op.__class__.__name__)
except Exception as exc:
# if we hit an error during undo, we will
# add the error to the error list and continue
# to attempt to undo the remaining micro ops
LOG.exception(exc)
return_dict[agent.ERRORS_KEY].append(
{'message': '%s' % exc})
LOG.error(_("Undone operations: %s" % undo_list))
# we are not doing an execution, so return the warnings
else:
# add warnings to return list
if len(warning_list) > 0:
return_dict[agent.WARNINGS_KEY] = []
for warning in warning_list:
return_dict[agent.WARNINGS_KEY].append(
{'message': '%s' % warning})
# Errors/warnings occurred. Log initial dom, request dom, the dom as
# it was when the error occurred, operations run, ifcfg files, and
# ovs-vsctl show output
        if return_dict:
debug_info_list = []
debug_info_list.append(_('Initial Object Model is:'))
debug_info_list.append(json.dumps(self.current_dom.to_dict(),
sort_keys=True, indent=4))
debug_info_list.append(_('Requested Object Model is:'))
debug_info_list.append(json.dumps(self.desired_dom.to_dict(),
sort_keys=True, indent=4))
debug_info_list.append(_('Current Object Model is:'))
debug_info_list.append(json.dumps(current_dom.to_dict(),
sort_keys=True, indent=4))
debug_info_list.append(_("List of operations returned by builder: "
"%s" % [mo_list[i].__class__.__name__
for i in range(0, len(mo_list))]))
if ifcfgs:
# These are only logged on errors, not on warnings
debug_info_list.append(_("Contents of ifcfg files: %s" %
ifcfgs))
debug_info_list.append(_("ovs-vsctl show: "))
debug_info_list.append(json.dumps(ovsvsctl_show,
sort_keys=True,
indent=4))
if agent.ERRORS_KEY in return_dict:
for message in debug_info_list:
LOG.error(message)
else:
for message in debug_info_list:
LOG.warn(message)
return return_dict
def _reorder_ops_for_undo(self, micro_op_list, last_index):
new_list = [micro_op_list[i] for i in range(0, last_index + 1)]
new_list.reverse()
for op in new_list:
if isinstance(op, snr.ServiceNetworkRestart):
LOG.debug("Moving network restart op to "
"the end of the list.")
new_list.remove(op)
new_list.append(op)
break
return new_list
|
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.utils import timezone
from django.http import HttpResponse, HttpRequest
from zilencer.models import Deployment, RemotePushDeviceToken, RemoteZulipServer
from zerver.decorator import has_request_variables, REQ
from zerver.lib.error_notify import do_report_error
from zerver.lib.push_notifications import send_android_push_notification, \
send_apple_push_notification
from zerver.lib.request import JsonableError
from zerver.lib.response import json_error, json_success
from zerver.lib.validator import check_dict, check_int
from zerver.models import UserProfile, PushDeviceToken, Realm
from zerver.views.push_notifications import validate_token
from typing import Any, Dict, Optional, Union, Text, cast
def validate_entity(entity):
# type: (Union[UserProfile, RemoteZulipServer]) -> None
if not isinstance(entity, RemoteZulipServer):
raise JsonableError(_("Must validate with valid Zulip server API key"))
def validate_bouncer_token_request(entity, token, kind):
# type: (Union[UserProfile, RemoteZulipServer], bytes, int) -> None
if kind not in [RemotePushDeviceToken.APNS, RemotePushDeviceToken.GCM]:
raise JsonableError(_("Invalid token type"))
validate_entity(entity)
validate_token(token, kind)
@has_request_variables
def report_error(request, deployment, type=REQ(), report=REQ(validator=check_dict([]))):
# type: (HttpRequest, Deployment, Text, Dict[str, Any]) -> HttpResponse
return do_report_error(deployment.name, type, report)
@has_request_variables
def remote_server_register_push(request, entity, user_id=REQ(),
token=REQ(), token_kind=REQ(validator=check_int), ios_app_id=None):
# type: (HttpRequest, Union[UserProfile, RemoteZulipServer], int, bytes, int, Optional[Text]) -> HttpResponse
validate_bouncer_token_request(entity, token, token_kind)
server = cast(RemoteZulipServer, entity)
# If a user logged out on a device and failed to unregister,
# we should delete any other user associations for this token
# & RemoteServer pair
RemotePushDeviceToken.objects.filter(
token=token, kind=token_kind, server=server).exclude(user_id=user_id).delete()
# Save or update
remote_token, created = RemotePushDeviceToken.objects.update_or_create(
user_id=user_id,
server=server,
kind=token_kind,
token=token,
defaults=dict(
ios_app_id=ios_app_id,
last_updated=timezone.now()))
return json_success()
@has_request_variables
def remote_server_unregister_push(request, entity, token=REQ(),
token_kind=REQ(validator=check_int), ios_app_id=None):
# type: (HttpRequest, Union[UserProfile, RemoteZulipServer], bytes, int, Optional[Text]) -> HttpResponse
validate_bouncer_token_request(entity, token, token_kind)
server = cast(RemoteZulipServer, entity)
deleted = RemotePushDeviceToken.objects.filter(token=token,
kind=token_kind,
server=server).delete()
if deleted[0] == 0:
return json_error(_("Token does not exist"))
return json_success()
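# Expected request body for remote_server_notify_push below (sketch): a dict of
# the form {"user_id": <int>, "gcm_payload": {...}, "apns_payload": {...}}. The
# GCM payload is forwarded to every Android token registered for that user on
# this server, and the APNs payload to every registered iOS token.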
@has_request_variables
def remote_server_notify_push(request, # type: HttpRequest
entity, # type: Union[UserProfile, RemoteZulipServer]
payload=REQ(argument_type='body') # type: Dict[str, Any]
):
# type: (...) -> HttpResponse
validate_entity(entity)
server = cast(RemoteZulipServer, entity)
user_id = payload['user_id']
gcm_payload = payload['gcm_payload']
apns_payload = payload['apns_payload']
android_devices = list(RemotePushDeviceToken.objects.filter(
user_id=user_id,
kind=RemotePushDeviceToken.GCM,
server=server
))
apple_devices = list(RemotePushDeviceToken.objects.filter(
user_id=user_id,
kind=RemotePushDeviceToken.APNS,
server=server
))
if android_devices:
send_android_push_notification(android_devices, gcm_payload, remote=True)
if apple_devices:
send_apple_push_notification(user_id, apple_devices, apns_payload)
return json_success()
|
import os.path
import tornado.auth
import tornado.escape
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
import pymongo
define("port", default=8000, help="run on the given port", type=int)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", MainHandler),
(r"/recommended/", RecommendedHandler),
(r"/books/([0-9Xx\-]+)", BookHandler),
(r"/edit/([0-9Xx\-]+)", BookEditHandler),
(r"/add", BookEditHandler)
]
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
ui_modules={"Book": BookModule},
debug=True,
)
conn = pymongo.Connection("localhost", 27017)
self.db = conn["bookstore"]
tornado.web.Application.__init__(self, handlers, **settings)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render(
"index.html",
page_title = "Burt's Books | Home",
header_text = "Welcome to Burt's Books!",
)
class BookHandler(tornado.web.RequestHandler):
def get(self, isbn=None):
if isbn:
coll = self.application.db.books
book = coll.find_one({"isbn": isbn})
if book:
self.render("one_book.html",
page_title="Burt's Books | " + book['title'],
header_text=book['title'],
book=book)
return
        self.set_status(404)
return
class BookEditHandler(tornado.web.RequestHandler):
def get(self, isbn=None):
book = dict()
if isbn:
coll = self.application.db.books
book = coll.find_one({"isbn": isbn})
self.render("book_edit.html",
page_title="Burt's Books",
header_text="Edit book",
book=book)
def post(self, isbn=None):
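        # Build a book document from the posted form fields; if an ISBN was given
        # in the URL, update that existing MongoDB document, otherwise stamp a
        # date_added timestamp and insert a new one, then redirect to the
        # recommendations page.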
import time
book_fields = ['isbn', 'title', 'subtitle', 'image', 'author',
'date_released', 'description']
coll = self.application.db.books
if isbn:
book = coll.find_one({"isbn": isbn})
for key in book_fields:
book[key] = self.get_argument(key, None)
if isbn:
coll.save(book)
else:
book['date_added'] = int(time.time())
coll.insert(book)
self.redirect("/recommended/")
class RecommendedHandler(tornado.web.RequestHandler):
def get(self):
coll = self.application.db.books
books = coll.find()
self.render(
"recommended.html",
page_title = "Burt's Books | Recommended Reading",
header_text = "Recommended Reading",
books = books
)
class BookModule(tornado.web.UIModule):
def render(self, book):
return self.render_string(
"modules/book.html",
book=book,
)
def css_files(self):
return "css/recommended.css"
def javascript_files(self):
return "js/recommended.js"
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
__all__ = [
"LibcloudError",
"MalformedResponseError",
"InvalidCredsError",
"InvalidCredsException",
"LazyList"
]
class LibcloudError(Exception):
"""The base class for other libcloud exceptions"""
def __init__(self, value, driver=None):
self.value = value
self.driver = driver
def __str__(self):
return ("<LibcloudError in "
+ repr(self.driver)
+ " "
+ repr(self.value) + ">")
class MalformedResponseError(LibcloudError):
"""Exception for the cases when a provider returns a malformed
response, e.g. you request JSON and provider returns
'<h3>something</h3>' due to some error on their side."""
def __init__(self, value, body=None, driver=None):
self.value = value
self.driver = driver
self.body = body
def __str__(self):
return ("<MalformedResponseException in "
+ repr(self.driver)
+ " "
+ repr(self.value)
+ ">: "
+ repr(self.body))
class InvalidCredsError(LibcloudError):
"""Exception used when invalid credentials are used on a provider."""
def __init__(self, value='Invalid credentials with the provider',
driver=None):
self.value = value
self.driver = driver
def __str__(self):
return repr(self.value)
InvalidCredsException = InvalidCredsError
class LazyList(object):
def __init__(self, get_more, value_dict=None):
self._data = []
self._last_key = None
self._exhausted = False
self._all_loaded = False
self._get_more = get_more
self._value_dict = value_dict or {}
def __iter__(self):
if not self._all_loaded:
self._load_all()
data = self._data
for i in data:
yield i
def __getitem__(self, index):
if index >= len(self._data) and not self._all_loaded:
self._load_all()
return self._data[index]
def __len__(self):
self._load_all()
return len(self._data)
def __repr__(self):
self._load_all()
repr_string = ', ' .join([repr(item) for item in self._data])
repr_string = '[%s]' % (repr_string)
return repr_string
def _load_all(self):
while not self._exhausted:
newdata, self._last_key, self._exhausted = \
self._get_more(last_key=self._last_key,
value_dict=self._value_dict)
self._data.extend(newdata)
self._all_loaded = True
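# Illustrative sketch (hypothetical helper, not used anywhere): a get_more
# callable must accept last_key and value_dict keyword arguments and return a
# (new_items, last_key, exhausted) tuple; LazyList keeps calling it until
# exhausted is True, so list(LazyList(_example_get_more)) would be [1, 2, 3].
def _example_get_more(last_key=None, value_dict=None):
    if last_key is None:
        # First page of results; more data may follow.
        return [1, 2, 3], 'page-1', False
    # No pages after the first one.
    return [], last_key, True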
|
import random
import uuid
from openstack import exceptions
from openstack.tests.functional.baremetal import base
class TestBareMetalNode(base.BaseBaremetalTest):
def test_node_create_get_delete(self):
node = self.create_node(name='node-name')
self.assertEqual(node.name, 'node-name')
self.assertEqual(node.driver, 'fake-hardware')
self.assertEqual(node.provision_state, 'available')
self.assertFalse(node.is_maintenance)
        # NOTE(dtantsur): get_node and find_node only differ in handling missing
# nodes, otherwise they are identical.
for call, ident in [(self.conn.baremetal.get_node, self.node_id),
(self.conn.baremetal.get_node, 'node-name'),
(self.conn.baremetal.find_node, self.node_id),
(self.conn.baremetal.find_node, 'node-name')]:
found = call(ident)
self.assertEqual(node.id, found.id)
self.assertEqual(node.name, found.name)
with_fields = self.conn.baremetal.get_node(
'node-name',
fields=['uuid', 'driver', 'instance_id'])
self.assertEqual(node.id, with_fields.id)
self.assertEqual(node.driver, with_fields.driver)
self.assertIsNone(with_fields.name)
self.assertIsNone(with_fields.provision_state)
nodes = self.conn.baremetal.nodes()
self.assertIn(node.id, [n.id for n in nodes])
self.conn.baremetal.delete_node(node, ignore_missing=False)
self.assertRaises(exceptions.ResourceNotFound,
self.conn.baremetal.get_node, self.node_id)
def test_node_update(self):
node = self.create_node(name='node-name', extra={'foo': 'bar'})
node.name = 'new-name'
node.extra = {'answer': 42}
instance_uuid = str(uuid.uuid4())
node = self.conn.baremetal.update_node(node,
instance_id=instance_uuid)
self.assertEqual('new-name', node.name)
self.assertEqual({'answer': 42}, node.extra)
self.assertEqual(instance_uuid, node.instance_id)
node = self.conn.baremetal.get_node('new-name')
self.assertEqual('new-name', node.name)
self.assertEqual({'answer': 42}, node.extra)
self.assertEqual(instance_uuid, node.instance_id)
node = self.conn.baremetal.update_node(node,
instance_id=None)
self.assertIsNone(node.instance_id)
node = self.conn.baremetal.get_node('new-name')
self.assertIsNone(node.instance_id)
def test_node_update_by_name(self):
self.create_node(name='node-name', extra={'foo': 'bar'})
instance_uuid = str(uuid.uuid4())
node = self.conn.baremetal.update_node('node-name',
instance_id=instance_uuid,
extra={'answer': 42})
self.assertEqual({'answer': 42}, node.extra)
self.assertEqual(instance_uuid, node.instance_id)
node = self.conn.baremetal.get_node('node-name')
self.assertEqual({'answer': 42}, node.extra)
self.assertEqual(instance_uuid, node.instance_id)
node = self.conn.baremetal.update_node('node-name',
instance_id=None)
self.assertIsNone(node.instance_id)
node = self.conn.baremetal.get_node('node-name')
self.assertIsNone(node.instance_id)
def test_node_patch(self):
node = self.create_node(name='node-name', extra={'foo': 'bar'})
node.name = 'new-name'
instance_uuid = str(uuid.uuid4())
node = self.conn.baremetal.patch_node(
node,
[dict(path='/instance_id', op='replace', value=instance_uuid),
dict(path='/extra/answer', op='add', value=42)])
self.assertEqual('new-name', node.name)
self.assertEqual({'foo': 'bar', 'answer': 42}, node.extra)
self.assertEqual(instance_uuid, node.instance_id)
node = self.conn.baremetal.get_node('new-name')
self.assertEqual('new-name', node.name)
self.assertEqual({'foo': 'bar', 'answer': 42}, node.extra)
self.assertEqual(instance_uuid, node.instance_id)
node = self.conn.baremetal.patch_node(
node,
[dict(path='/instance_id', op='remove'),
dict(path='/extra/answer', op='remove')])
self.assertIsNone(node.instance_id)
self.assertNotIn('answer', node.extra)
node = self.conn.baremetal.get_node('new-name')
self.assertIsNone(node.instance_id)
self.assertNotIn('answer', node.extra)
def test_node_list_update_delete(self):
self.create_node(name='node-name', extra={'foo': 'bar'})
node = next(n for n in
self.conn.baremetal.nodes(details=True,
provision_state='available',
is_maintenance=False,
associated=False)
if n.name == 'node-name')
self.assertEqual(node.extra, {'foo': 'bar'})
# This test checks that resources returned from listing are usable
self.conn.baremetal.update_node(node, extra={'foo': 42})
self.conn.baremetal.delete_node(node, ignore_missing=False)
def test_node_create_in_enroll_provide(self):
node = self.create_node(provision_state='enroll')
self.node_id = node.id
self.assertEqual(node.driver, 'fake-hardware')
self.assertEqual(node.provision_state, 'enroll')
self.assertIsNone(node.power_state)
self.assertFalse(node.is_maintenance)
self.conn.baremetal.set_node_provision_state(node, 'manage',
wait=True)
self.assertEqual(node.provision_state, 'manageable')
self.conn.baremetal.set_node_provision_state(node, 'provide',
wait=True)
self.assertEqual(node.provision_state, 'available')
def test_node_create_in_enroll_provide_by_name(self):
name = 'node-%d' % random.randint(0, 1000)
node = self.create_node(provision_state='enroll', name=name)
self.node_id = node.id
self.assertEqual(node.driver, 'fake-hardware')
self.assertEqual(node.provision_state, 'enroll')
self.assertIsNone(node.power_state)
self.assertFalse(node.is_maintenance)
node = self.conn.baremetal.set_node_provision_state(name, 'manage',
wait=True)
self.assertEqual(node.provision_state, 'manageable')
node = self.conn.baremetal.set_node_provision_state(name, 'provide',
wait=True)
self.assertEqual(node.provision_state, 'available')
def test_node_power_state(self):
node = self.create_node()
self.assertIsNone(node.power_state)
self.conn.baremetal.set_node_power_state(node, 'power on')
node = self.conn.baremetal.get_node(node.id)
# Fake nodes react immediately to power requests.
self.assertEqual('power on', node.power_state)
self.conn.baremetal.set_node_power_state(node, 'power off')
node = self.conn.baremetal.get_node(node.id)
self.assertEqual('power off', node.power_state)
def test_node_validate(self):
node = self.create_node()
# Fake hardware passes validation for all interfaces
result = self.conn.baremetal.validate_node(node)
for iface in ('boot', 'deploy', 'management', 'power'):
self.assertTrue(result[iface].result)
self.assertFalse(result[iface].reason)
def test_node_negative_non_existing(self):
uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971"
self.assertRaises(exceptions.ResourceNotFound,
self.conn.baremetal.get_node, uuid)
self.assertRaises(exceptions.ResourceNotFound,
self.conn.baremetal.find_node, uuid,
ignore_missing=False)
self.assertRaises(exceptions.ResourceNotFound,
self.conn.baremetal.delete_node, uuid,
ignore_missing=False)
self.assertRaises(exceptions.ResourceNotFound,
self.conn.baremetal.update_node, uuid,
name='new-name')
self.assertIsNone(self.conn.baremetal.find_node(uuid))
self.assertIsNone(self.conn.baremetal.delete_node(uuid))
def test_maintenance(self):
reason = "Prepating for taking over the world"
node = self.create_node()
self.assertFalse(node.is_maintenance)
self.assertIsNone(node.maintenance_reason)
# Initial setting without the reason
node = self.conn.baremetal.set_node_maintenance(node)
self.assertTrue(node.is_maintenance)
self.assertIsNone(node.maintenance_reason)
# Updating the reason later
node = self.conn.baremetal.set_node_maintenance(node, reason)
self.assertTrue(node.is_maintenance)
self.assertEqual(reason, node.maintenance_reason)
# Removing the reason later
node = self.conn.baremetal.set_node_maintenance(node)
self.assertTrue(node.is_maintenance)
self.assertIsNone(node.maintenance_reason)
# Unsetting maintenance
node = self.conn.baremetal.unset_node_maintenance(node)
self.assertFalse(node.is_maintenance)
self.assertIsNone(node.maintenance_reason)
# Initial setting with the reason
node = self.conn.baremetal.set_node_maintenance(node, reason)
self.assertTrue(node.is_maintenance)
self.assertEqual(reason, node.maintenance_reason)
def test_maintenance_via_update(self):
reason = "Prepating for taking over the world"
node = self.create_node()
# Initial setting without the reason
node = self.conn.baremetal.update_node(node, is_maintenance=True)
self.assertTrue(node.is_maintenance)
self.assertIsNone(node.maintenance_reason)
# Make sure the change has effect on the remote side.
node = self.conn.baremetal.get_node(node.id)
self.assertTrue(node.is_maintenance)
self.assertIsNone(node.maintenance_reason)
# Updating the reason later
node = self.conn.baremetal.update_node(node, maintenance_reason=reason)
self.assertTrue(node.is_maintenance)
self.assertEqual(reason, node.maintenance_reason)
# Make sure the change has effect on the remote side.
node = self.conn.baremetal.get_node(node.id)
self.assertTrue(node.is_maintenance)
self.assertEqual(reason, node.maintenance_reason)
# Unsetting maintenance
node = self.conn.baremetal.update_node(node, is_maintenance=False)
self.assertFalse(node.is_maintenance)
self.assertIsNone(node.maintenance_reason)
# Make sure the change has effect on the remote side.
node = self.conn.baremetal.get_node(node.id)
self.assertFalse(node.is_maintenance)
self.assertIsNone(node.maintenance_reason)
# Initial setting with the reason
node = self.conn.baremetal.update_node(node, is_maintenance=True,
maintenance_reason=reason)
self.assertTrue(node.is_maintenance)
self.assertEqual(reason, node.maintenance_reason)
# Make sure the change has effect on the remote side.
node = self.conn.baremetal.get_node(node.id)
self.assertTrue(node.is_maintenance)
self.assertEqual(reason, node.maintenance_reason)
class TestNodeRetired(base.BaseBaremetalTest):
min_microversion = '1.61'
def test_retired(self):
reason = "I'm too old for this s...tuff!"
node = self.create_node()
# Set retired when node state available should fail!
self.assertRaises(
exceptions.ConflictException,
self.conn.baremetal.update_node, node, is_retired=True)
# Set node state to manageable
self.conn.baremetal.set_node_provision_state(node, 'manage',
wait=True)
self.assertEqual(node.provision_state, 'manageable')
# Set retired without reason
node = self.conn.baremetal.update_node(node, is_retired=True)
self.assertTrue(node.is_retired)
self.assertIsNone(node.retired_reason)
# Verify set retired on server side
node = self.conn.baremetal.get_node(node.id)
self.assertTrue(node.is_retired)
self.assertIsNone(node.retired_reason)
# Add the reason
node = self.conn.baremetal.update_node(node, retired_reason=reason)
self.assertTrue(node.is_retired)
self.assertEqual(reason, node.retired_reason)
# Verify the reason on server side
node = self.conn.baremetal.get_node(node.id)
self.assertTrue(node.is_retired)
self.assertEqual(reason, node.retired_reason)
# Unset retired
node = self.conn.baremetal.update_node(node, is_retired=False)
self.assertFalse(node.is_retired)
self.assertIsNone(node.retired_reason)
# Verify on server side
node = self.conn.baremetal.get_node(node.id)
self.assertFalse(node.is_retired)
self.assertIsNone(node.retired_reason)
# Set retired with reason
node = self.conn.baremetal.update_node(node, is_retired=True,
retired_reason=reason)
self.assertTrue(node.is_retired)
self.assertEqual(reason, node.retired_reason)
# Verify on server side
node = self.conn.baremetal.get_node(node.id)
self.assertTrue(node.is_retired)
self.assertEqual(reason, node.retired_reason)
class TestBareMetalNodeFields(base.BaseBaremetalTest):
min_microversion = '1.8'
def test_node_fields(self):
self.create_node()
result = self.conn.baremetal.nodes(
fields=['uuid', 'name', 'instance_id'])
for item in result:
self.assertIsNotNone(item.id)
self.assertIsNone(item.driver)
class TestBareMetalVif(base.BaseBaremetalTest):
min_microversion = '1.28'
def setUp(self):
super(TestBareMetalVif, self).setUp()
self.node = self.create_node(network_interface='noop')
self.vif_id = "200712fc-fdfb-47da-89a6-2d19f76c7618"
def test_node_vif_attach_detach(self):
self.conn.baremetal.attach_vif_to_node(self.node, self.vif_id)
# NOTE(dtantsur): The noop networking driver is completely noop - the
# VIF list does not return anything of value.
self.conn.baremetal.list_node_vifs(self.node)
res = self.conn.baremetal.detach_vif_from_node(self.node, self.vif_id,
ignore_missing=False)
self.assertTrue(res)
def test_node_vif_negative(self):
uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971"
self.assertRaises(exceptions.ResourceNotFound,
self.conn.baremetal.attach_vif_to_node,
uuid, self.vif_id)
self.assertRaises(exceptions.ResourceNotFound,
self.conn.baremetal.list_node_vifs,
uuid)
self.assertRaises(exceptions.ResourceNotFound,
self.conn.baremetal.detach_vif_from_node,
uuid, self.vif_id, ignore_missing=False)
class TestTraits(base.BaseBaremetalTest):
min_microversion = '1.37'
def setUp(self):
super(TestTraits, self).setUp()
self.node = self.create_node()
def test_add_remove_node_trait(self):
node = self.conn.baremetal.get_node(self.node)
self.assertEqual([], node.traits)
self.conn.baremetal.add_node_trait(self.node, 'CUSTOM_FAKE')
self.assertEqual(['CUSTOM_FAKE'], self.node.traits)
node = self.conn.baremetal.get_node(self.node)
self.assertEqual(['CUSTOM_FAKE'], node.traits)
self.conn.baremetal.add_node_trait(self.node, 'CUSTOM_REAL')
self.assertEqual(sorted(['CUSTOM_FAKE', 'CUSTOM_REAL']),
sorted(self.node.traits))
node = self.conn.baremetal.get_node(self.node)
self.assertEqual(sorted(['CUSTOM_FAKE', 'CUSTOM_REAL']),
sorted(node.traits))
self.conn.baremetal.remove_node_trait(node, 'CUSTOM_FAKE',
ignore_missing=False)
self.assertEqual(['CUSTOM_REAL'], self.node.traits)
node = self.conn.baremetal.get_node(self.node)
self.assertEqual(['CUSTOM_REAL'], node.traits)
def test_set_node_traits(self):
node = self.conn.baremetal.get_node(self.node)
self.assertEqual([], node.traits)
traits1 = ['CUSTOM_FAKE', 'CUSTOM_REAL']
traits2 = ['CUSTOM_FOOBAR']
self.conn.baremetal.set_node_traits(self.node, traits1)
self.assertEqual(sorted(traits1), sorted(self.node.traits))
node = self.conn.baremetal.get_node(self.node)
self.assertEqual(sorted(traits1), sorted(node.traits))
self.conn.baremetal.set_node_traits(self.node, traits2)
self.assertEqual(['CUSTOM_FOOBAR'], self.node.traits)
node = self.conn.baremetal.get_node(self.node)
self.assertEqual(['CUSTOM_FOOBAR'], node.traits)
|
from bgp.models import Relationship
from utils.filters import (
BaseFilterSet,
CreatedUpdatedFilterSet,
NameSlugSearchFilterSet,
)
class RelationshipFilterSet(
BaseFilterSet, CreatedUpdatedFilterSet, NameSlugSearchFilterSet
):
class Meta:
model = Relationship
fields = ["id", "name", "slug"]
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_generation_utils import GenerationTesterMixin
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
self.pad_token_id = self.vocab_size - 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = OpenAIGPTConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
n_ctx=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range
pad_token_id=self.pad_token_id,
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
model = OpenAIGPTModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
model = OpenAIGPTLMHeadModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
model = OpenAIGPTDoubleHeadsModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_openai_gpt_for_sequence_classification(
self, config, input_ids, head_mask, token_type_ids, *args
):
config.num_labels = self.num_labels
model = OpenAIGPTForSequenceClassification(config)
model.to(torch_device)
model.eval()
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    ) # TODO (PVP): Add DoubleHeadsModel when the generate() function is changed accordingly
# special case for DoubleHeads model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
dtype=torch.long,
device=torch_device,
)
inputs_dict["input_ids"] = inputs_dict["labels"]
inputs_dict["token_type_ids"] = inputs_dict["labels"]
inputs_dict["mc_token_ids"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices),
dtype=torch.long,
device=torch_device,
)
inputs_dict["mc_labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = OpenAIGPTModelTester(self)
self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_openai_gpt_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
def test_openai_gpt_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
def test_openai_gpt_double_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
def test_openai_gpt_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = OpenAIGPTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_openai_gpt(self):
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
model.to(torch_device)
input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device) # the president is
expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
|
"""An observer that returns env's info.
"""
from typing import Dict
from acme.utils.observers import base
import dm_env
import numpy as np
class EnvInfoObserver(base.EnvLoopObserver):
"""An observer that collects and accumulates scalars from env's info."""
def __init__(self):
self._metrics = None
def _accumulate_metrics(self, env: dm_env.Environment) -> None:
if not hasattr(env, 'get_info'):
return
info = getattr(env, 'get_info')()
if not info:
return
for k, v in info.items():
if np.isscalar(v):
self._metrics[k] = self._metrics.get(k, 0) + v
def observe_first(self, env: dm_env.Environment, timestep: dm_env.TimeStep
) -> None:
"""Observes the initial state."""
self._metrics = {}
self._accumulate_metrics(env)
def observe(self, env: dm_env.Environment, timestep: dm_env.TimeStep,
action: np.ndarray) -> None:
"""Records one environment step."""
self._accumulate_metrics(env)
def get_metrics(self) -> Dict[str, base.Number]:
"""Returns metrics collected for the current episode."""
return self._metrics
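# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how this observer accumulates scalars; `ToyEnv`
# is a hypothetical stand-in for a dm_env.Environment exposing `get_info()`.
#   class ToyEnv:
#       def get_info(self):
#           return {'bonus': 1.0}
#   observer = EnvInfoObserver()
#   observer.observe_first(ToyEnv(), timestep=None)               # new episode
#   observer.observe(ToyEnv(), timestep=None, action=np.zeros(1))
#   observer.get_metrics()  # -> {'bonus': 2.0}; scalars summed per episode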
|
import unittest
import requests_mock
from airflow.models import Connection
from airflow.contrib.hooks.openfaas_hook import OpenFaasHook
from airflow.hooks.base_hook import BaseHook
from airflow import AirflowException
from tests.compat import mock
FUNCTION_NAME = "function_name"
class TestOpenFaasHook(unittest.TestCase):
GET_FUNCTION = "/system/function/"
INVOKE_ASYNC_FUNCTION = "/async-function/"
DEPLOY_FUNCTION = "/system/functions"
UPDATE_FUNCTION = "/system/functions"
def setUp(self):
self.hook = OpenFaasHook(function_name=FUNCTION_NAME)
self.mock_response = {'ans': 'a'}
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_is_function_exist_false(self, mock_get_connection, m):
m.get("http://open-faas.io" + self.GET_FUNCTION + FUNCTION_NAME,
json=self.mock_response, status_code=404)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
does_function_exist = self.hook.does_function_exist()
self.assertFalse(does_function_exist)
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_is_function_exist_true(self, mock_get_connection, m):
m.get("http://open-faas.io" + self.GET_FUNCTION + FUNCTION_NAME,
json=self.mock_response, status_code=202)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
does_function_exist = self.hook.does_function_exist()
self.assertTrue(does_function_exist)
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_update_function_true(self, mock_get_connection, m):
m.put("http://open-faas.io" + self.UPDATE_FUNCTION, json=self.mock_response, status_code=202)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
update_function_ans = self.hook.update_function({})
self.assertEqual(update_function_ans, None)
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_update_function_false(self, mock_get_connection, m):
m.put("http://open-faas.io" + self.UPDATE_FUNCTION, json=self.mock_response, status_code=400)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
with self.assertRaises(AirflowException) as context:
self.hook.update_function({})
self.assertIn('failed to update ' + FUNCTION_NAME, str(context.exception))
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_invoke_async_function_false(self, mock_get_connection, m):
m.post("http://open-faas.io" + self.INVOKE_ASYNC_FUNCTION + FUNCTION_NAME, json=self.mock_response,
status_code=400)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
with self.assertRaises(AirflowException) as context:
self.hook.invoke_async_function({})
self.assertIn('failed to invoke function', str(context.exception))
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_invoke_async_function_true(self, mock_get_connection, m):
m.post("http://open-faas.io" + self.INVOKE_ASYNC_FUNCTION + FUNCTION_NAME, json=self.mock_response,
status_code=202)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
self.assertEqual(self.hook.invoke_async_function({}), None)
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_deploy_function_function_already_exist(self, mock_get_connection, m):
m.put("http://open-faas.io/" + self.UPDATE_FUNCTION, json=self.mock_response, status_code=202)
mock_connection = Connection(host="http://open-faas.io/")
mock_get_connection.return_value = mock_connection
self.assertEqual(self.hook.deploy_function(True, {}), None)
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_deploy_function_function_not_exist(self, mock_get_connection, m):
m.post("http://open-faas.io" + self.DEPLOY_FUNCTION, json={}, status_code=202)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
self.assertEqual(self.hook.deploy_function(False, {}), None)
if __name__ == '__main__':
unittest.main()
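# --- Hedged usage sketch (not part of the original module) ---
# Uses only the hook methods exercised by the tests above; the connection is
# resolved through Airflow's BaseHook.get_connection.
#   hook = OpenFaasHook(function_name=FUNCTION_NAME)
#   if hook.does_function_exist():
#       hook.update_function({})        # raises AirflowException on failure
#   hook.invoke_async_function({})      # fire-and-forget invocation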
|
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('webframe', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='preference',
options={'permissions': (('browse_config', 'Can browse system configuration'), ('browse_preference', 'Can browse other preferences'))},
),
migrations.RemoveField(
model_name='preference',
name='user',
),
migrations.AddField(
model_name='preference',
name='enabled',
field=models.BooleanField(default=True, help_text='ValueObject.enabled.helptext', verbose_name='ValueObject.enabled'),
),
migrations.AddField(
model_name='preference',
name='lmb',
field=models.ForeignKey(blank=True, help_text='ValueObject.lmb.helptext', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='preference_lmd', to=settings.AUTH_USER_MODEL, verbose_name='ValueObject.lmb'),
),
migrations.AddField(
model_name='preference',
name='lmd',
field=models.DateTimeField(auto_now=True, default=datetime.datetime(2016, 10, 8, 6, 13, 28, 527051, tzinfo=utc), help_text='ValueObject.lmd.helptext', verbose_name='ValueObject.lmd'),
preserve_default=False,
),
migrations.AddField(
model_name='preference',
name='owner',
field=models.ForeignKey(blank=True, help_text='Preference.owner.helptext', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='preference_owner', to=settings.AUTH_USER_MODEL, verbose_name='Preference.owner'),
),
migrations.AlterField(
model_name='preference',
name='id',
field=models.UUIDField(default=uuid.uuid4, editable=False, help_text='Preference.id.helptext', primary_key=True, serialize=False, verbose_name='Preference.id'),
),
migrations.AlterField(
model_name='preference',
name='name',
field=models.CharField(help_text='Preference.name.helptext', max_length=100, verbose_name='Preference.name'),
),
migrations.AlterField(
model_name='preference',
name='parent',
field=models.ForeignKey(blank=True, help_text='Preference.parent.helptext', null=True, on_delete=django.db.models.deletion.CASCADE, to='webframe.Preference', verbose_name='Preference.parent'),
),
migrations.AlterField(
model_name='preference',
name='sequence',
field=models.FloatField(default=0.5, help_text='Preference.sequence.helptext', verbose_name='Preference.sequence'),
),
migrations.AlterField(
model_name='preference',
name='value',
field=models.CharField(help_text='Preference.value.helptext', max_length=1024, verbose_name='Preference.value'),
),
]
|
"""Implements RigL."""
import gin
from rigl.rigl_tf2 import utils
import tensorflow as tf
def get_all_layers(model, filter_fn=lambda _: True):
"""Gets all layers of a model and layers of a layer if it is a keras.Model."""
all_layers = []
for l in model.layers:
if hasattr(l, 'layers'):
all_layers.extend(get_all_layers(l, filter_fn=filter_fn))
elif filter_fn(l):
all_layers.append(l)
return all_layers
def is_pruned(layer):
return isinstance(layer, utils.PRUNING_WRAPPER) and layer.trainable
class MaskUpdater(object):
"""Base class for mask update algorithms.
Attributes:
model: tf.keras.Model
optimizer: tf.train.Optimizer
use_stateless: bool, if True stateless operations are used. This is
important for multi-worker jobs not to diverge.
stateless_seed_offset: int, added to the seed of stateless operations.
Use this to create randomness without divergence across workers.
"""
def __init__(self, model, optimizer, use_stateless=True,
stateless_seed_offset=0, loss_fn=None):
self._model = model
self._optimizer = optimizer
self._use_stateless = use_stateless
self._stateless_seed_offset = stateless_seed_offset
self._loss_fn = loss_fn
self.val_x = self.val_y = None
def prune_masks(self, prune_fraction):
"""Updates a fraction of weights in each layer."""
all_masks, all_vars = self.get_vars_and_masks()
drop_scores = self.get_drop_scores(all_vars, all_masks)
grow_score = None
for mask, var, drop_score in zip(all_masks, all_vars, drop_scores):
self.generic_mask_update(mask, var, drop_score, grow_score,
prune_fraction)
def update_masks(self, drop_fraction):
"""Updates a fraction of weights in each layer."""
all_masks, all_vars = self.get_vars_and_masks()
drop_scores = self.get_drop_scores(all_vars, all_masks)
grow_scores = self.get_grow_scores(all_vars, all_masks)
for mask, var, drop_score, grow_score in zip(all_masks, all_vars,
drop_scores, grow_scores):
self.generic_mask_update(mask, var, drop_score, grow_score, drop_fraction)
def get_all_pruning_layers(self):
"""Returns all pruned layers from the model."""
if hasattr(self._model, 'layers'):
return get_all_layers(self._model, filter_fn=is_pruned)
else:
return [self._model] if is_pruned(self._model) else []
def get_vars_and_masks(self):
"""Gets all masked variables and corresponding masks."""
all_masks = []
all_vars = []
for layer in self.get_all_pruning_layers():
for var, mask, _ in layer.pruning_vars:
all_vars.append(var)
all_masks.append(mask)
return all_masks, all_vars
def get_drop_scores(self, all_vars, all_masks):
raise NotImplementedError
def get_grow_scores(self, all_vars, all_masks):
raise NotImplementedError
def generic_mask_update(self, mask, var, score_drop, score_grow,
drop_fraction, reinit_when_same=False):
"""Prunes+grows connections, all tensors same shape."""
n_total = tf.size(score_drop)
n_ones = tf.cast(tf.reduce_sum(mask), dtype=tf.int32)
n_prune = tf.cast(
tf.cast(n_ones, dtype=tf.float32) * drop_fraction, tf.int32)
n_keep = n_ones - n_prune
# Sort the entire array since the k needs to be constant for TPU.
_, sorted_indices = tf.math.top_k(
tf.reshape(score_drop, [-1]), k=n_total)
sorted_indices_ex = tf.expand_dims(sorted_indices, 1)
# We will have zeros after having `n_keep` many ones.
new_values = tf.where(
tf.range(n_total) < n_keep,
tf.ones_like(sorted_indices, dtype=mask.dtype),
tf.zeros_like(sorted_indices, dtype=mask.dtype))
mask1 = tf.scatter_nd(sorted_indices_ex, new_values,
new_values.shape)
if score_grow is not None:
# Flatten the scores.
score_grow = tf.reshape(score_grow, [-1])
      # Set scores of the enabled connections (ones) to min(score) - 1, so
      # that they have the lowest scores.
score_grow_lifted = tf.where(
tf.math.equal(mask1, 1),
tf.ones_like(mask1) * (tf.reduce_min(score_grow) - 1), score_grow)
_, sorted_indices = tf.math.top_k(score_grow_lifted, k=n_total)
sorted_indices_ex = tf.expand_dims(sorted_indices, 1)
new_values = tf.where(
tf.range(n_total) < n_prune,
tf.ones_like(sorted_indices, dtype=mask.dtype),
tf.zeros_like(sorted_indices, dtype=mask.dtype))
mask2 = tf.scatter_nd(sorted_indices_ex, new_values, new_values.shape)
# Ensure masks are disjoint.
tf.debugging.assert_near(tf.reduce_sum(mask1 * mask2), 0.)
      # Now set the weights of the grown connections.
mask2_reshaped = tf.reshape(mask2, mask.shape)
# Set the values of the new connections.
grow_tensor = tf.zeros_like(var, dtype=var.dtype)
if reinit_when_same:
# If dropped and grown, we re-initialize.
new_connections = tf.math.equal(mask2_reshaped, 1)
else:
new_connections = tf.math.logical_and(
tf.math.equal(mask2_reshaped, 1), tf.math.equal(mask, 0))
new_weights = tf.where(new_connections, grow_tensor, var)
var.assign(new_weights)
# Ensure there is no momentum value for new connections
self.reset_momentum(var, new_connections)
mask_combined = tf.reshape(mask1 + mask2, mask.shape)
else:
mask_combined = tf.reshape(mask1, mask.shape)
mask.assign(mask_combined)
def reset_momentum(self, var, new_connections):
for s_name in self._optimizer.get_slot_names():
      # For momentum-like slot variables, reset the accumulated values to zero.
optim_var = self._optimizer.get_slot(var, s_name)
new_values = tf.where(new_connections,
tf.zeros_like(optim_var), optim_var)
optim_var.assign(new_values)
def _random_uniform(self, *args, **kwargs):
if self._use_stateless:
c_seed = self._stateless_seed_offset + kwargs['seed']
kwargs['seed'] = tf.cast(
tf.stack([c_seed, self._optimizer.iterations]), tf.int32)
return tf.random.stateless_uniform(*args, **kwargs)
else:
return tf.random.uniform(*args, **kwargs)
def _random_normal(self, *args, **kwargs):
if self._use_stateless:
c_seed = self._stateless_seed_offset + kwargs['seed']
kwargs['seed'] = tf.cast(
tf.stack([c_seed, self._optimizer.iterations]), tf.int32)
return tf.random.stateless_normal(*args, **kwargs)
else:
return tf.random.normal(*args, **kwargs)
def set_validation_data(self, val_x, val_y):
self.val_x, self.val_y = val_x, val_y
def _get_gradients(self, all_vars):
"""Returns the gradients of the given weights using the validation data."""
with tf.GradientTape() as tape:
batch_loss = self._loss_fn(self.val_x, self.val_y)
grads = tape.gradient(batch_loss, all_vars)
if grads:
grads = tf.distribute.get_replica_context().all_reduce('sum', grads)
return grads
class SET(MaskUpdater):
"""Implementation of dynamic sparsity optimizers.
Implementation of SET.
See https://www.nature.com/articles/s41467-018-04316-3
This optimizer wraps a regular optimizer and performs updates on the masks
according to schedule given.
"""
def get_drop_scores(self, all_vars, all_masks, noise_std=0):
def score_fn(mask, var):
score = tf.math.abs(mask*var)
if noise_std != 0:
score += self._random_normal(
score.shape, stddev=noise_std, dtype=score.dtype,
seed=(hash(var.name + 'drop')))
return score
return [score_fn(mask, var) for mask, var in zip(all_masks, all_vars)]
def get_grow_scores(self, all_vars, all_masks):
return [self._random_uniform(var.shape, seed=hash(var.name + 'grow'))
for var in all_vars]
class RigL(MaskUpdater):
"""Implementation of dynamic sparsity optimizers.
Implementation of RigL.
"""
def get_drop_scores(self, all_vars, all_masks, noise_std=0):
def score_fn(mask, var):
score = tf.math.abs(mask*var)
if noise_std != 0:
score += self._random_normal(
score.shape, stddev=noise_std, dtype=score.dtype,
seed=(hash(var.name + 'drop')))
return score
return [score_fn(mask, var) for mask, var in zip(all_masks, all_vars)]
def get_grow_scores(self, all_vars, all_masks):
return [tf.abs(g) for g in self._get_gradients(all_vars)]
class RigLInverted(RigL):
"""Implementation of dynamic sparsity optimizers.
Implementation of RigL.
"""
def get_grow_scores(self, all_vars, all_masks):
return [-tf.abs(g) for g in self._get_gradients(all_vars)]
class UpdateSchedule(object):
"""Base class for mask update algorithms.
Attributes:
mask_updater: MaskUpdater, to invoke.
update_freq: int, frequency of mask updates.
init_drop_fraction: float, initial drop fraction.
"""
def __init__(self, mask_updater, init_drop_fraction, update_freq,
last_update_step):
self._mask_updater = mask_updater
self.update_freq = update_freq
self.last_update_step = last_update_step
self.init_drop_fraction = init_drop_fraction
self.last_drop_fraction = 0
def get_drop_fraction(self, step):
raise NotImplementedError
def is_update_iter(self, step):
"""Returns true if it is a valid mask update step."""
    # last_update_step < 0 means there is no last step (update forever).
    # last_update_step == 0 means never update.
tf.debugging.Assert(step >= 0, [step])
if self.last_update_step < 0:
is_valid_step = True
elif self.last_update_step == 0:
is_valid_step = False
else:
is_valid_step = step <= self.last_update_step
return tf.logical_and(is_valid_step, step % self.update_freq == 0)
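  # Hedged examples (not part of the original class), with update_freq=100:
  #   last_update_step == -1  -> update at steps 0, 100, 200, ... forever
  #   last_update_step == 0   -> never update
  #   last_update_step == 500 -> update at steps 0, 100, ..., 500 only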
def update(self, step, check_update_iter=True):
if check_update_iter:
tf.debugging.Assert(self.is_update_iter(step), [step])
self.last_drop_fraction = self.get_drop_fraction(step)
def true_fn():
self._mask_updater.update_masks(self.last_drop_fraction)
tf.cond(self.last_drop_fraction > 0., true_fn, lambda: None)
def prune(self, prune_fraction):
self.last_drop_fraction = prune_fraction
self._mask_updater.prune_masks(self.last_drop_fraction)
def set_validation_data(self, val_x, val_y):
self._mask_updater.set_validation_data(val_x, val_y)
class ConstantUpdateSchedule(UpdateSchedule):
"""Updates a constant fraction of connections."""
def get_drop_fraction(self, step):
return self.init_drop_fraction
class CosineUpdateSchedule(UpdateSchedule):
"""Updates a constant fraction of connections."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._drop_fraction_fn = tf.keras.experimental.CosineDecay(
self.init_drop_fraction,
self.last_update_step,
alpha=0.0,
name='cosine_drop_fraction')
def get_drop_fraction(self, step):
return self._drop_fraction_fn(step)
class ScaledLRUpdateSchedule(UpdateSchedule):
"""Scales the drop fraction with learning rate."""
def __init__(self, mask_updater, init_drop_fraction, update_freq,
last_update_step, optimizer):
self._optimizer = optimizer
self._initial_lr = self._get_lr(0)
super(ScaledLRUpdateSchedule, self).__init__(
mask_updater, init_drop_fraction, update_freq, last_update_step)
def _get_lr(self, step):
if isinstance(self._optimizer.lr, tf.Variable):
return self._optimizer.lr.numpy()
else:
return self._optimizer.lr(step)
def get_drop_fraction(self, step):
current_lr = self._get_lr(step)
return (self.init_drop_fraction / self._initial_lr) * current_lr
@gin.configurable(
'mask_updater',
allowlist=[
'update_alg',
'schedule_alg',
'update_freq',
'init_drop_fraction',
'last_update_step',
'use_stateless',
])
def get_mask_updater(
model,
optimizer,
loss_fn,
update_alg='',
schedule_alg='lr',
update_freq=100,
init_drop_fraction=0.3,
last_update_step=-1,
use_stateless=True):
"""Retrieves the update algorithm and passes it to the schedule object."""
if not update_alg:
return None
elif update_alg == 'set':
mask_updater = SET(model, optimizer, use_stateless=use_stateless)
elif update_alg == 'rigl':
mask_updater = RigL(
model, optimizer, loss_fn=loss_fn, use_stateless=use_stateless)
elif update_alg == 'rigl_inverted':
mask_updater = RigLInverted(
model, optimizer, loss_fn=loss_fn, use_stateless=use_stateless)
else:
raise ValueError('update_alg:%s is not valid.' % update_alg)
if schedule_alg == 'lr':
update_schedule = ScaledLRUpdateSchedule(
mask_updater, init_drop_fraction, update_freq, last_update_step,
optimizer)
elif schedule_alg == 'cosine':
update_schedule = CosineUpdateSchedule(
mask_updater, init_drop_fraction, update_freq, last_update_step)
elif schedule_alg == 'constant':
update_schedule = ConstantUpdateSchedule(mask_updater, init_drop_fraction,
update_freq, last_update_step)
else:
raise ValueError('schedule_alg:%s is not valid.' % schedule_alg)
return update_schedule
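if __name__ == '__main__':
  # Hedged toy demonstration (not part of the original module): reproduces
  # the drop/grow ordering of `generic_mask_update` in plain numpy for a
  # 4-weight layer with two active connections and drop_fraction=0.5.
  import numpy as np
  weights = np.array([0.9, -0.1, 0.0, 0.0])
  mask = np.array([1.0, 1.0, 0.0, 0.0])
  grads = np.array([0.0, 0.0, 0.5, 0.05])
  n_ones = int(mask.sum())
  n_prune = int(n_ones * 0.5)
  # Drop: keep the connections with the largest |mask * weight|.
  keep_idx = np.argsort(-np.abs(mask * weights))[:n_ones - n_prune]
  mask1 = np.zeros_like(mask)
  mask1[keep_idx] = 1.0
  # Grow (RigL criterion): enable the slots with the largest |gradient|;
  # kept connections are lifted to min - 1 so they are never selected.
  lifted = np.where(mask1 == 1, np.abs(grads).min() - 1.0, np.abs(grads))
  grow_idx = np.argsort(-lifted)[:n_prune]
  print('kept:', keep_idx, 'grown:', grow_idx)  # kept: [0]  grown: [2]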
|
from __future__ import absolute_import, division, print_function, with_statement
from funnel.queue import AsyncManager, SyncManager, Message
from time import time
from tornado.testing import AsyncTestCase
from tornado.ioloop import IOLoop
from funnel.testing import HOST
from unittest import TestCase
class TestAsyncManager(AsyncTestCase):
def get_new_ioloop(self):
return IOLoop.current()
def test_basis(self):
queue = AsyncManager()
self.addCleanup(queue.close_connection)
queue.connect(host=HOST)
counter = {"n": 0}
def on_message(body):
self.assertEqual(body, {"message": "Hello, world!"})
counter["n"] += 1
queue.start_consuming(
on_message,
)
queue.publish({"message": "Hello, world!"}, routing_key=queue.name)
IOLoop.current().add_timeout(time() + 0.2, self.stop)
self.wait()
self.assertEqual(counter["n"], 1)
def test_handling_static_queue_name(self):
queue = AsyncManager(queue="dummy")
self.addCleanup(queue.close_connection)
queue.connect(host=HOST)
self.assertEqual(queue.name, "dummy")
def test_publish_with_not_ready(self):
queue = AsyncManager(queue="dummy")
self.addCleanup(queue.close_connection)
queue.connect(host=HOST)
queue._ready = False
try:
            queue.publish(None, None)
except Exception as e:
self.fail("This exception is raised: {}".format(e))
def test_serializer(self):
class SomeObject(object):
def __init__(self, entity):
self.entity = entity
def __repr__(self):
return self.entity
queue = AsyncManager()
self.addCleanup(queue.close_connection)
queue.connect(host=HOST)
counter = {"n": 0}
def on_message(body):
self.assertEqual(body, {"message": "Hello, world!"})
counter["n"] += 1
queue.start_consuming(
on_message,
)
self.assertRaises(TypeError, lambda: queue.publish({"message": SomeObject("Hello, world!")}, routing_key=queue.name))
def serializer(o):
if isinstance(o, SomeObject):
return repr(o)
raise TypeError(repr(o) + " is not JSON serializable")
queue.publish({"message": SomeObject("Hello, world!")}, routing_key=queue.name, serializer=serializer)
IOLoop.current().add_timeout(time() + 0.2, self.stop)
self.wait()
self.assertEqual(counter["n"], 1)
class TestSyncManager(AsyncTestCase):
def test_basis(self):
        # Two managers are in use here, so the queue name must be fixed or this test won't work
queue = SyncManager(queue="dummy")
worker_queue = AsyncManager(queue="dummy")
self.addCleanup(queue.close_connection)
        # XXX Closing this one raises an error, apparently conflicting with AsyncWorkerTestCase.doCleanup in funnel.testing, so it is left commented out
queue.connect(host=HOST)
worker_queue.connect(host=HOST)
counter = {"n": 0}
def on_message(body):
self.assertEqual(body, {"message": "Hello, world!"})
counter["n"] += 1
worker_queue.start_consuming(
on_message,
)
queue.publish({"message": "Hello, world!"}, routing_key=queue.name)
IOLoop.current().add_timeout(time() + 0.2, self.stop)
self.wait()
self.assertEqual(counter["n"], 1)
def test_publish_with_not_ready(self):
queue = SyncManager(queue="dummy")
self.addCleanup(queue.close_connection)
queue.connect(host=HOST)
queue._ready = False
try:
            queue.publish(None, None)
except Exception as e:
self.fail("This exception is raised: {}".format(e))
    def test_ready_flag_when_queue_declare_completed(self):
queue = SyncManager(queue="dummy")
worker_queue = AsyncManager(queue="dummy")
self.addCleanup(queue.close_connection)
queue.connect(host=HOST)
worker_queue.connect(host=HOST)
def on_message():
pass
worker_queue.start_consuming(
on_message,
)
queue.publish({"message": "Hello, world!"}, routing_key=queue.name)
IOLoop.current().add_timeout(time() + 0.2, self.stop)
self.wait()
self.assertTrue(queue._ready)
class TestMessage(TestCase):
def test__prepare_body(self):
def dummy_callback():
pass
queue = AsyncManager()
message = Message(queue, dummy_callback)
body = message._prepare_body(b'{}')
self.assertEqual(body, '{}')
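# --- Hedged usage sketch (not part of the original module), grounded in the
# tests above ---
#   queue = AsyncManager()
#   queue.connect(host=HOST)
#   queue.start_consuming(on_message)   # on_message(body) receives dicts
#   queue.publish({"message": "hi"}, routing_key=queue.name)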
|
from __future__ import print_function
from collections import defaultdict, Counter, namedtuple, OrderedDict
import os
import os.path as op
import yaml
import fontforge
from fontaine.cmap import Library
from fontaine.font import FontFactory
from bakery_cli.scripts.vmet import get_metric_view
from bakery_cli.utils import UpstreamDirectory
from bakery_cli.report import utils as report_utils
from bakery_lint.metadata import Metadata
TAB = 'Index'
TEMPLATE_DIR = op.join(op.dirname(__file__), 'templates')
t = lambda templatefile: op.join(TEMPLATE_DIR, templatefile)
def sort(data):
a = []
for grouped_dict in data:
if 'required' in grouped_dict['tags']:
a.append(grouped_dict)
for grouped_dict in data:
if 'note' in grouped_dict['tags'] and 'required' not in grouped_dict['tags']:
a.append(grouped_dict)
for grouped_dict in data:
if 'note' not in grouped_dict['tags'] and 'required' not in grouped_dict['tags']:
a.append(grouped_dict)
return a
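# Hedged example (not part of the original module): given groups tagged
#   [{'tags': ['note']}, {'tags': ['required']}, {'tags': []}]
# sort() reorders them as required -> note -> untagged.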
def filter_with_tag(fonttestdata, tag):
tests = fonttestdata['failure'] + fonttestdata['error']
return [test for test in tests if tag in test['tags']]
def filter_by_results_with_tag(fonttestdata, tag, *results):
tests = {}
for res in results:
tests[res] = [test for test in fonttestdata.get(res) if tag in test.get('tags', [])]
return tests
def get_fonts_table_sizes(fonts):
""" Returns tuple with available tables from all fonts and their length """
from fontTools.ttLib import sfnt
_fonts = {}
tables = []
for font in fonts:
_fonts[op.basename(font)] = {}
        with open(font, 'rb') as fp_font:
sf = sfnt.SFNTReader(fp_font)
for t in sf.tables:
if t not in tables:
tables.append(t)
_fonts[op.basename(font)][t] = sf.tables[t].length
return tables, _fonts
def get_fonts_table_sizes_grouped(fonts_list):
_, fonts = get_fonts_table_sizes(fonts_list)
fonts_dict = defaultdict(dict, fonts)
# Fonts may have different tables!!!
# across all fonts calculate sum of each table
table_sizes_sums = sum(
        (Counter(v) for k, v in fonts_dict.items()), Counter()
)
# count amount of each table across all fonts
tables_counts = sum(
        (Counter(v.keys()) for k, v in fonts_dict.items()), Counter()
)
# count average for each table, take value from 'table_sizes_sums'
# and divide by corresponding value from 'tables_counts',
# eg table_sizes_sums['glyf'] / tables_counts['glyf']
tables_mean_dict = {
k: table_sizes_sums[k]/tables_counts[k] for k in table_sizes_sums
}
# calculate deviation (delta) from an average
# for each font and each table in font find delta
tables_delta_dict = {}
    for font, tables in fonts_dict.items():
tables_delta_dict[font] = {
            k: tables_mean_dict[k] - v for k, v in tables.items()
}
# gather all existent tables from all fonts
all_possible_tables = set()
for font, tables in tables_delta_dict.items():
for table in tables:
if table not in all_possible_tables:
all_possible_tables.add(table)
# if some font does not have a table that others have,
# just set the deviation to 0
for font, tables in tables_delta_dict.items():
for item in all_possible_tables:
tables.setdefault(item, 0)
tables_delta_dict[font] = tables
# make the deviation dict ready for google chart as array
tables_delta_dict_for_google_array = {}
    for font, props in tables_delta_dict.items():
tables_delta_dict_for_google_array.setdefault('fonts', []).append(font)
        for k, v in props.items():
tables_delta_dict_for_google_array.setdefault(k, []).append(v)
# prepare all tables dict as array for google chart
tables_dict_for_google_array = {}
    for font, props in fonts.items():
tables_dict_for_google_array.setdefault('fonts', []).append(font)
        for k, v in props.items():
tables_dict_for_google_array.setdefault(k, []).append(v)
grouped_dict = {
'fonts': tables_dict_for_google_array.pop('fonts'),
'tables': [
[k, tables_mean_dict[k]] + v for k, v in tables_dict_for_google_array.items()
]
}
delta_dict = {
'fonts': tables_delta_dict_for_google_array.pop('fonts'),
'tables': [
[k, ] + v for k, v in tables_delta_dict_for_google_array.items()
]
}
    # pad all arrays to the same length
max_len = len(max(grouped_dict['tables'], key=len))
new_items = []
for item in grouped_dict["tables"]:
new_item = item[:]
while len(new_item) < max_len:
new_item.append(0)
new_items.append(new_item)
grouped_dict["tables"] = new_items
ftable = namedtuple('FontTable', ['mean', 'grouped', 'delta'])
return ftable(tables_mean_dict, grouped_dict, delta_dict)
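# Hedged worked example (not part of the original module): given two fonts
#   {'A.ttf': {'glyf': 10, 'head': 2}, 'B.ttf': {'glyf': 30}}
# the per-table means are {'glyf': 20, 'head': 2}, and the deltas
# (mean - size) come out as
#   {'A.ttf': {'glyf': 10, 'head': 0}, 'B.ttf': {'glyf': -10, 'head': 0}}
# with B.ttf's missing 'head' entry defaulted to 0 as above.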
def get_orthography(fontaineFonts):
fonts_dict = defaultdict(list)
library = Library(collections=['subsets'])
fonts_names = []
for font, fontaine in fontaineFonts:
fonts_names.append(font)
for info in fontaine.get_orthographies(_library=library):
font_info = dict(name=font, support=info.support_level,
coverage=info.coverage,
missing_chars=info.missing)
fonts_dict[info.charmap.common_name].append(font_info)
averages = {}
for subset, fonts in fonts_dict.items():
averages[subset] = sum([font['coverage'] for font in fonts]) / len(fonts)
return sorted(fonts_names), averages, OrderedDict(sorted(fonts_dict.items()))
def to_google_data_list(tdict, haxis=0):
return sorted([[x, tdict[x] - haxis] for x in tdict])
def font_table_to_google_data_list(tdict):
return sorted([list(item) for item in tdict.items()])
def average_table_size(tdict):
return sum(tdict.values()) / len(tdict)
def _obj_to_dict(instance, exclude_attrs=()):
# Very simplified, but enough for reports
return {
k: getattr(instance, k) for k in dir(instance) \
if not any([k.startswith('__'), str(k) in exclude_attrs])
}
def font_factory_instance_to_dict(instance):
return _obj_to_dict(instance, exclude_attrs=(
'get_othography_info', 'get_orthographies', 'orthographies',
'refresh_sfnt_properties', '_fontFace', 'getGlyphNames'
))
def get_stem_info(fontfile, glyph='n'):
ttf = fontforge.open(fontfile)
if ttf.italicangle != 0.0:
style = 'italic'
else:
style = 'normal'
glyph = ttf[glyph]
if not glyph.vhints:
glyph.autoHint()
if glyph.vhints:
v_hints = [item[1] for item in glyph.vhints]
stem = sum(v_hints)/len(v_hints)
else:
stem = None
return {'stem': stem, 'fontname': ttf.fontname, 'style': style,
'weight': ttf.os2_weight}
def generate(config):
if config.get('failed'):
return
directory = UpstreamDirectory(config['path'])
if op.exists(op.join(config['path'], 'METADATA.json.new')):
metadata_file = open(op.join(config['path'], 'METADATA.json.new')).read()
else:
metadata_file = open(op.join(config['path'], 'METADATA.json')).read()
family_metadata = Metadata.get_family_metadata(metadata_file)
faces = []
for f in family_metadata.fonts:
faces.append({'name': f.full_name,
'basename': f.post_script_name,
'path': f.filename,
'meta': f})
metadata = yaml.load(open(op.join(config['path'], 'METADATA.yaml')))
upstreamdata = {}
upstreamdatafile = op.join(config['path'], 'upstream.yaml')
if op.exists(upstreamdatafile):
upstreamdata = yaml.load(open(upstreamdatafile))
data = {}
for fp in directory.BIN:
path = op.join(config['path'], '{}.yaml'.format(fp[:-4]))
if op.exists(path):
data[fp] = yaml.load(open(path))
data.update(metadata)
data.update(upstreamdata)
fontpaths = [op.join(config['path'], path)
for path in directory.BIN]
ttftablesizes = get_fonts_table_sizes(fontpaths)
ftables_data = get_fonts_table_sizes_grouped(fontpaths)
buildstate = yaml.load(open(op.join(config['path'], 'build.state.yaml')))
autohint_sizes = buildstate.get('autohinting_sizes', [])
vmet = get_metric_view(fontpaths)
fonts = [(path, FontFactory.openfont(op.join(config['path'], path)))
for path in directory.BIN]
stems = [get_stem_info(op.join(config['path'], path)) for path in directory.BIN]
new_data = []
for k in data:
d = {'name': k}
d.update(data[k])
new_data.append(d)
report_app = report_utils.BuildInfo(config)
metrics = {'data': vmet._its_metrics, 'headings': vmet._its_metrics_header}
table_sizes = {'tables': ttftablesizes[0], 'sizes': ttftablesizes[1:]}
report_app.summary_page.dump_file(metrics, 'metrics.json')
report_app.summary_page.dump_file(stems, 'stems.json')
report_app.summary_page.dump_file(table_sizes, 'table_sizes.json')
report_app.summary_page.dump_file(autohint_sizes, 'autohint_sizes.json')
report_app.summary_page.dump_file(new_data, 'tests.json')
report_app.summary_page.dump_file({'mean': ftables_data.mean,
'grouped': ftables_data.grouped,
'delta': ftables_data.delta},
'fonts_tables_grouped.json')
for face in family_metadata.fonts:
face_template = "@font-face {{ font-family: {}; src: url(fonts/{});}}\n".format(face.metadata_object['postScriptName'], face.metadata_object['filename'])
report_app.write_file(face_template, op.join(report_app.css_dir, 'faces.css'), mode='a')
fonts_serialized = dict([(str(path), font_factory_instance_to_dict(fontaine)) for path, fontaine in fonts])
report_app.summary_page.dump_file(fonts_serialized, 'fontaine_fonts.json')
    # Temporarily disabled: this code path is broken
if False:
fonts_orthography = get_orthography(fonts)
report_app.summary_page.dump_file({'fonts_list': fonts_orthography[0],
'coverage_averages': fonts_orthography[1],
'fonts_info': fonts_orthography[2]},
'fonts_orthography.json')
|
import os
import logging
from threading import Lock
from flask import Flask, Response, jsonify, request, make_response, json, url_for, render_template
from flask_api import status # HTTP Status Codes
from flasgger import Swagger
from redis import Redis
from redis.exceptions import ConnectionError
from promotion import Promotion
app = Flask(__name__)
app.config['LOGGING_LEVEL'] = logging.INFO
app.config['SWAGGER'] = {
"swagger_version": "2.0",
"specs": [
{
"version": "1.0.0",
"title": "DevOps Swagger Promotion App",
"description": "This is a Promotion server.",
"endpoint": 'v1_spec',
"route": '/v1/spec'
}
]
}
Swagger(app)
HTTP_200_OK = 200
HTTP_201_CREATED = 201
HTTP_204_NO_CONTENT = 204
HTTP_400_BAD_REQUEST = 400
HTTP_404_NOT_FOUND = 404
HTTP_409_CONFLICT = 409
@app.route('/')
def index():
return render_template('index.html')
@app.route('/promotions', methods=['GET'])
def list_promotions():
"""
Retrieve a list of all Promotions
    This endpoint returns all Promotions, optionally filtered by the 'kind' query parameter
---
tags:
- Promotions
description: The Promotions endpoint allows you to query Promotion schemes
parameters:
- name: kind
in: query
description: the kind of Promotion scheme you are looking for
required: false
type: string
responses:
200:
description: An array of Promotion schemes
schema:
type: array
items:
schema:
id: Promotion
properties:
id:
type: integer
description: unique id assigned internally by service
name:
type: string
description: the promotion scheme's name
kind:
type: string
description: the kind of Promotion scheme (sales-promotion1, sale-senior-promotion, black-friday-promotion etc.)
description:
type: string
description: the complete detail of the Promotion scheme and the criteria for the promotion.
status:
type: string
description: the status of promotion scheme whether it is currently "Active" or "Inactive"
404:
description: No promotion schemes found.
"""
results = []
kind = request.args.get('kind')
if kind:
result = Promotion.find_by_kind(redis, kind)
else:
result = Promotion.all(redis)
if len(result) > 0:
results = [Promotion.serialize(promotion) for promotion in result]
return make_response(jsonify(results), HTTP_200_OK)
else:
results = { 'error' : 'No promotions found' }
rc = HTTP_404_NOT_FOUND
return make_response(jsonify(results), rc)
@app.route('/promotions/status/active', methods=['GET'])
def list_all_active_promotions():
"""
    Retrieve a list of all active Promotions
    This endpoint returns all active Promotions, or a 404 error if none are found
---
tags:
- Promotions
description: The Promotions endpoint allows you to query Promotion schemes
produces:
- application/json
responses:
200:
description: An array of Promotion schemes
schema:
type: array
items:
schema:
id: Promotion
properties:
id:
type: integer
description: unique id assigned internally by service
name:
type: string
description: the promotion scheme's name
kind:
type: string
description: the kind of Promotion scheme (sales-promotion1, sale-senior-promotion, black-friday-promotion etc.)
description:
type: string
description: the complete detail of the Promotion scheme and the criteria for the promotion.
status:
type: string
description: the status of promotion scheme. Will always be "Active"
404:
description: No promotion schemes found.
"""
results = Promotion.find_by_status(redis, 'ACTIVE')
if len(results) > 0:
result = [Promotion.serialize(promotion) for promotion in results]
rc = HTTP_200_OK
else:
result = { 'error' : 'No active promotions found' }
rc = HTTP_404_NOT_FOUND
return make_response(jsonify(result), rc)
@app.route('/promotions/status/inactive', methods=['GET'])
def list_all_inactive_promotions():
"""
Retrieve a list of all inactive Promotions
    This endpoint returns all inactive Promotions, or a 404 error if none are found
---
tags:
- Promotions
description: The Promotions endpoint allows you to query Promotion schemes
produces:
- application/json
responses:
200:
description: An array of Promotion schemes
schema:
type: array
items:
schema:
id: Promotion
properties:
id:
type: integer
description: unique id assigned internally by service
name:
type: string
description: the promotion scheme's name
kind:
type: string
description: the kind of Promotion scheme (sales-promotion1, sale-senior-promotion, black-friday-promotion etc.)
description:
type: string
description: the complete detail of the Promotion scheme and the criteria for the promotion.
status:
type: string
description: the status of promotion scheme whether it is currently "Active" or "Inactive"
      404:
        description: No promotion schemes found.
"""
results = Promotion.find_by_status(redis, 'INACTIVE')
if len(results) > 0:
result = [Promotion.serialize(promotion) for promotion in results]
rc = HTTP_200_OK
else:
result = { 'error' : 'No inactive promotions found' }
rc = HTTP_404_NOT_FOUND
return make_response(jsonify(result), rc)
@app.route('/promotions/<int:id>', methods=['GET'])
def get_promotions(id):
"""
Retrieve a single Promotion
    This endpoint will return a Promotion based on its id
---
tags:
- Promotions
produces:
- application/json
parameters:
- name: id
in: path
description: ID of promotion to retrieve
type: integer
required: true
responses:
200:
description: Promotion returned
schema:
id: Promotion
properties:
id:
type: integer
description: unique id assigned internally by service
name:
type: string
description: name for the Promotion scheme
kind:
type: string
description: the kind of Promotion scheme (sales-promotion1, sale-senior-promotion, black-friday-promotion etc.)
description:
type: string
description: the complete detail of the Promotion scheme and the criteria for the promotion.
status:
type: string
description: the status of promotion scheme whether it is currently "Active" or "Inactive"
404:
description: Promotion not found
"""
promotion = Promotion.find(redis, id)
if promotion:
message = promotion.serialize()
rc = HTTP_200_OK
else:
message = { 'error' : 'Promotion with id: %s was not found' % str(id) }
rc = HTTP_404_NOT_FOUND
return make_response(jsonify(message), rc)
@app.route('/promotions/kind/<kind>', methods=['GET'])
def get_promotions_kind(kind):
"""
Retrieve all promotions for one kind
    This endpoint will return a Promotion based on its kind
---
tags:
- Promotions
produces:
- application/json
parameters:
- name: kind
in: path
description: the kind of Promotion scheme you are looking for
type: string
required: true
responses:
200:
description: Promotion returned
schema:
id: Promotion
properties:
id:
type: integer
description: unique id assigned internally by service
name:
type: string
description: name for the Promotion scheme
kind:
type: string
description: the kind of Promotion scheme (sales-promotion1, sale-senior-promotion, black-friday-promotion etc.)
description:
type: string
description: the complete detail of the Promotion scheme and the criteria for the promotion.
status:
type: string
description: the status of promotion scheme whether it is currently "Active" or "Inactive"
404:
description: Promotion not found
"""
results = Promotion.find_by_kind(redis, kind.upper())
if len(results) > 0:
result = [Promotion.serialize(promotion) for promotion in results]
rc = HTTP_200_OK
else:
result = { 'error' : 'Promotion with kind: %s was not found' % str(kind) }
rc = HTTP_404_NOT_FOUND
return make_response(jsonify(result), rc)
@app.route('/promotions/<int:id>/cancel', methods=['PUT'])
def cancel_promotions(id):
"""
Cancel a single Promotion
    This endpoint will set the status of the promotion to Inactive on success, or return an error message if the promotion is not found.
---
tags:
- Promotions
parameters:
- name: id
in: path
description: ID of promotion to cancel
type: integer
required: true
responses:
200:
description: success message, 'Cancelled the promotion with given id'
404:
description: error message, 'Promotion with given id was not found'
"""
promotion = Promotion.find(redis, id)
if promotion:
promotion = Promotion.cancel_by_id(redis,id)
promotion.save(redis)
message = {'Success' : 'Cancelled the Promotion with id ' + str(id)}
rc = HTTP_200_OK
else:
message = { 'error' : 'Promotion %s was not found' % id }
rc = HTTP_404_NOT_FOUND
return make_response(jsonify(message), rc)
@app.route('/promotions', methods=['POST'])
def create_promotions():
"""
Creates a Promotion
    This endpoint will create a Promotion scheme based on the data in the body that is posted
---
tags:
- Promotions
consumes:
- application/json
produces:
- application/json
parameters:
- in: body
name: body
required: true
schema:
id: data
required:
- name
- kind
- description
properties:
name:
type: string
description: name for the Promotion scheme
kind:
type: string
description: the kind of Promotion scheme (sales-promotion1, sale-senior-promotion, black-friday-promotion etc.)
description:
type: string
description: the complete detail of the Promotion scheme and the criteria for the promotion.
responses:
201:
description: Promotion created
schema:
id: Promotion
properties:
id:
type: integer
description: unique id assigned internally by service
name:
type: string
description: name for the Promotion scheme
kind:
type: string
description: the kind of Promotion scheme (sales-promotion1, sale-senior-promotion, black-friday-promotion etc.)
description:
type: string
description: the complete detail of the Promotion scheme and the criteria for the promotion.
status:
type: string
description: the status of promotion scheme with the value "Active"
400:
description: Bad Request (the posted data was not valid)
"""
id = 0
payload = request.get_json()
    print(payload)
if Promotion.validate(payload):
promotion = Promotion(id, payload['name'], payload['description'], payload['kind'], 'Active')
promotion.save(redis)
id = promotion.id
message = promotion.serialize()
rc = HTTP_201_CREATED
else:
message = { 'error' : 'Data is not valid' }
rc = HTTP_400_BAD_REQUEST
response = make_response(jsonify(message), rc)
if rc == HTTP_201_CREATED:
response.headers['Location'] = url_for('get_promotions', id=id)
return response
@app.route('/promotions/<int:id>', methods=['PUT'])
def update_promotions(id):
"""
Update a Promotion
    This endpoint will update a Promotion based on the body that is posted
---
tags:
- Promotions
consumes:
- application/json
produces:
- application/json
parameters:
- name: id
in: path
description: ID of promotion to retrieve
type: integer
required: true
- in: body
name: body
schema:
id: data
required:
- name
- kind
- description
properties:
name:
type: string
description: name for the Promotion scheme
kind:
type: string
description: the kind of Promotion scheme (sales-promotion1, sale-senior-promotion, black-friday-promotion etc.)
description:
type: string
description: the complete detail of the Promotion scheme and the criteria for the promotion.
responses:
200:
description: Promotion Updated
schema:
id: Promotion
properties:
id:
type: integer
description: unique id assigned internally by service
name:
type: string
description: name for the Promotion scheme
kind:
type: string
description: the kind of Promotion scheme (sales-promotion1, sale-senior-promotion, black-friday-promotion etc.)
description:
type: string
description: the complete detail of the Promotion scheme and the criteria for the promotion.
status:
type: string
description: the status of promotion scheme whether it is currently "Active" or "Inactive"
400:
description: Bad Request (the posted data was not valid)
"""
promotion = Promotion.find(redis, id)
if promotion:
payload = request.get_json()
        print('payload is %s' % payload)
if Promotion.validate(payload):
promotion = Promotion(id, payload['name'], payload['description'], payload['kind'], promotion.status)
promotion.save(redis)
message = promotion.serialize()
rc = HTTP_200_OK
else:
message = { 'error' : 'Promotion data was not valid' }
rc = HTTP_400_BAD_REQUEST
else:
message = { 'error' : 'Promotion %s was not found' % id }
rc = HTTP_404_NOT_FOUND
return make_response(jsonify(message), rc)
@app.route('/promotions/<int:id>', methods=['DELETE'])
def delete_promotions(id):
"""
Delete a single Promotion
    This endpoint returns an empty response and deletes the promotion from the database
---
tags:
- Promotions
parameters:
- name: id
in: path
description: ID of promotion to delete
type: integer
required: true
responses:
204:
description: no content
"""
promotion = Promotion.find(redis, id)
if promotion:
promotion.delete(redis)
return make_response('', HTTP_204_NO_CONTENT)
def data_load(payload):
promotion = Promotion(0, payload['name'], payload['description'], payload['kind'],payload['status'])
promotion.save(redis)
def data_reset():
redis.flushall()
def connect_to_redis(hostname, port, password):
redis = Redis(host=hostname, port=port, password=password)
try:
redis.ping()
except ConnectionError:
redis = None
return redis
def initialize_redis():
global redis
redis = None
    # Get the credentials from the Bluemix environment
if 'VCAP_SERVICES' in os.environ:
app.logger.info("Using VCAP_SERVICES...")
VCAP_SERVICES = os.environ['VCAP_SERVICES']
services = json.loads(VCAP_SERVICES)
creds = services['rediscloud'][0]['credentials']
app.logger.info("Conecting to Redis on host %s port %s" % (creds['hostname'], creds['port']))
redis = connect_to_redis(creds['hostname'], creds['port'], creds['password'])
else:
app.logger.info("VCAP_SERVICES not found, checking localhost for Redis")
redis = connect_to_redis('127.0.0.1', 6379, None)
if not redis:
app.logger.info("No Redis on localhost, using: redis")
redis = connect_to_redis('redis', 6379, None)
if not redis:
# if you end up here, redis instance is down.
app.logger.error('*** FATAL ERROR: Could not connect to the Redis Service')
exit(1)
debug = (os.getenv('DEBUG', 'False') == 'True')
initialize_redis()
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
# Pull options from environment
app.run(host='0.0.0.0', port=int(port), debug=debug)
|
"""A Billing Account Resource."""
import json
from google.cloud.forseti.common.gcp_type import resource
class BillingAccountLifecycleState(resource.LifecycleState):
"""Represents the Billing Account's LifecycleState."""
pass
class BillingAccount(resource.Resource):
"""BillingAccount Resource."""
RESOURCE_NAME_FMT = 'billingAccounts/%s'
def __init__(
self,
billing_account_id,
full_name=None,
data=None,
name=None,
display_name=None,
parent=None,
lifecycle_state=BillingAccountLifecycleState.UNSPECIFIED):
"""Initialize.
Args:
billing_account_id (str): The billing account id.
            full_name (str): The full resource name and ancestry.
data (str): Resource representation of the billing account.
name (str): The billing account's unique GCP name, with the format
"billingAccounts/{id}".
display_name (str): The billing account's display name.
parent (Resource): The parent Resource.
            lifecycle_state (LifecycleState): The billing account's lifecycle
state.
"""
super(BillingAccount, self).__init__(
resource_id=billing_account_id,
resource_type=resource.ResourceType.BILLING_ACCOUNT,
name=name,
display_name=display_name,
parent=parent,
lifecycle_state=lifecycle_state)
self.full_name = full_name
self.data = data
@classmethod
def from_json(cls, parent, json_string):
"""Creates a billing account from a JSON string.
Args:
parent (Resource): resource this billing account belongs to.
json_string (str): JSON string of a billing account GCP resource.
Returns:
BillingAccount: billing account resource.
"""
acct_dict = json.loads(json_string)
name = acct_dict['name']
acct_id = name.split('/')[-1]
full_name = '{}billing_account/{}/'.format(parent.full_name, acct_id)
return cls(
billing_account_id=acct_id,
full_name=full_name,
data=json_string,
name=name,
display_name=acct_dict.get('displayName'),
parent=parent)
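# --- Hedged usage sketch (not part of the original module) ---
# Assumes a parent resource exposing `full_name` and the GCP billing API JSON
# shape consumed by `from_json` above; the account id below is hypothetical.
#   acct = BillingAccount.from_json(
#       org, '{"name": "billingAccounts/00A1B2-C3D4E5", '
#            '"displayName": "My Billing Account"}')
#   acct.full_name  # -> '<org full_name>billing_account/00A1B2-C3D4E5/'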
|
from muntjac.data.item import \
IItem, IPropertySetChangeEvent, IPropertySetChangeNotifier, \
IPropertySetChangeListener
from muntjac.util import EventObject
class PropertysetItem(IItem, IPropertySetChangeNotifier): # Cloneable
"""Class for handling a set of identified Properties. The elements
    contained in a C{PropertysetItem} can be referenced using locally unique
identifiers. The class supports listeners who are interested in changes
to the Property set managed by the class.
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: @VERSION@
"""
def __init__(self):
# Mapping from property id to property.
self._map = dict()
# List of all property ids to maintain the order.
self._list = list()
# List of property set modification listeners.
self._propertySetChangeListeners = list()
self._propertySetChangeCallbacks = dict()
def getItemProperty(self, idd):
"""Gets the Property corresponding to the given Property ID stored in
the Item. If the Item does not contain the Property, C{None} is
returned.
@param idd: the identifier of the Property to get.
@return: the Property with the given ID or C{None}
"""
return self._map.get(idd)
def getItemPropertyIds(self):
"""Gets the collection of IDs of all Properties stored in the Item.
@return: collection containing IDs of the Properties
stored the Item
"""
return list(self._list)
def removeItemProperty(self, idd):
"""Removes the Property identified by ID from the Item. This
functionality is optional. If the method is not implemented, the
method always returns C{False}.
@param idd: the ID of the Property to be removed.
@return: C{True} if the operation succeeded C{False} if not
"""
        # Can't remove missing properties
if idd not in self._map:
return False
del self._map[idd]
self._list.remove(idd)
# Send change events
self.fireItemPropertySetChange()
return True
def addItemProperty(self, idd, prop):
"""Tries to add a new Property into the Item.
@param id:
the ID of the new Property.
@param prop:
the Property to be added and associated with the id.
@return: C{True} if the operation succeeded, C{False} if not
"""
# Null ids are not accepted
if idd is None:
            raise ValueError('Item property id can not be null')
        # Can't add a property twice
if idd in self._map:
return False
# Put the property to map
self._map[idd] = prop
self._list.append(idd)
# Send event
self.fireItemPropertySetChange()
return True
def __str__(self):
"""Gets the string representation of the contents of the Item.
        The format of the string is a space-separated concatenation of the
string representations of the Properties contained by the Item.
@return: String representation of the Item contents
"""
retValue = ''
for i, propertyId in enumerate(self.getItemPropertyIds()):
retValue += str( self.getItemProperty(propertyId) )
if i < len(self.getItemPropertyIds()) - 1:
retValue += ' '
return retValue
def addListener(self, listener, iface=None):
"""Registers a new property set change listener for this Item.
@param listener: the new Listener to be registered.
"""
if (isinstance(listener, IPropertySetChangeListener) and
(iface is None or
issubclass(iface, IPropertySetChangeListener))):
self._propertySetChangeListeners.append(listener)
def addCallback(self, callback, eventType=None, *args):
if eventType is None:
eventType = callback._eventType
if issubclass(eventType, IPropertySetChangeEvent):
self._propertySetChangeCallbacks[callback] = args
else:
super(PropertysetItem, self).addCallback(callback,
eventType, *args)
def removeListener(self, listener, iface=None):
"""Removes a previously registered property set change listener.
@param listener: the Listener to be removed.
"""
if (isinstance(listener, IPropertySetChangeListener) and
(iface is None or
issubclass(iface, IPropertySetChangeListener))):
if listener in self._propertySetChangeListeners:
self._propertySetChangeListeners.remove(listener)
def removeCallback(self, callback, eventType=None):
if eventType is None:
eventType = callback._eventType
if issubclass(eventType, IPropertySetChangeEvent):
if callback in self._propertySetChangeCallbacks:
del self._propertySetChangeCallbacks[callback]
else:
super(PropertysetItem, self).removeCallback(callback, eventType)
def fireItemPropertySetChange(self):
"""Sends a Property set change event to all interested listeners."""
event = PropertySetChangeEvent(self)
for listener in self._propertySetChangeListeners:
listener.itemPropertySetChange(event)
        for callback, args in self._propertySetChangeCallbacks.items():
callback(event, *args)
def getListeners(self, eventType):
if issubclass(eventType, IPropertySetChangeEvent):
return list(self._propertySetChangeListeners)
return list()
def getCallbacks(self, eventType):
if issubclass(eventType, IPropertySetChangeEvent):
return dict(self._propertySetChangeCallbacks)
return dict()
def clone(self):
"""Creates and returns a copy of this object.
The method C{clone} performs a shallow copy of the C{PropertysetItem}.
Note: All arrays are considered to implement the interface Cloneable.
Otherwise, this method creates a new instance of the class of this
object and initializes all its fields with exactly the contents of the
corresponding fields of this object, as if by assignment, the contents
of the fields are not themselves cloned. Thus, this method performs a
"shallow copy" of this object, not a "deep copy" operation.
@raise CloneNotSupportedException:
if the object's class does not support the Cloneable
interface.
"""
npsi = PropertysetItem()
npsi.list = list(self._list) if self._list is not None else None
npsi.propertySetChangeListeners = list(self._propertySetChangeListeners)
npsi.map = self._map.copy()
return npsi
def __eq__(self, obj):
if (obj is None) or (not isinstance(obj, PropertysetItem)):
return False
other = obj
        if other._list != self._list:
            return False
        if other._map != self._map:
            return False
        if other._propertySetChangeListeners != self._propertySetChangeListeners:
            # A None listener list and an empty one are considered equal
            thisEmpty = ((self._propertySetChangeListeners is None)
                    or len(self._propertySetChangeListeners) == 0)
            otherEmpty = ((other._propertySetChangeListeners is None)
                    or len(other._propertySetChangeListeners) == 0)
            return thisEmpty and otherEmpty
        return True
def __hash__(self):
        # Lists and dicts are unhashable, so hash over immutable snapshots
        listHash = 0 if self._list is None else hash(tuple(self._list))
        mapHash = (0 if self._map is None
                else hash(frozenset(self._map.items())))
        listenerHash = (0 if not self._propertySetChangeListeners
                else hash(tuple(self._propertySetChangeListeners)))
        return (listHash ^ mapHash) ^ listenerHash
class PropertySetChangeEvent(EventObject, IItem, IPropertySetChangeEvent):
"""An C{event} object specifying an Item whose Property set has
changed.
@author: Vaadin Ltd.
@version: @VERSION@
"""
def __init__(self, source):
super(PropertySetChangeEvent, self).__init__(source)
def getItem(self):
"""Gets the Item whose Property set has changed.
@return: source object of the event as an C{Item}
"""
return self.getSource()
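# Illustrative usage sketch (editorial addition): exercises the Item API with
# a minimal stand-in Property, assuming PropertysetItem.__init__ (defined
# earlier in this module) initializes the _list, _map and listener containers
# and that getItemProperty is defined alongside them.
if __name__ == '__main__':
    class _StubProperty(object):
        def __init__(self, value):
            self._value = value
        def __str__(self):
            return str(self._value)
    item = PropertysetItem()
    item.addItemProperty('first', _StubProperty('Ada'))
    item.addItemProperty('last', _StubProperty('Lovelace'))
    print(str(item))                  # -> 'Ada Lovelace'
    item.removeItemProperty('first')
    print(item.getItemPropertyIds())  # -> ['last']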
|
from fabric.api import *
@task
def js():
"""
    Update jumpscale code repositories.
"""
run("jscode update -a jumpscale -r jp_jumpscale,jp_serverapps,jumpscale_core")
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('update_dataset_collection')
@click.argument("history_id", type=str)
@click.argument("dataset_collection_id", type=str)
@click.option(
"--deleted",
help="Mark or unmark history dataset collection as deleted",
is_flag=True
)
@click.option(
"--name",
help="Replace history dataset collection name with the given string",
type=str
)
@click.option(
"--visible",
help="Mark or unmark history dataset collection as visible",
is_flag=True
)
@pass_context
@custom_exception
@json_output
def cli(ctx, history_id, dataset_collection_id, deleted=None, name=None, visible=None):
"""Update history dataset collection metadata. Some of the attributes that can be modified are documented below.
Output:
the updated dataset collection attributes
.. versionchanged:: 0.8.0
Changed the return value from the status code (type int) to a dict.
"""
    # Forward only the options the user actually set
    kwargs = {}
    if deleted is not None:
        kwargs['deleted'] = deleted
    if name:
        kwargs['name'] = name
    if visible is not None:
        kwargs['visible'] = visible
return ctx.gi.histories.update_dataset_collection(history_id, dataset_collection_id, **kwargs)
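# Example invocation (parsec subcommand layout assumed):
#   parsec histories update_dataset_collection <history_id> <collection_id> --name "New name"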
|
import collections
from heat.engine import clients
from heat.common import exception
from heat.common import template_format
from heat.engine import parser
from heat.engine import resource
from heat.engine import scheduler
from heat.tests.common import HeatTestCase
from heat.tests.fakes import FakeKeystoneClient
from heat.tests.v1_1 import fakes
from heat.tests import utils
from novaclient.v1_1 import security_groups as nova_sg
from novaclient.v1_1 import security_group_rules as nova_sgr
from neutronclient.common.exceptions import NeutronClientException
from neutronclient.v2_0 import client as neutronclient
NovaSG = collections.namedtuple('NovaSG',
' '.join([
'name',
'id',
'rules',
'description',
]))
class SecurityGroupTest(HeatTestCase):
test_template_nova = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_sg:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: HTTP and SSH access
SecurityGroupIngress:
- IpProtocol: tcp
FromPort: "22"
ToPort: "22"
CidrIp: 0.0.0.0/0
- IpProtocol: tcp
FromPort : "80"
ToPort : "80"
CidrIp : 0.0.0.0/0
- IpProtocol: tcp
SourceSecurityGroupName: test
- IpProtocol: icmp
SourceSecurityGroupId: "1"
'''
test_template_nova_with_egress = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_sg:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: HTTP and SSH access
SecurityGroupEgress:
- IpProtocol: tcp
FromPort: "22"
ToPort: "22"
CidrIp: 0.0.0.0/0
'''
test_template_neutron = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_sg:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: HTTP and SSH access
VpcId: aaaa
SecurityGroupIngress:
- IpProtocol: tcp
FromPort: "22"
ToPort: "22"
CidrIp: 0.0.0.0/0
- IpProtocol: tcp
FromPort : "80"
ToPort : "80"
CidrIp : 0.0.0.0/0
- IpProtocol: tcp
SourceSecurityGroupId: wwww
SecurityGroupEgress:
- IpProtocol: tcp
FromPort: "22"
ToPort: "22"
CidrIp: 10.0.1.0/24
- SourceSecurityGroupName: xxxx
'''
def setUp(self):
super(SecurityGroupTest, self).setUp()
self.fc = fakes.FakeClient()
self.m.StubOutWithMock(clients.OpenStackClients, 'nova')
self.m.StubOutWithMock(clients.OpenStackClients, 'keystone')
self.m.StubOutWithMock(nova_sgr.SecurityGroupRuleManager, 'create')
self.m.StubOutWithMock(nova_sgr.SecurityGroupRuleManager, 'delete')
self.m.StubOutWithMock(nova_sg.SecurityGroupManager, 'create')
self.m.StubOutWithMock(nova_sg.SecurityGroupManager, 'delete')
self.m.StubOutWithMock(nova_sg.SecurityGroupManager, 'get')
self.m.StubOutWithMock(nova_sg.SecurityGroupManager, 'list')
utils.setup_dummy_db()
self.m.StubOutWithMock(neutronclient.Client, 'create_security_group')
self.m.StubOutWithMock(
neutronclient.Client, 'create_security_group_rule')
self.m.StubOutWithMock(neutronclient.Client, 'show_security_group')
self.m.StubOutWithMock(
neutronclient.Client, 'delete_security_group_rule')
self.m.StubOutWithMock(neutronclient.Client, 'delete_security_group')
def create_stack(self, template):
t = template_format.parse(template)
self.stack = self.parse_stack(t)
self.assertIsNone(self.stack.create())
return self.stack
def parse_stack(self, t):
stack_name = 'test_stack'
tmpl = parser.Template(t)
stack = parser.Stack(utils.dummy_context(), stack_name, tmpl)
stack.store()
return stack
    def assertResourceState(self, rsrc, ref_id, metadata=None):
        metadata = metadata or {}
        self.assertIsNone(rsrc.validate())
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual(ref_id, rsrc.FnGetRefId())
self.assertEqual(metadata, dict(rsrc.metadata))
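    # Editorial note: these tests follow the mox record/replay/verify cycle:
    # expectations like SecurityGroupManager.create(...).AndReturn(sg) are
    # recorded on the stubbed managers, self.m.ReplayAll() switches to replay
    # mode before the stack is built, and self.m.VerifyAll() asserts that
    # every recorded call actually happened.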
@utils.stack_delete_after
def test_security_group_nova(self):
        # create script
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sg.SecurityGroupManager.list().AndReturn([NovaSG(
id=1,
name='test',
description='FAKE_SECURITY_GROUP',
rules=[],
)])
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
sg_name = utils.PhysName('test_stack', 'the_sg')
nova_sg.SecurityGroupManager.create(
sg_name,
'HTTP and SSH access').AndReturn(NovaSG(
id=2,
name=sg_name,
description='HTTP and SSH access',
rules=[]))
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sgr.SecurityGroupRuleManager.create(
2, 'tcp', '22', '22', '0.0.0.0/0', None).AndReturn(None)
nova_sgr.SecurityGroupRuleManager.create(
2, 'tcp', '80', '80', '0.0.0.0/0', None).AndReturn(None)
nova_sgr.SecurityGroupRuleManager.create(
2, 'tcp', None, None, None, 1).AndReturn(None)
nova_sgr.SecurityGroupRuleManager.create(
2, 'icmp', None, None, None, '1').AndReturn(None)
# delete script
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sg.SecurityGroupManager.get(2).AndReturn(NovaSG(
id=2,
name=sg_name,
description='HTTP and SSH access',
rules=[{
"from_port": '22',
"group": {},
"ip_protocol": "tcp",
"to_port": '22',
"parent_group_id": 2,
"ip_range": {
"cidr": "0.0.0.0/0"
},
'id': 130
}, {
'from_port': '80',
'group': {},
'ip_protocol': 'tcp',
'to_port': '80',
'parent_group_id': 2,
'ip_range': {
'cidr': '0.0.0.0/0'
},
'id': 131
}, {
'from_port': None,
'group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': 'test'
},
'ip_protocol': 'tcp',
'to_port': None,
'parent_group_id': 2,
'ip_range': {},
'id': 132
}, {
'from_port': None,
'group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': 'test'
},
'ip_protocol': 'icmp',
'to_port': None,
'parent_group_id': 2,
'ip_range': {},
'id': 133
}]
))
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sgr.SecurityGroupRuleManager.delete(130).AndReturn(None)
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sgr.SecurityGroupRuleManager.delete(131).AndReturn(None)
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sgr.SecurityGroupRuleManager.delete(132).AndReturn(None)
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sgr.SecurityGroupRuleManager.delete(133).AndReturn(None)
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sg.SecurityGroupManager.delete(2).AndReturn(None)
self.m.ReplayAll()
stack = self.create_stack(self.test_template_nova)
sg = stack['the_sg']
self.assertRaises(resource.UpdateReplace, sg.handle_update, {}, {}, {})
self.assertResourceState(sg, utils.PhysName('test_stack', 'the_sg'))
stack.delete()
self.m.VerifyAll()
@utils.stack_delete_after
def test_security_group_nova_exception(self):
        # create script
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
sg_name = utils.PhysName('test_stack', 'the_sg')
nova_sg.SecurityGroupManager.list().AndReturn([
NovaSG(
id=2,
name=sg_name,
description='HTTP and SSH access',
rules=[],
),
NovaSG(
id=1,
name='test',
description='FAKE_SECURITY_GROUP',
rules=[],
)
])
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sgr.SecurityGroupRuleManager.create(
2, 'tcp', '22', '22', '0.0.0.0/0', None).AndRaise(
clients.novaclient.exceptions.BadRequest(
400, 'Rule already exists'))
        # AndRaise (not AndReturn) so the BadRequest is actually raised,
        # matching the first expectation above
        nova_sgr.SecurityGroupRuleManager.create(
            2, 'tcp', '80', '80', '0.0.0.0/0', None).AndRaise(
                clients.novaclient.exceptions.BadRequest(
                    400, 'Rule already exists'))
        nova_sgr.SecurityGroupRuleManager.create(
            2, 'tcp', None, None, None, 1).AndRaise(
                clients.novaclient.exceptions.BadRequest(
                    400, 'Rule already exists'))
        nova_sgr.SecurityGroupRuleManager.create(
            2, 'icmp', None, None, None, '1').AndRaise(
                clients.novaclient.exceptions.BadRequest(
                    400, 'Rule already exists'))
# delete script
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sg.SecurityGroupManager.get(2).AndReturn(NovaSG(
id=2,
name=sg_name,
description='HTTP and SSH access',
rules=[{
"from_port": '22',
"group": {},
"ip_protocol": "tcp",
"to_port": '22',
"parent_group_id": 2,
"ip_range": {
"cidr": "0.0.0.0/0"
},
'id': 130
}, {
'from_port': '80',
'group': {},
'ip_protocol': 'tcp',
'to_port': '80',
'parent_group_id': 2,
'ip_range': {
'cidr': '0.0.0.0/0'
},
'id': 131
}, {
'from_port': None,
'group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': 'test'
},
'ip_protocol': 'tcp',
'to_port': None,
'parent_group_id': 2,
'ip_range': {},
'id': 132
}, {
'from_port': None,
'group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': 'test'
},
'ip_protocol': 'icmp',
'to_port': None,
'parent_group_id': 2,
'ip_range': {},
'id': 133
}]
))
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sgr.SecurityGroupRuleManager.delete(130).AndRaise(
clients.novaclient.exceptions.NotFound('goneburger'))
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sgr.SecurityGroupRuleManager.delete(131).AndRaise(
clients.novaclient.exceptions.NotFound('goneburger'))
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sgr.SecurityGroupRuleManager.delete(132).AndRaise(
clients.novaclient.exceptions.NotFound('goneburger'))
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sgr.SecurityGroupRuleManager.delete(133).AndRaise(
clients.novaclient.exceptions.NotFound('goneburger'))
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sg.SecurityGroupManager.delete(2).AndReturn(None)
clients.OpenStackClients.nova('compute').AndReturn(self.fc)
nova_sg.SecurityGroupManager.get(2).AndRaise(
clients.novaclient.exceptions.NotFound('goneburger'))
self.m.ReplayAll()
stack = self.create_stack(self.test_template_nova)
sg = stack['the_sg']
self.assertRaises(resource.UpdateReplace, sg.handle_update, {}, {}, {})
self.assertResourceState(sg, utils.PhysName('test_stack', 'the_sg'))
scheduler.TaskRunner(sg.delete)()
sg.state_set(sg.CREATE, sg.COMPLETE, 'to delete again')
sg.resource_id = 2
stack.delete()
self.m.VerifyAll()
def test_security_group_nova_with_egress_rules(self):
t = template_format.parse(self.test_template_nova_with_egress)
stack = self.parse_stack(t)
sg = stack['the_sg']
self.assertRaises(exception.EgressRuleNotAllowed, sg.validate)
@utils.stack_delete_after
def test_security_group_neutron(self):
        # create script
clients.OpenStackClients.keystone().AndReturn(
FakeKeystoneClient())
sg_name = utils.PhysName('test_stack', 'the_sg')
neutronclient.Client.create_security_group({
'security_group': {
'name': sg_name,
'description': 'HTTP and SSH access'
}
}).AndReturn({
'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': sg_name,
'description': 'HTTP and SSH access',
'security_group_rules': [{
"direction": "egress",
"ethertype": "IPv4",
"id": "aaaa-1",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": None,
"remote_ip_prefix": None,
"security_group_id": "aaaa",
"tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
}, {
"direction": "egress",
"ethertype": "IPv6",
"id": "aaaa-2",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": None,
"remote_ip_prefix": None,
"security_group_id": "aaaa",
"tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
}],
'id': 'aaaa'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndReturn({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'bbbb'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': '80',
'ethertype': 'IPv4',
'port_range_max': '80',
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndReturn({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': '80',
'ethertype': 'IPv4',
'port_range_max': '80',
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'cccc'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': 'wwww',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndReturn({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': 'wwww',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'dddd'
}
})
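        # Neutron attaches two default egress rules (aaaa-1, aaaa-2) to a new
        # group; the resource deletes them before creating the egress rules
        # declared in the template.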
neutronclient.Client.delete_security_group_rule('aaaa-1').AndReturn(
None)
neutronclient.Client.delete_security_group_rule('aaaa-2').AndReturn(
None)
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': None,
'remote_ip_prefix': '10.0.1.0/24',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndReturn({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': None,
'remote_ip_prefix': '10.0.1.0/24',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'eeee'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': 'xxxx',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': None,
'security_group_id': 'aaaa'
}
}).AndReturn({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': 'xxxx',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': None,
'security_group_id': 'aaaa',
'id': 'ffff'
}
})
# delete script
neutronclient.Client.show_security_group('aaaa').AndReturn({
'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': 'sc1',
'description': '',
'security_group_rules': [{
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': '22',
'id': 'bbbb',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': '22'
}, {
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': '80',
'id': 'cccc',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': '80'
}, {
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': None,
'id': 'dddd',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': 'wwww',
'remote_ip_prefix': None,
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': None
}, {
'direction': 'egress',
'protocol': 'tcp',
'port_range_max': '22',
'id': 'eeee',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '10.0.1.0/24',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': '22'
}, {
'direction': 'egress',
'protocol': None,
'port_range_max': None,
'id': 'ffff',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': None,
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': None
}],
'id': 'aaaa'}})
neutronclient.Client.delete_security_group_rule('bbbb').AndReturn(None)
neutronclient.Client.delete_security_group_rule('cccc').AndReturn(None)
neutronclient.Client.delete_security_group_rule('dddd').AndReturn(None)
neutronclient.Client.delete_security_group_rule('eeee').AndReturn(None)
neutronclient.Client.delete_security_group_rule('ffff').AndReturn(None)
neutronclient.Client.delete_security_group('aaaa').AndReturn(None)
self.m.ReplayAll()
stack = self.create_stack(self.test_template_neutron)
sg = stack['the_sg']
self.assertRaises(resource.UpdateReplace, sg.handle_update, {}, {}, {})
self.assertResourceState(sg, 'aaaa')
stack.delete()
self.m.VerifyAll()
@utils.stack_delete_after
def test_security_group_neutron_exception(self):
        # create script
clients.OpenStackClients.keystone().AndReturn(
FakeKeystoneClient())
sg_name = utils.PhysName('test_stack', 'the_sg')
neutronclient.Client.create_security_group({
'security_group': {
'name': sg_name,
'description': 'HTTP and SSH access'
}
}).AndReturn({
'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': sg_name,
'description': 'HTTP and SSH access',
'security_group_rules': [],
'id': 'aaaa'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndRaise(
NeutronClientException(status_code=409))
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': '80',
'ethertype': 'IPv4',
'port_range_max': '80',
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndRaise(
NeutronClientException(status_code=409))
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': 'wwww',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndRaise(
NeutronClientException(status_code=409))
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': None,
'remote_ip_prefix': '10.0.1.0/24',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': 'aaaa'
}
}).AndRaise(
NeutronClientException(status_code=409))
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'egress',
'remote_group_id': 'xxxx',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': None,
'security_group_id': 'aaaa'
}
}).AndRaise(
NeutronClientException(status_code=409))
# delete script
neutronclient.Client.show_security_group('aaaa').AndReturn({
'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': 'sc1',
'description': '',
'security_group_rules': [{
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': '22',
'id': 'bbbb',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': '22'
}, {
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': '80',
'id': 'cccc',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': '80'
}, {
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': None,
'id': 'dddd',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': 'wwww',
'remote_ip_prefix': None,
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': None
}, {
'direction': 'egress',
'protocol': 'tcp',
'port_range_max': '22',
'id': 'eeee',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '10.0.1.0/24',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': '22'
}, {
'direction': 'egress',
'protocol': None,
'port_range_max': None,
'id': 'ffff',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': None,
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': None
}],
'id': 'aaaa'}})
neutronclient.Client.delete_security_group_rule('bbbb').AndRaise(
NeutronClientException(status_code=404))
neutronclient.Client.delete_security_group_rule('cccc').AndRaise(
NeutronClientException(status_code=404))
neutronclient.Client.delete_security_group_rule('dddd').AndRaise(
NeutronClientException(status_code=404))
neutronclient.Client.delete_security_group_rule('eeee').AndRaise(
NeutronClientException(status_code=404))
neutronclient.Client.delete_security_group_rule('ffff').AndRaise(
NeutronClientException(status_code=404))
neutronclient.Client.delete_security_group('aaaa').AndRaise(
NeutronClientException(status_code=404))
neutronclient.Client.show_security_group('aaaa').AndRaise(
NeutronClientException(status_code=404))
self.m.ReplayAll()
stack = self.create_stack(self.test_template_neutron)
sg = stack['the_sg']
self.assertRaises(resource.UpdateReplace, sg.handle_update, {}, {}, {})
self.assertResourceState(sg, 'aaaa')
scheduler.TaskRunner(sg.delete)()
sg.state_set(sg.CREATE, sg.COMPLETE, 'to delete again')
sg.resource_id = 'aaaa'
stack.delete()
self.m.VerifyAll()
|
import collections
from oneview_redfish_toolkit.api.errors import \
OneViewRedfishException
from oneview_redfish_toolkit.api.errors import \
OneViewRedfishResourceNotFoundException
from oneview_redfish_toolkit.api.redfish_json_validator import \
RedfishJsonValidator
from oneview_redfish_toolkit import config
class RedfishError(RedfishJsonValidator):
"""Creates a Redfish Error Dict
Populates self.redfish with errors. Will not validate as there's no
schema to validate against.
"""
SCHEMA_NAME = None
def __init__(self, code, message):
"""Constructor
Populates self.redfish with error message.
"""
super().__init__(self.SCHEMA_NAME)
self.redfish["error"] = collections.OrderedDict()
        # Check that the code is a valid message key in the Base registry
if code not in config.get_registry_dict()["Base"]["Messages"]:
raise OneViewRedfishResourceNotFoundException(
"Registry {} not found.".format(code)
)
self.redfish["error"]["code"] = "Base.1.1." + code
self.redfish["error"]["message"] = message
self.redfish["error"]["@Message.ExtendedInfo"] = list()
def add_extended_info(
self,
message_id,
            message_args=None,
            related_properties=None):
"""Adds an item to ExtendedInfo list using values from DMTF registry
Adds an item to ExtendedInfo list using the values for Message,
Severity and Resolution from DMTF Base Registry.
Parameters:
            message_id: ID of the message; one of the keys in the Redfish
                Registry Messages
            message_args: list of strings to replace markers in the Redfish
                message. Must have the same length as the number of %
                markers found in the registry Message field
            related_properties: properties related to this error, if
                necessary
"""
        # Normalize the None defaults to fresh lists
        message_args = message_args or []
        related_properties = related_properties or []
        messages = config.get_registry_dict()["Base"]["Messages"]
# Verify if message_id exists in registry
try:
severity = messages[message_id]["Severity"]
except Exception:
raise OneViewRedfishResourceNotFoundException(
"Message id {} not found.".format(message_id)
)
message = messages[message_id]["Message"]
# Check if numbers of replacements and message_args length match
replaces = message.count('%')
replacements = len(message_args)
if replaces != replacements:
raise OneViewRedfishException(
                'Message has {} replacements to be made but {} args '
                'were sent'.format(replaces, replacements)
)
# Replacing the marks in the message. A better way to do this
# is welcome.
for i in range(replaces):
message = message.replace('%' + str(i + 1), message_args[i])
# Construct the dict
extended_info = collections.OrderedDict()
extended_info["@odata.type"] = "#Message.v1_0_5.Message"
extended_info["MessageId"] = "Base.1.1." + message_id
extended_info["Message"] = message
extended_info["RelatedProperties"] = related_properties
extended_info["MessageArgs"] = message_args
extended_info["Severity"] = severity
extended_info["Resolution"] = messages[message_id]["Resolution"]
# Append it to the list
self.redfish["error"]["@Message.ExtendedInfo"].append(extended_info)
|
from abc import ABCMeta, abstractmethod, abstractproperty
from base64 import b64encode
import json
import uuid
class ToJSON:
__metaclass__ = ABCMeta
@abstractmethod
def to_json_repr(self):
raise NotImplementedError
def to_json(self):
"Return a json representation of the object"
return json.dumps(self.to_json_repr())
class FromJSON:
@classmethod
def from_json(cls, json):
"Construct an object from json representation"
raise NotImplementedError
class FileType:
input = 'input'
output = 'output'
class File(ToJSON, object):
def __init__(self, localpath, remotepath, type=None, cache=True):
self.localpath = localpath
self.remotepath = remotepath
assert type is not None
self.type = type
self.cache = cache
self.uuid = uuid.uuid1().urn
def to_json_repr(self):
return self.__dict__
def to_json(self):
return json.dumps(self.to_json_repr())
class FileBlob(object):
def __init__(self, path, uuid):
self.path = path
self.uuid = uuid
with open(path, 'rb') as fd:
self.blob = b64encode(fd.read())
class Task(ToJSON, object):
def __init__(self, command):
self.command = command
self.files = list()
self._uuid = uuid.uuid1().urn
@property
def uuid(self):
"The UUID of the job"
return self._uuid
def add_file(self, file):
self.files.append(file)
    def to_json_repr(self):
        # A list (not a lazy map object) keeps the result JSON-serializable
        return dict(command=self.command,
                    files=[f.to_json_repr() for f in self.files],
                    uuid=self.uuid)
def to_json(self):
return json.dumps(self.to_json_repr())
class RunnableTask(ToJSON, object):
def __init__(self, task, uuids):
self.task = task
self.uuids = set(uuids)
self.input_files = list()
for file in task.files:
            if file.uuid in self.uuids:
blob = FileBlob(file.localpath, file.uuid)
self.input_files.append(blob)
class Job:
__metaclass__ = ABCMeta
@abstractproperty
def id(self):
"ID of the job in the inventory"
@abstractproperty
def status(self):
"Status of the task"
@abstractproperty
def location(self):
"Resource on which the task is assigned"
@abstractproperty
def created(self):
"When the task was created"
@abstractproperty
def modified(self):
"When the task was updated"
@abstractproperty
def task(self):
"The task to run"
class Status:
init = 'init'
registered = 'registered'
offered = 'offered'
scheduled = 'scheduled'
running = 'running'
fail = 'fail'
success = 'success'
class Inventory:
__metaclass__ = ABCMeta
@abstractmethod
def insert_tasks(self, collection):
"""Insert a collection of :class:`Job`s
:param collection: an iterable
:returns: task ids
:rtype: iterable of int
"""
@abstractmethod
def query_status(self, status, limit=None):
"""Retreive task ids with matching status
:param status: the :class:`Status`
:param limit: maximum number of results to return [None=all]
:returns: task ids
:rtype: iterable of int
"""
@abstractmethod
def update_status(self, jobids, status):
"""Set the status of the tasks
:param jobids: the job ids
:type jobids: iterable of int
:param status: the :class:`Status`
"""
@abstractmethod
def get_jobs(self, jobids):
"""Get the jobs
:param jobids: the ids
:returns: the tasks
:rtype: iterable of tasks
"""
|
from __future__ import print_function
import ctypes
from MTS.Packet import Packet
from MTS.word.HeaderWord import HeaderWord
c_uint8 = ctypes.c_uint8
class Header(ctypes.Union):
_fields_ = [
('word', ctypes.c_uint16),
('b', HeaderWord)
]
    _anonymous_ = ('b',)
def __init__(self, *args, **kwargs):
super(Header, self).__init__(*args, **kwargs)
if 'word' in kwargs:
self.word = kwargs['word']
self.b.is_valid()
def word_count(self):
return (self.b.LengthHigh << 7) | self.b.LengthLow
def read_packet(self, in_stream, debug_stream=None):
        # Read the bytes that are required to complete the packet;
        # word_count() decodes the split length field from the header word
        # (the inline bit math here had a precedence bug: & binds looser
        # than <<, so the high bits were always masked to zero)
        wordslen = self.word_count()
        byteslen = wordslen * 2
        if debug_stream:
            print(
                'words={:d}; bytes={:d}'.format(wordslen, byteslen),
                file=debug_stream
            )
        # Zero-filled buffer; readinto() fills it from the stream
        bodybytes = bytearray(byteslen)
        in_stream.readinto(bodybytes)
        if debug_stream:
            print(' '.join(['{:02X}'.format(b) for b in bodybytes]), file=debug_stream)
        # Take pairs of body bytes and combine them into words of data
        body = [(bodybytes[idx] << 8) | bodybytes[idx + 1]
                for idx in range(0, byteslen - 1, 2)]
        return Packet(self, body)
def desc(self):
return '0x{:04X} {} Len={:d} words '.format(
self.word,
'Data' if self.b.is_data() else 'Response',
self.b.length()
)
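# Illustrative usage sketch (editorial addition; 0x8002 is a hypothetical
# header value, since the real bit layout lives in HeaderWord):
#   import io
#   header = Header(word=0x8002)
#   packet = header.read_packet(io.BytesIO(b'\x12\x34\x56\x78'))
#   print(header.desc())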
|
import os
import warnings
import numpy as np
from pyrates.utility.genetic_algorithm import CGSGeneticAlgorithm
from pandas import DataFrame, read_hdf
from copy import deepcopy
class CustomGOA(CGSGeneticAlgorithm):
def eval_fitness(self, target: list, **kwargs):
# define simulation conditions
worker_file = self.cgs_config['worker_file'] if 'worker_file' in self.cgs_config else None
param_grid = self.pop.drop(['fitness', 'sigma', 'results'], axis=1)
result_vars = ['r_e', 'r_i']
param_grid, invalid_params = eval_params(param_grid)
chunk_size = [
300, # carpenters
300, # osttimor
200, # spanien
300, # animals
100, # kongo
100, # uganda
#100, # tschad
]
# perform simulations
if len(param_grid) > 0:
self.gs_config['init_kwargs'].update(kwargs)
res_file = self.cgs.run(
circuit_template=self.gs_config['circuit_template'],
param_grid=deepcopy(param_grid),
param_map=self.gs_config['param_map'],
simulation_time=self.gs_config['simulation_time'],
dt=self.gs_config['step_size'],
inputs=self.gs_config['inputs'],
outputs=self.gs_config['outputs'],
sampling_step_size=self.gs_config['sampling_step_size'],
permute=False,
chunk_size=chunk_size,
worker_file=worker_file,
worker_env=self.cgs_config['worker_env'],
gs_kwargs={'init_kwargs': self.gs_config['init_kwargs']},
worker_kwargs={'y': target},
result_concat_axis=0)
            results_tmp = read_hdf(res_file, key='Results/results')
# calculate fitness
for gene_id in param_grid.index:
self.pop.at[gene_id, 'fitness'] = 1.0 / results_tmp.at[gene_id, 'fitness']
self.pop.at[gene_id, 'results'] = [results_tmp.at[gene_id, v] for v in result_vars]
# set fitness of invalid parametrizations
for gene_id in invalid_params.index:
self.pop.at[gene_id, 'fitness'] = 0.0
self.pop.at[gene_id, 'results'] = [0. for _ in result_vars]
def fitness(y, t):
t = np.asarray(t)
weights = t/sum(t)
y = np.asarray(y).flatten()
t = np.asarray(t).flatten()
diff = np.asarray([0.0 if np.isnan(t_tmp) else y_tmp - t_tmp for y_tmp, t_tmp in zip(y, t)])
return np.sqrt(weights @ diff**2)
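# Editorial note: fitness(y, t) computes a target-weighted RMSE,
#   sqrt(sum_i w_i * (y_i - t_i)**2) with w_i = t_i / sum(t);
# entries whose target value is NaN are zeroed in the difference vector.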
def eval_params(params):
valid_params = []
invalid_params = []
for i, gene_id in enumerate(params.index):
# check validity conditions
valid = True
if params.loc[gene_id, 'J_ee'] > 0.3*params.loc[gene_id, 'J_ie']:
valid = False
if params.loc[gene_id, 'J_ie'] > 10.0*params.loc[gene_id, 'J_ei']:
valid = False
if params.loc[gene_id, 'J_ie'] < 0.1*params.loc[gene_id, 'J_ei']:
valid = False
# add parametrization to valid or invalid parameter sets
if valid:
valid_params.append(i)
else:
invalid_params.append(i)
valid_df = params.iloc[valid_params, :]
valid_df.index = valid_params
invalid_df = params.iloc[invalid_params, :]
invalid_df.index = invalid_params
return valid_df, invalid_df
if __name__ == "__main__":
warnings.filterwarnings("ignore")
pop_genes = {
'J_ee': {'min': 0, 'max': 10, 'size': 4, 'sigma': 0.1, 'loc': 2.0, 'scale': 0.5},
'J_ei': {'min': 0, 'max': 120, 'size': 4, 'sigma': 1.0, 'loc': 50.0, 'scale': 10.0},
'J_ie': {'min': 0, 'max': 120, 'size': 4, 'sigma': 1.0, 'loc': 50.0, 'scale': 10.0},
'J_ii': {'min': 0, 'max': 120, 'size': 4, 'sigma': 1.0, 'loc': 20.0, 'scale': 10.0},
'eta_e': {'min': -20, 'max': 30, 'size': 4, 'sigma': 1.0, 'loc': 0.0, 'scale': 10.0},
'eta_i': {'min': -20, 'max': 30, 'size': 4, 'sigma': 1.0, 'loc': 20.0, 'scale': 10.0},
}
param_map = {
'J_ee': {'vars': ['qif_simple/J_ee'], 'nodes': ['stn_gpe']},
'J_ei': {'vars': ['qif_simple/J_ei'], 'nodes': ['stn_gpe']},
'J_ie': {'vars': ['qif_simple/J_ie'], 'nodes': ['stn_gpe']},
'J_ii': {'vars': ['qif_simple/J_ii'], 'nodes': ['stn_gpe']},
'eta_e': {'vars': ['qif_simple/eta_e'], 'nodes': ['stn_gpe']},
'eta_i': {'vars': ['qif_simple/eta_i'], 'nodes': ['stn_gpe']},
}
T = 5000.
dt = 1e-2
dts = 1e-1
compute_dir = f"{os.getcwd()}/stn_gpe_simple_opt"
ga = CustomGOA(fitness_measure=fitness,
gs_config={
'circuit_template': f"{os.getcwd()}/config/stn_gpe/stn_gpe_reduced",
'permute_grid': True,
'param_map': param_map,
'simulation_time': T,
'step_size': dt,
'sampling_step_size': dts,
'inputs': {},
'outputs': {'r_e': "stn_gpe/qif_simple/R_e", 'r_i': 'stn_gpe/qif_simple/R_i'},
'init_kwargs': {'backend': 'numpy', 'solver': 'scipy', 'step_size': dt},
},
cgs_config={'nodes': [
'carpenters',
'osttimor',
'spanien',
'animals',
'kongo',
'uganda',
#'tschad'
],
'compute_dir': compute_dir,
'worker_file': f'{os.getcwd()}/stn_gpe_simple_cfit_worker.py',
'worker_env': "/nobackup/spanien1/rgast/anaconda3/envs/pyrates_test/bin/python3",
})
drop_save_dir = f'{compute_dir}/PopulationDrops/'
os.makedirs(drop_save_dir, exist_ok=True)
winner = ga.run(
initial_gene_pool=pop_genes,
gene_sampling_func=np.random.normal,
new_member_sampling_func=np.random.uniform,
target=[20.0, 60.0],
max_iter=100,
min_fit=1.0,
n_winners=20,
n_parent_pairs=3600,
n_new=476,
sigma_adapt=0.015,
candidate_save=f'{compute_dir}/GeneticCGSCandidate.h5',
drop_save=drop_save_dir,
new_pop_on_drop=True,
pop_save=f'{compute_dir}/Results/pop_summary'
)
winner.to_hdf(f'{compute_dir}/PopulationDrops/winner.h5', key='data')
|
from google.cloud import aiplatform_v1
def sample_create_specialist_pool():
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
# Initialize request argument(s)
specialist_pool = aiplatform_v1.SpecialistPool()
specialist_pool.name = "name_value"
specialist_pool.display_name = "display_name_value"
request = aiplatform_v1.CreateSpecialistPoolRequest(
parent="parent_value",
specialist_pool=specialist_pool,
)
# Make the request
operation = client.create_specialist_pool(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
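# Editorial note: this sample assumes Application Default Credentials are
# available to the client; create_specialist_pool returns a long-running
# operation, and operation.result() blocks until the pool is created.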
|
from flask import Flask, request
from pymongo import MongoClient
import time
import os
app = Flask(__name__)
client = MongoClient(os.getenv('MONGOHQ_URL'))
db = client.DummyData
cn = db.keyLogs
@app.route('/', methods=['GET'])
def boo():
    return "This server logs every value POST'd to it on url /log"
@app.route('/log', methods=['POST'])
def foo():
    data = str(request.get_data())
    now = time.time()
    # insert_one replaces the deprecated Collection.insert (PyMongo >= 3)
    cn.insert_one({"data": data, "time_stamp": now})
    return "logged"
if __name__ == '__main__':
    app.run(debug=True)
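# Example (editorial): log a value from the command line:
#   curl -X POST -d "hello world" http://localhost:5000/log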
|
"""WSGI Routers for the Assignment service."""
import functools
from keystone.assignment import controllers
from keystone.common import json_home
from keystone.common import router
from keystone.common import wsgi
build_os_inherit_relation = functools.partial(
json_home.build_v3_extension_resource_relation,
extension_name='OS-INHERIT', extension_version='1.0')
class Public(wsgi.ComposableRouter):
def add_routes(self, mapper):
tenant_controller = controllers.TenantAssignment()
mapper.connect('/tenants',
controller=tenant_controller,
action='get_projects_for_token',
conditions=dict(method=['GET']))
class Admin(wsgi.ComposableRouter):
def add_routes(self, mapper):
# Role Operations
roles_controller = controllers.RoleAssignmentV2()
mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles',
controller=roles_controller,
action='get_user_roles',
conditions=dict(method=['GET']))
mapper.connect('/users/{user_id}/roles',
controller=roles_controller,
action='get_user_roles',
conditions=dict(method=['GET']))
class Routers(wsgi.RoutersBase):
def append_v3_routers(self, mapper, routers):
project_controller = controllers.ProjectAssignmentV3()
self._add_resource(
mapper, project_controller,
path='/users/{user_id}/projects',
get_head_action='list_user_projects',
rel=json_home.build_v3_resource_relation('user_projects'),
path_vars={
'user_id': json_home.Parameters.USER_ID,
})
routers.append(
router.Router(controllers.RoleV3(), 'roles', 'role',
resource_descriptions=self.v3_resources,
method_template='%s_wrapper'))
implied_roles_controller = controllers.ImpliedRolesV3()
self._add_resource(
mapper, implied_roles_controller,
path='/roles/{prior_role_id}/implies',
rel=json_home.build_v3_resource_relation('implied_roles'),
get_action='list_implied_roles',
status=json_home.Status.EXPERIMENTAL,
path_vars={
'prior_role_id': json_home.Parameters.ROLE_ID,
}
)
self._add_resource(
mapper, implied_roles_controller,
path='/roles/{prior_role_id}/implies/{implied_role_id}',
put_action='create_implied_role',
delete_action='delete_implied_role',
head_action='check_implied_role',
get_action='get_implied_role',
rel=json_home.build_v3_resource_relation('implied_role'),
status=json_home.Status.EXPERIMENTAL,
path_vars={
'prior_role_id': json_home.Parameters.ROLE_ID,
'implied_role_id': json_home.Parameters.ROLE_ID
}
)
self._add_resource(
mapper, implied_roles_controller,
path='/role_inferences',
get_action='list_role_inference_rules',
rel=json_home.build_v3_resource_relation('role_inferences'),
status=json_home.Status.EXPERIMENTAL,
path_vars={}
)
grant_controller = controllers.GrantAssignmentV3()
self._add_resource(
mapper, grant_controller,
path='/projects/{project_id}/users/{user_id}/roles/{role_id}',
get_head_action='check_grant',
put_action='create_grant',
delete_action='revoke_grant',
rel=json_home.build_v3_resource_relation('project_user_role'),
path_vars={
'project_id': json_home.Parameters.PROJECT_ID,
'role_id': json_home.Parameters.ROLE_ID,
'user_id': json_home.Parameters.USER_ID,
})
self._add_resource(
mapper, grant_controller,
path='/projects/{project_id}/groups/{group_id}/roles/{role_id}',
get_head_action='check_grant',
put_action='create_grant',
delete_action='revoke_grant',
rel=json_home.build_v3_resource_relation('project_group_role'),
path_vars={
'group_id': json_home.Parameters.GROUP_ID,
'project_id': json_home.Parameters.PROJECT_ID,
'role_id': json_home.Parameters.ROLE_ID,
})
self._add_resource(
mapper, grant_controller,
path='/projects/{project_id}/users/{user_id}/roles',
get_head_action='list_grants',
rel=json_home.build_v3_resource_relation('project_user_roles'),
path_vars={
'project_id': json_home.Parameters.PROJECT_ID,
'user_id': json_home.Parameters.USER_ID,
})
self._add_resource(
mapper, grant_controller,
path='/projects/{project_id}/groups/{group_id}/roles',
get_head_action='list_grants',
rel=json_home.build_v3_resource_relation('project_group_roles'),
path_vars={
'group_id': json_home.Parameters.GROUP_ID,
'project_id': json_home.Parameters.PROJECT_ID,
})
self._add_resource(
mapper, grant_controller,
path='/domains/{domain_id}/users/{user_id}/roles/{role_id}',
get_head_action='check_grant',
put_action='create_grant',
delete_action='revoke_grant',
rel=json_home.build_v3_resource_relation('domain_user_role'),
path_vars={
'domain_id': json_home.Parameters.DOMAIN_ID,
'role_id': json_home.Parameters.ROLE_ID,
'user_id': json_home.Parameters.USER_ID,
})
self._add_resource(
mapper, grant_controller,
path='/domains/{domain_id}/groups/{group_id}/roles/{role_id}',
get_head_action='check_grant',
put_action='create_grant',
delete_action='revoke_grant',
rel=json_home.build_v3_resource_relation('domain_group_role'),
path_vars={
'domain_id': json_home.Parameters.DOMAIN_ID,
'group_id': json_home.Parameters.GROUP_ID,
'role_id': json_home.Parameters.ROLE_ID,
})
self._add_resource(
mapper, grant_controller,
path='/domains/{domain_id}/users/{user_id}/roles',
get_head_action='list_grants',
rel=json_home.build_v3_resource_relation('domain_user_roles'),
path_vars={
'domain_id': json_home.Parameters.DOMAIN_ID,
'user_id': json_home.Parameters.USER_ID,
})
self._add_resource(
mapper, grant_controller,
path='/domains/{domain_id}/groups/{group_id}/roles',
get_head_action='list_grants',
rel=json_home.build_v3_resource_relation('domain_group_roles'),
path_vars={
'domain_id': json_home.Parameters.DOMAIN_ID,
'group_id': json_home.Parameters.GROUP_ID,
})
self._add_resource(
mapper, controllers.RoleAssignmentV3(),
path='/role_assignments',
get_head_action='list_role_assignments_wrapper',
rel=json_home.build_v3_resource_relation('role_assignments'))
self._add_resource(
mapper, grant_controller,
path='/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/'
'{role_id}/inherited_to_projects',
get_head_action='check_grant',
put_action='create_grant',
delete_action='revoke_grant',
rel=build_os_inherit_relation(
resource_name='domain_user_role_inherited_to_projects'),
path_vars={
'domain_id': json_home.Parameters.DOMAIN_ID,
'role_id': json_home.Parameters.ROLE_ID,
'user_id': json_home.Parameters.USER_ID,
})
self._add_resource(
mapper, grant_controller,
path='/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/'
'{role_id}/inherited_to_projects',
get_head_action='check_grant',
put_action='create_grant',
delete_action='revoke_grant',
rel=build_os_inherit_relation(
resource_name='domain_group_role_inherited_to_projects'),
path_vars={
'domain_id': json_home.Parameters.DOMAIN_ID,
'group_id': json_home.Parameters.GROUP_ID,
'role_id': json_home.Parameters.ROLE_ID,
})
self._add_resource(
mapper, grant_controller,
path='/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/'
'inherited_to_projects',
get_action='list_grants',
rel=build_os_inherit_relation(
resource_name='domain_group_roles_inherited_to_projects'),
path_vars={
'domain_id': json_home.Parameters.DOMAIN_ID,
'group_id': json_home.Parameters.GROUP_ID,
})
self._add_resource(
mapper, grant_controller,
path='/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/'
'inherited_to_projects',
get_action='list_grants',
rel=build_os_inherit_relation(
resource_name='domain_user_roles_inherited_to_projects'),
path_vars={
'domain_id': json_home.Parameters.DOMAIN_ID,
'user_id': json_home.Parameters.USER_ID,
})
self._add_resource(
mapper, grant_controller,
path='/OS-INHERIT/projects/{project_id}/users/{user_id}/roles/'
'{role_id}/inherited_to_projects',
get_head_action='check_grant',
put_action='create_grant',
delete_action='revoke_grant',
rel=build_os_inherit_relation(
resource_name='project_user_role_inherited_to_projects'),
path_vars={
'project_id': json_home.Parameters.PROJECT_ID,
'user_id': json_home.Parameters.USER_ID,
'role_id': json_home.Parameters.ROLE_ID,
})
self._add_resource(
mapper, grant_controller,
path='/OS-INHERIT/projects/{project_id}/groups/{group_id}/'
'roles/{role_id}/inherited_to_projects',
get_head_action='check_grant',
put_action='create_grant',
delete_action='revoke_grant',
rel=build_os_inherit_relation(
resource_name='project_group_role_inherited_to_projects'),
path_vars={
'project_id': json_home.Parameters.PROJECT_ID,
'group_id': json_home.Parameters.GROUP_ID,
'role_id': json_home.Parameters.ROLE_ID,
})
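# Editorial note: as a concrete example, the grant resources above map
#   PUT  /v3/projects/{project_id}/users/{user_id}/roles/{role_id}
# to GrantAssignmentV3.create_grant and the matching GET/HEAD to check_grant,
# while the OS-INHERIT variants register the same grants as inherited down
# to projects.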
|
"""Long or boring tests for vobjects."""
import vobject
from vobject import base, icalendar, behavior, vcard, hcalendar
import StringIO, re, dateutil.tz, datetime
import doctest, test_vobject, unittest
from pkg_resources import resource_stream
base.logger.setLevel(base.logging.FATAL)
def additional_tests():
flags = doctest.NORMALIZE_WHITESPACE | doctest.REPORT_ONLY_FIRST_FAILURE | doctest.ELLIPSIS
suite = unittest.TestSuite()
for module in base, test_vobject, icalendar, vobject, vcard:
suite.addTest(doctest.DocTestSuite(module, optionflags=flags))
suite.addTest(doctest.DocFileSuite(
'README.txt', 'test_files/more_tests.txt',
package='__main__', optionflags=flags
))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(additional_tests())
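# Editorial note: __test__ below is doctest's standard hook for extra named
# tests; each entry's value is executed as a docstring-style test when this
# module is run through doctest.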
testSilly="""
sillyname:name
profile:sillyprofile
stuff:folded
line
""" + "morestuff;asinine:this line is not folded, \
but in practice probably ought to be, as it is exceptionally long, \
and moreover demonstratively stupid"
icaltest=r"""BEGIN:VCALENDAR
CALSCALE:GREGORIAN
X-WR-TIMEZONE;VALUE=TEXT:US/Pacific
METHOD:PUBLISH
PRODID:-//Apple Computer\, Inc//iCal 1.0//EN
X-WR-CALNAME;VALUE=TEXT:Example
VERSION:2.0
BEGIN:VEVENT
SEQUENCE:5
DTSTART;TZID=US/Pacific:20021028T140000
RRULE:FREQ=Weekly;COUNT=10
DTSTAMP:20021028T011706Z
SUMMARY:Coffee with Jason
UID:EC9439B1-FF65-11D6-9973-003065F99D04
DTEND;TZID=US/Pacific:20021028T150000
BEGIN:VALARM
TRIGGER;VALUE=DURATION:-P1D
ACTION:DISPLAY
DESCRIPTION:Event reminder\, with comma\nand line feed
END:VALARM
END:VEVENT
BEGIN:VTIMEZONE
X-LIC-LOCATION:Random location
TZID:US/Pacific
LAST-MODIFIED:19870101T000000Z
BEGIN:STANDARD
DTSTART:19671029T020000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
TZOFFSETFROM:-0700
TZOFFSETTO:-0800
TZNAME:PST
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:19870405T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
TZOFFSETFROM:-0800
TZOFFSETTO:-0700
TZNAME:PDT
END:DAYLIGHT
END:VTIMEZONE
END:VCALENDAR"""
badDtStartTest="""BEGIN:VCALENDAR
METHOD:PUBLISH
VERSION:2.0
BEGIN:VEVENT
DTSTART:20021028
DTSTAMP:20021028T011706Z
SUMMARY:Coffee with Jason
UID:EC9439B1-FF65-11D6-9973-003065F99D04
END:VEVENT
END:VCALENDAR"""
badLineTest="""BEGIN:VCALENDAR
METHOD:PUBLISH
VERSION:2.0
BEGIN:VEVENT
DTSTART:19870405T020000
X-BAD/SLASH:TRUE
X-BAD_UNDERSCORE:TRUE
UID:EC9439B1-FF65-11D6-9973-003065F99D04
END:VEVENT
END:VCALENDAR"""
vcardtest =r"""BEGIN:VCARD
VERSION:3.0
FN:Daffy Duck Knudson (with Bugs Bunny and Mr. Pluto)
N:Knudson;Daffy Duck (with Bugs Bunny and Mr. Pluto)
NICKNAME:gnat and gnu and pluto
BDAY;value=date:02-10
TEL;type=HOME:+01-(0)2-765.43.21
TEL;type=CELL:+01-(0)5-555.55.55
ACCOUNT;type=HOME:010-1234567-05
ADR;type=HOME:;;Haight Street 512\;\nEscape\, Test;Novosibirsk;;80214;Gnuland
TEL;type=HOME:+01-(0)2-876.54.32
ORG:University of Novosibirsk\, Department of Octopus
Parthenogenesis
END:VCARD"""
vcardWithGroups = r"""home.begin:vcard
version:3.0
source:ldap://cn=Meister%20Berger,o=Universitaet%20Goerlitz,c=DE
name:Meister Berger
fn:Meister Berger
n:Berger;Meister
bday;value=date:1963-09-21
o:Universit=E6t G=F6rlitz
title:Mayor
title;language=de;value=text:Burgermeister
note:The Mayor of the great city of
Goerlitz in the great country of Germany.\nNext line.
email;internet:mb@goerlitz.de
home.tel;type=fax,voice;type=msg:+49 3581 123456
home.label:Hufenshlagel 1234\n
02828 Goerlitz\n
Deutschland
END:VCARD"""
lowercaseComponentNames = r"""begin:vcard
fn:Anders Bobo
n:Bobo;Anders
org:Bobo A/S;Vice President, Technical Support
adr:Rockfeller Center;;Mekastreet;Bobocity;;2100;Myworld
email;internet:bobo@example.com
tel;work:+123455
tel;fax:+123456
tel;cell:+123457
x-mozilla-html:FALSE
url:http://www.example.com
version:2.1
end:vcard"""
icalWeirdTrigger = r"""BEGIN:VCALENDAR
CALSCALE:GREGORIAN
X-WR-TIMEZONE;VALUE=TEXT:US/Pacific
METHOD:PUBLISH
PRODID:-//Apple Computer\, Inc//iCal 1.0//EN
X-WR-CALNAME;VALUE=TEXT:Example
VERSION:2.0
BEGIN:VEVENT
DTSTART:20021028T140000Z
BEGIN:VALARM
TRIGGER:20021028T120000Z
ACTION:DISPLAY
DESCRIPTION:This trigger is a date-time without a VALUE=DATE-TIME parameter
END:VALARM
END:VEVENT
END:VCALENDAR"""
badstream = r"""BEGIN:VCALENDAR
CALSCALE:GREGORIAN
X-WR-TIMEZONE;VALUE=TEXT:US/Pacific
METHOD:PUBLISH
PRODID:-//Apple Computer\, Inc//iCal 1.0//EN
X-WR-CALNAME;VALUE=TEXT:Example
VERSION:2.0
BEGIN:VEVENT
DTSTART:20021028T140000Z
BEGIN:VALARM
TRIGGER:a20021028120000
ACTION:DISPLAY
DESCRIPTION:This trigger has a nonsensical value
END:VALARM
END:VEVENT
END:VCALENDAR"""
timezones = r"""
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:STANDARD
DTSTART:19671029T020000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
TZOFFSETFROM:-0700
TZOFFSETTO:-0800
TZNAME:PST
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:19870405T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
TZOFFSETFROM:-0800
TZOFFSETTO:-0700
TZNAME:PDT
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VTIMEZONE
TZID:US/Eastern
BEGIN:STANDARD
DTSTART:19671029T020000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:19870405T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VTIMEZONE
TZID:Santiago
BEGIN:STANDARD
DTSTART:19700314T000000
TZOFFSETFROM:-0300
TZOFFSETTO:-0400
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SA
TZNAME:Pacific SA Standard Time
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:19701010T000000
TZOFFSETFROM:-0400
TZOFFSETTO:-0300
RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=2SA
TZNAME:Pacific SA Daylight Time
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VTIMEZONE
TZID:W. Europe
BEGIN:STANDARD
DTSTART:19701025T030000
TZOFFSETFROM:+0200
TZOFFSETTO:+0100
RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU
TZNAME:W. Europe Standard Time
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:19700329T020000
TZOFFSETFROM:+0100
TZOFFSETTO:+0200
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=-1SU
TZNAME:W. Europe Daylight Time
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VTIMEZONE
TZID:US/Fictitious-Eastern
LAST-MODIFIED:19870101T000000Z
BEGIN:STANDARD
DTSTART:19671029T020000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:19870405T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4;UNTIL=20050403T070000Z
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VTIMEZONE
TZID:America/Montreal
LAST-MODIFIED:20051013T233643Z
BEGIN:DAYLIGHT
DTSTART:20050403T070000
TZOFFSETTO:-0400
TZOFFSETFROM:+0000
TZNAME:EDT
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:20051030T020000
TZOFFSETTO:-0500
TZOFFSETFROM:-0400
TZNAME:EST
END:STANDARD
END:VTIMEZONE
"""
__test__ = { "Test readOne" :
r"""
>>> silly = base.readOne(testSilly, findBegin=False)
>>> silly
<SILLYPROFILE| [<MORESTUFF{}this line is not folded, but in practice probably ought to be, as it is exceptionally long, and moreover demonstratively stupid>, <SILLYNAME{}name>, <STUFF{}foldedline>]>
>>> silly.stuff
<STUFF{}foldedline>
>>> original = silly.serialize()
>>> f3 = StringIO.StringIO(original.decode("utf-8"))
>>> silly2 = base.readOne(f3)
>>> silly2.serialize()==original
True
>>> s3 = StringIO.StringIO('cn:Babs Jensen\r\ncn:Barbara J Jensen\r\nsn:Jensen\r\nemail:babs@umich.edu\r\nphone:+1 313 747-4454\r\nx-id:1234567890\r\n')
>>> ex1 = base.readOne(s3, findBegin=False)
>>> ex1
<*unnamed*| [<CN{}Babs Jensen>, <CN{}Barbara J Jensen>, <EMAIL{}babs@umich.edu>, <PHONE{}+1 313 747-4454>, <SN{}Jensen>, <X-ID{}1234567890>]>
>>> ex1.serialize()
'CN:Babs Jensen\r\nCN:Barbara J Jensen\r\nEMAIL:babs@umich.edu\r\nPHONE:+1 313 747-4454\r\nSN:Jensen\r\nX-ID:1234567890\r\n'
""",
"Import icaltest" :
r"""
>>> c = base.readOne(icaltest, validate=True)
>>> c.vevent.valarm.trigger
<TRIGGER{}-1 day, 0:00:00>
>>> c.vevent.dtstart.value
datetime.datetime(2002, 10, 28, 14, 0, tzinfo=<tzicalvtz 'US/Pacific'>)
>>> c.vevent.dtend.value
datetime.datetime(2002, 10, 28, 15, 0, tzinfo=<tzicalvtz 'US/Pacific'>)
>>> c.vevent.dtstamp.value
datetime.datetime(2002, 10, 28, 1, 17, 6, tzinfo=tzutc())
>>> c.vevent.valarm.description.value
u'Event reminder, with comma\nand line feed'
>>> c.vevent.valarm.description.serialize()
'DESCRIPTION:Event reminder\\, with comma\\nand line feed\r\n'
>>> vevent = c.vevent.transformFromNative()
>>> vevent.rrule
<RRULE{}FREQ=Weekly;COUNT=10>
""",
"Parsing tests" :
"""
>>> parseRDate = icalendar.MultiDateBehavior.transformToNative
>>> icalendar.stringToTextValues('')
['']
>>> icalendar.stringToTextValues('abcd,efgh')
['abcd', 'efgh']
>>> icalendar.stringToPeriod("19970101T180000Z/19970102T070000Z")
(datetime.datetime(1997, 1, 1, 18, 0, tzinfo=tzutc()), datetime.datetime(1997, 1, 2, 7, 0, tzinfo=tzutc()))
>>> icalendar.stringToPeriod("19970101T180000Z/PT1H")
(datetime.datetime(1997, 1, 1, 18, 0, tzinfo=tzutc()), datetime.timedelta(0, 3600))
>>> parseRDate(base.textLineToContentLine("RDATE;VALUE=DATE:19970304,19970504,19970704,19970904"))
<RDATE{'VALUE': ['DATE']}[datetime.date(1997, 3, 4), datetime.date(1997, 5, 4), datetime.date(1997, 7, 4), datetime.date(1997, 9, 4)]>
>>> parseRDate(base.textLineToContentLine("RDATE;VALUE=PERIOD:19960403T020000Z/19960403T040000Z,19960404T010000Z/PT3H"))
<RDATE{'VALUE': ['PERIOD']}[(datetime.datetime(1996, 4, 3, 2, 0, tzinfo=tzutc()), datetime.datetime(1996, 4, 3, 4, 0, tzinfo=tzutc())), (datetime.datetime(1996, 4, 4, 1, 0, tzinfo=tzutc()), datetime.timedelta(0, 10800))]>
""",
"read failure" :
"""
>>> vevent = base.readOne(badstream)
Traceback (most recent call last):
...
ParseError: At line 11: TRIGGER with no VALUE not recognized as DURATION or as DATE-TIME
>>> cal = base.readOne(badLineTest)
Traceback (most recent call last):
...
ParseError: At line 6: Failed to parse line: X-BAD/SLASH:TRUE
>>> cal = base.readOne(badLineTest, ignoreUnreadable=True)
>>> cal.vevent.x_bad_slash
Traceback (most recent call last):
...
AttributeError: x_bad_slash
>>> cal.vevent.x_bad_underscore
<X-BAD-UNDERSCORE{}TRUE>
""",
"ical trigger workaround" :
"""
>>> badical = base.readOne(icalWeirdTrigger)
>>> badical.vevent.valarm.description.value
u'This trigger is a date-time without a VALUE=DATE-TIME parameter'
>>> badical.vevent.valarm.trigger.value
datetime.datetime(2002, 10, 28, 12, 0, tzinfo=tzutc())
""",
"unicode test" :
r"""
>>> f = resource_stream(__name__, 'test_files/utf8_test.ics')
>>> vevent = base.readOne(f).vevent
>>> vevent.summary.value
u'The title \u3053\u3093\u306b\u3061\u306f\u30ad\u30c6\u30a3'
>>> summary = vevent.summary.value
>>> test = str(vevent.serialize()),
""",
# make sure date valued UNTILs in rrules are in a reasonable timezone,
# and include that day (12/28 in this test)
"recurrence test" :
r"""
>>> f = resource_stream(__name__, 'test_files/recurrence.ics')
>>> cal = base.readOne(f)
>>> dates = list(cal.vevent.rruleset)
>>> dates[0]
datetime.datetime(2006, 1, 26, 23, 0, tzinfo=tzutc())
>>> dates[1]
datetime.datetime(2006, 2, 23, 23, 0, tzinfo=tzutc())
>>> dates[-1]
datetime.datetime(2006, 12, 28, 23, 0, tzinfo=tzutc())
""",
"regular expression test" :
"""
>>> re.findall(base.patterns['name'], '12foo-bar:yay')
['12foo-bar', 'yay']
>>> re.findall(base.patterns['safe_char'], 'a;b"*,cd')
['a', 'b', '*', 'c', 'd']
>>> re.findall(base.patterns['qsafe_char'], 'a;b"*,cd')
['a', ';', 'b', '*', ',', 'c', 'd']
>>> re.findall(base.patterns['param_value'], '"quoted";not-quoted;start"after-illegal-quote', re.VERBOSE)
['"quoted"', '', 'not-quoted', '', 'start', '', 'after-illegal-quote', '']
>>> match = base.line_re.match('TEST;ALTREP="http://www.wiz.org":value:;"')
>>> match.group('value')
'value:;"'
>>> match.group('name')
'TEST'
>>> match.group('params')
';ALTREP="http://www.wiz.org"'
""",
"VTIMEZONE creation test:" :
"""
>>> f = StringIO.StringIO(timezones)
>>> tzs = dateutil.tz.tzical(f)
>>> tzs.get("US/Pacific")
<tzicalvtz 'US/Pacific'>
>>> icalendar.TimezoneComponent(_)
<VTIMEZONE | <TZID{}US/Pacific>>
>>> pacific = _
>>> print pacific.serialize()
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:STANDARD
DTSTART:20001029T020000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
TZNAME:PST
TZOFFSETFROM:-0700
TZOFFSETTO:-0800
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:20000402T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
TZNAME:PDT
TZOFFSETFROM:-0800
TZOFFSETTO:-0700
END:DAYLIGHT
END:VTIMEZONE
>>> (_)
<VTIMEZONE | <TZID{}US/Pacific>>
>>> santiago = icalendar.TimezoneComponent(tzs.get('Santiago'))
>>> ser = santiago.serialize()
>>> print ser
BEGIN:VTIMEZONE
TZID:Santiago
BEGIN:STANDARD
DTSTART:20000311T000000
RRULE:FREQ=YEARLY;BYDAY=2SA;BYMONTH=3
TZNAME:Pacific SA Standard Time
TZOFFSETFROM:-0300
TZOFFSETTO:-0400
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:20001014T000000
RRULE:FREQ=YEARLY;BYDAY=2SA;BYMONTH=10
TZNAME:Pacific SA Daylight Time
TZOFFSETFROM:-0400
TZOFFSETTO:-0300
END:DAYLIGHT
END:VTIMEZONE
>>> roundtrip = dateutil.tz.tzical(StringIO.StringIO(str(ser))).get()
>>> for year in range(2001, 2010):
... for month in (2, 9):
... dt = datetime.datetime(year, month, 15, tzinfo = roundtrip)
... if dt.replace(tzinfo=tzs.get('Santiago')) != dt:
... print "Failed for:", dt
>>> fict = icalendar.TimezoneComponent(tzs.get('US/Fictitious-Eastern'))
>>> print fict.serialize()
BEGIN:VTIMEZONE
TZID:US/Fictitious-Eastern
BEGIN:STANDARD
DTSTART:20001029T020000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:20000402T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4;UNTIL=20050403T070000Z
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
END:VTIMEZONE
""",
"Create iCalendar from scratch" :
"""
>>> cal = base.newFromBehavior('vcalendar', '2.0')
>>> cal.add('vevent')
<VEVENT| []>
>>> cal.vevent.add('dtstart').value = datetime.datetime(2006, 5, 9)
>>> cal.vevent.add('description').value = "Test event"
>>> pacific = dateutil.tz.tzical(StringIO.StringIO(timezones)).get('US/Pacific')
>>> cal.vevent.add('created').value = datetime.datetime(2006, 1, 1, 10, tzinfo=pacific)
>>> cal.vevent.add('uid').value = "Not very random UID"
>>> print cal.serialize()
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//PYVOBJECT//NONSGML Version 1//EN
BEGIN:VEVENT
UID:Not very random UID
DTSTART:20060509T000000
CREATED:20060101T180000Z
DESCRIPTION:Test event
END:VEVENT
END:VCALENDAR
""",
"Serializing with timezones test" :
"""
>>> from dateutil.rrule import rrule, rruleset, WEEKLY, MONTHLY
>>> pacific = dateutil.tz.tzical(StringIO.StringIO(timezones)).get('US/Pacific')
>>> cal = base.Component('VCALENDAR')
>>> cal.setBehavior(icalendar.VCalendar2_0)
>>> ev = cal.add('vevent')
>>> ev.add('dtstart').value = datetime.datetime(2005, 10, 12, 9, tzinfo = pacific)
>>> set = rruleset()
>>> set.rrule(rrule(WEEKLY, interval=2, byweekday=[2,4], until=datetime.datetime(2005, 12, 15, 9)))
>>> set.rrule(rrule(MONTHLY, bymonthday=[-1,-5]))
>>> set.exdate(datetime.datetime(2005, 10, 14, 9, tzinfo = pacific))
>>> ev.rruleset = set
>>> ev.add('duration').value = datetime.timedelta(hours=1)
>>> print cal.serialize()
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//PYVOBJECT//NONSGML Version 1//EN
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:STANDARD
DTSTART:20001029T020000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
TZNAME:PST
TZOFFSETFROM:-0700
TZOFFSETTO:-0800
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:20000402T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
TZNAME:PDT
TZOFFSETFROM:-0800
TZOFFSETTO:-0700
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VEVENT
UID:...
DTSTART;TZID=US/Pacific:20051012T090000
DURATION:PT1H
EXDATE;TZID=US/Pacific:20051014T090000
RRULE:FREQ=WEEKLY;BYDAY=WE,FR;INTERVAL=2;UNTIL=20051215T090000
RRULE:FREQ=MONTHLY;BYMONTHDAY=-1,-5
END:VEVENT
END:VCALENDAR
>>> apple = dateutil.tz.tzical(StringIO.StringIO(timezones)).get('America/Montreal')
>>> ev.dtstart.value = datetime.datetime(2005, 10, 12, 9, tzinfo = apple)
>>> print cal.serialize()
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//PYVOBJECT//NONSGML Version 1//EN
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:STANDARD
DTSTART:20001029T020000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
TZNAME:PST
TZOFFSETFROM:-0700
TZOFFSETTO:-0800
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:20000402T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
TZNAME:PDT
TZOFFSETFROM:-0800
TZOFFSETTO:-0700
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VTIMEZONE
TZID:America/Montreal
BEGIN:STANDARD
DTSTART:20000101T000000
RRULE:FREQ=YEARLY;BYMONTH=1;UNTIL=20040101T050000Z
TZNAME:EST
TZOFFSETFROM:-0500
TZOFFSETTO:-0500
END:STANDARD
BEGIN:STANDARD
DTSTART:20051030T020000
RRULE:FREQ=YEARLY;BYDAY=5SU;BYMONTH=10
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:20050403T070000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4;UNTIL=20050403T120000Z
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VEVENT
UID:...
DTSTART;TZID=America/Montreal:20051012T090000
DURATION:PT1H
EXDATE;TZID=US/Pacific:20051014T090000
RRULE:FREQ=WEEKLY;BYDAY=WE,FR;INTERVAL=2;UNTIL=20051215T090000
RRULE:FREQ=MONTHLY;BYMONTHDAY=-1,-5
END:VEVENT
END:VCALENDAR
""",
"Handling DATE without a VALUE=DATE" :
"""
>>> cal = base.readOne(badDtStartTest)
>>> cal.vevent.dtstart.value
datetime.date(2002, 10, 28)
""",
"Serializing iCalendar to hCalendar" :
"""
>>> cal = base.newFromBehavior('hcalendar')
>>> cal.behavior
<class 'vobject.hcalendar.HCalendar'>
>>> pacific = dateutil.tz.tzical(StringIO.StringIO(timezones)).get('US/Pacific')
>>> cal.add('vevent')
<VEVENT| []>
>>> cal.vevent.add('summary').value = "this is a note"
>>> cal.vevent.add('url').value = "http://microformats.org/code/hcalendar/creator"
>>> cal.vevent.add('dtstart').value = datetime.date(2006,2,27)
>>> cal.vevent.add('location').value = "a place"
>>> cal.vevent.add('dtend').value = datetime.date(2006,2,27) + datetime.timedelta(days = 2)
>>> event2 = cal.add('vevent')
>>> event2.add('summary').value = "Another one"
>>> event2.add('description').value = "The greatest thing ever!"
>>> event2.add('dtstart').value = datetime.datetime(1998, 12, 17, 16, 42, tzinfo = pacific)
>>> event2.add('location').value = "somewhere else"
>>> event2.add('dtend').value = event2.dtstart.value + datetime.timedelta(days = 6)
>>> hcal = cal.serialize()
>>> print hcal
<span class="vevent">
<a class="url" href="http://microformats.org/code/hcalendar/creator">
<span class="summary">this is a note</span>:
<abbr class="dtstart", title="20060227">Monday, February 27</abbr>
- <abbr class="dtend", title="20060301">Tuesday, February 28</abbr>
at <span class="location">a place</span>
</a>
</span>
<span class="vevent">
<span class="summary">Another one</span>:
<abbr class="dtstart", title="19981217T164200-0800">Thursday, December 17, 16:42</abbr>
- <abbr class="dtend", title="19981223T164200-0800">Wednesday, December 23, 16:42</abbr>
at <span class="location">somewhere else</span>
<div class="description">The greatest thing ever!</div>
</span>
""",
"Generate UIDs automatically test:" :
"""
>>> cal = base.newFromBehavior('vcalendar')
>>> cal.add('vevent').add('dtstart').value = datetime.datetime(2006,2,2,10)
>>> ser = cal.serialize()
>>> len(cal.vevent.uid_list)
1
""",
"VCARD 3.0 parse test:" :
r"""
>>> card = base.readOne(vcardtest)
>>> card.adr.value
<Address: Haight Street 512;\nEscape, Test\nNovosibirsk, 80214\nGnuland>
>>> print card.adr.value
Haight Street 512;
Escape, Test
Novosibirsk, 80214
Gnuland
>>> card.org.value
[u'University of Novosibirsk, Department of Octopus Parthenogenesis']
>>> print card.serialize()
BEGIN:VCARD
VERSION:3.0
ACCOUNT;TYPE=HOME:010-1234567-05
ADR;TYPE=HOME:;;Haight Street 512\;\nEscape\, Test;Novosibirsk;;80214;Gnul
and
BDAY;VALUE=date:02-10
FN:Daffy Duck Knudson (with Bugs Bunny and Mr. Pluto)
N:Knudson;Daffy Duck (with Bugs Bunny and Mr. Pluto);;;
NICKNAME:gnat and gnu and pluto
ORG:University of Novosibirsk\, Department of Octopus Parthenogenesis
TEL;TYPE=HOME:+01-(0)2-765.43.21
TEL;TYPE=CELL:+01-(0)5-555.55.55
TEL;TYPE=HOME:+01-(0)2-876.54.32
END:VCARD
""",
"Multi-text serialization test:" :
"""
>>> category = base.newFromBehavior('categories')
>>> category.value = ['Random category']
>>> print category.serialize().strip()
CATEGORIES:Random category
>>> category.value.append('Other category')
>>> print category.serialize().strip()
CATEGORIES:Random category,Other category
""",
"Semi-colon separated multi-text serialization test:" :
"""
>>> requestStatus = base.newFromBehavior('request-status')
>>> requestStatus.value = ['5.1', 'Service unavailable']
>>> print requestStatus.serialize().strip()
REQUEST-STATUS:5.1;Service unavailable
""",
"vCard groups test:" :
"""
>>> card = base.readOne(vcardWithGroups)
>>> card.group
u'home'
>>> card.tel.group
u'home'
>>> card.group = card.tel.group = 'new'
>>> card.tel.serialize().strip()
'new.TEL;TYPE=fax,voice,msg:+49 3581 123456'
>>> card.serialize().splitlines()[0]
'new.BEGIN:VCARD'
>>> dtstart = base.newFromBehavior('dtstart')
>>> dtstart.group = "badgroup"
>>> dtstart.serialize()
Traceback (most recent call last):
...
VObjectError: "<DTSTART{}> has a group, but this object doesn't support groups"
""",
"Lowercase components test:" :
"""
>>> card = base.readOne(lowercaseComponentNames)
>>> card.version
<VERSION{}2.1>
""",
"Default behavior test" :
"""
>>> card = base.readOne(vcardWithGroups)
>>> base.getBehavior('note') == None
True
>>> card.note.behavior
<class 'vobject.vcard.VCardTextBehavior'>
>>> print card.note.value
The Mayor of the great city of Goerlitz in the great country of Germany.
Next line.
"""
}
|
"""Shared utils among the dataset implementation."""
import collections
import contextlib
import dataclasses
import enum
import itertools
import re
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Type, TypeVar, Union
from unittest import mock
import chex
import jax3d.projects.nesf as j3d
from jax3d.projects.nesf.nerfstatic.utils import camera_utils
from jax3d.projects.nesf.nerfstatic.utils import gin_utils
from jax3d.projects.nesf.nerfstatic.utils import types
from jax3d.projects.nesf.utils import tree_utils
from jax3d.projects.nesf.utils.typing import f32, i32 # pylint: disable=g-multiple-import
import mediapy
import numpy as np
import skimage.transform
import tensorflow as tf
_T = TypeVar('_T')
_InMemoryLoadExamplesFn = Callable[..., Tuple[types.Batch, 'DatasetMetadata']]
_StreamingLoadExamplesFn = Callable[..., Tuple[tf.data.Dataset,
List['DatasetMetadata']]]
_DATASET_REGISTER: Dict[str, 'RegisteredDataset'] = {}
@dataclasses.dataclass
class RegisteredDataset:
"""Structure containing dataset metadata.
Attributes:
name: dataset name
make_examples_fn: Function which loads and returns the examples. If
`in_memory=True`, should return a `Batch` and a `DatasetMetadata`
representing a single scene. If `in_memory=False`, should return
`tf.data.Dataset` and a list of `DatasetMetadata` representing all
scenes.
in_memory: Whether the dataset is loaded in-memory or should be streamed.
config_cls: Optional class containing dataset-specific params
"""
name: str
make_examples_fn: Union[_InMemoryLoadExamplesFn, _StreamingLoadExamplesFn]
in_memory: bool = True
config_cls: Optional[Type[gin_utils.ConfigurableDataclass]] = None
def register_dataset(dataset: RegisteredDataset) -> None:
"""Register the dataset."""
assert dataset.name not in _DATASET_REGISTER
_DATASET_REGISTER[dataset.name] = dataset
def find_registered_dataset(name: str) -> RegisteredDataset:
"""Returns the registered dataset."""
registered_dataset = _DATASET_REGISTER.get(name.lower())
if registered_dataset is None:
raise ValueError(
f'Unknown dataset {name}. Should be one of {_DATASET_REGISTER.keys()}')
return registered_dataset
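# Example sketch (the 'dummy' name is hypothetical, not a real registered
# dataset): datasets are registered once at import time and looked up
# case-insensitively.
#
#   register_dataset(RegisteredDataset(
#       name='dummy', make_examples_fn=make_examples, in_memory=True))
#   find_registered_dataset('DUMMY')  # returns the 'dummy' entry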
@dataclasses.dataclass
class DatasetMetadata:
"""Metadata for a single scene in a dataset."""
# Mapping from ints to human-interpretable names for semantic category images.
labels: List[str] = dataclasses.field(default_factory=list)
# Camera parameters.
cameras: Optional[camera_utils.Camera] = None
# Human-interpretable identifier for this scene.
scene_name: Optional[str] = None
class ExampleType(enum.Enum):
"""Output format of the dataset.
Attributes:
RAY: Each batch shape is (local_device_count, local_batch_size /
local_device_count,...)
IMAGE: Each batch is (h, w, ...)
"""
RAY = enum.auto()
IMAGE = enum.auto()
def prefetch(iterator: Iterable[_T], *, buffer_size) -> Iterator[_T]:
"""Pre-fetch the iterator (synchronously)."""
iterator = iter(iterator)
queue = collections.deque()
# Pre-fill the queue with up to `buffer_size` elements
for x in itertools.islice(iterator, buffer_size):
queue.append(x)
while queue:
yield queue.popleft()
# Push the next element, if any, onto the queue
try:
queue.append(next(iterator))
except StopIteration:
pass
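# Usage sketch (illustrative only): wrap any iterable to keep a small
# synchronous read-ahead buffer while iterating.
#
#   for batch in prefetch(range(5), buffer_size=2):
#     print(batch)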
def _make_view(batch_shape) -> types.Views:
# Image ids are not broadcast with the pixels: their shape drops the trailing
# dims, e.g. `()` when rgb has shape `(800, 800, 3)`
id_shape = np.array(batch_shape[:-2])
image_ids = ['img0.png'] * id_shape.prod()
image_ids = np.array(image_ids).reshape(id_shape)
return types.Views(
rays=types.Rays(
scene_id=np.zeros((*batch_shape, 1), dtype=np.int32),
origin=np.random.ranf((*batch_shape, 3)).astype(np.float32),
direction=np.random.ranf((*batch_shape, 3)).astype(np.float32),
),
rgb=np.random.ranf((*batch_shape, 3)).astype(np.float32),
depth=np.random.ranf((*batch_shape, 1)).astype(np.float32),
semantics=np.zeros((*batch_shape, 1), dtype=np.int32),
image_ids=image_ids,
)
def make_examples(
target_batch_shape,
) -> types.Batch:
"""Creates placeholder examples of a batch."""
target_view = _make_view(target_batch_shape)
return types.Batch(target_view=target_view)
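# Example (hypothetical shapes): a placeholder batch for shape (1, 4, 8);
# the rgb array then comes out with shape (1, 4, 8, 3).
#
#   batch = make_examples(target_batch_shape=(1, 4, 8))
#   batch.target_view.rgb.shape  # -> (1, 4, 8, 3)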
@contextlib.contextmanager
def mock_sunds(examples: types.Batch):
"""Mock sunds to returns the dummy examples."""
ds = tf.data.Dataset.from_tensor_slices(examples)
def mock_streaming_make_examples_fn(**kwargs):
del kwargs
return ds, [DatasetMetadata()]
with mock.patch.object(
_DATASET_REGISTER['sunds'],
'make_examples_fn',
mock_streaming_make_examples_fn,
):
yield
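# Usage sketch (assumes a 'sunds' entry already exists in _DATASET_REGISTER,
# as the real dataset module registers it):
#
#   examples = make_examples(target_batch_shape=(1, 4, 8))
#   with mock_sunds(examples):
#     ds, metadata = _DATASET_REGISTER['sunds'].make_examples_fn()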
@chex.dataclass
class ImageSet:
"""Class for Images predicted by DeepLab."""
# string identifier for the scene this image comes from.
scene_name: str
# string identifier for which image within a scene this image comes from.
image_name: str
# A glob filepattern one can use to find the files held by this ImageSet.
glob_pattern: str
# RGB
rgb: f32['h w 3']
rgb_ground_truth: f32['h w 3']
semantic: i32['h w']
semantic_ground_truth: i32['h w']
class DeepLabImageLoader:
"""For loading colorized semantic maps.
Includes ground truth labels and RGB images.
"""
def __init__(self, xm_work_unit_dir: j3d.Path):
self.xm_work_unit_dir = xm_work_unit_dir
self._image_dir = xm_work_unit_dir / 'vis' / 'segmentation_results'
def step(self) -> int:
return np.nan
def load_all(self) -> List[ImageSet]:
return tree_utils.parallel_map(lambda x: self[x], list(range(len(self))))
def __getitem__(self, key: int) -> ImageSet:
return ImageSet(
scene_name='UNKNOWN_SCENE',
image_name='UNKNOWN_IMAGE',
glob_pattern=str(self._image_dir / f'{key:06d}_*.png'),
rgb=np.full((256, 256, 3), np.nan),
rgb_ground_truth=self._load_png(self._image_dir /
f'{key:06d}_image.png'),
semantic=self._load_png(self._image_dir / f'{key:06d}_prediction.png'),
semantic_ground_truth=self._load_png(self._image_dir /
f'{key:06d}_label.png'),
)
def __contains__(self, key: int) -> bool:
return 0 <= key < len(self)
def __iter__(self) -> Iterable[ImageSet]:
for idx in range(len(self)):
yield self[idx]
def __len__(self) -> int:
items = list(self._image_dir.glob('*.png'))
assert len(items) % 4 == 0, len(items)
return len(items) // 4
def _load_png(self, path: j3d.Path) -> np.ndarray:
image = mediapy.read_image(path)
image = image / 255.0
image = skimage.transform.resize(
image, (256, 256), order=1, preserve_range=True)
return image
class DeepLabSemanticMapLoader:
"""For loading raw semantic maps.
Does not include ground truth semantic labels or RGB images.
"""
def __init__(self, xm_work_unit_dir: j3d.Path):
self.xm_work_unit_dir = xm_work_unit_dir
self._image_dir = xm_work_unit_dir / 'vis' / 'raw_segmentation_results'
def step(self) -> int:
return np.nan
def load_all(self) -> List[ImageSet]:
return tree_utils.parallel_map(lambda x: self[x], list(range(len(self))))
def __getitem__(self, key: int) -> ImageSet:
filepath = self._filepaths[key]
# Parse scene, image names
match = re.search(r"b'(\d+)_rgba_(\d+)'\.png$", filepath.name)
if not match:
raise ValueError(filepath)
scene_name, image_name = match.groups()
return ImageSet(
scene_name=scene_name,
image_name=image_name,
glob_pattern=str(filepath),
rgb=np.full((256, 256, 3), np.nan),
rgb_ground_truth=np.full((256, 256, 3), np.nan),
semantic=self._load_png(filepath),
semantic_ground_truth=np.full((256, 256, 3), np.nan),
)
def __contains__(self, key: int) -> bool:
return 0 <= key < len(self)
def __iter__(self) -> Iterable[ImageSet]:
for idx in range(len(self)):
yield self[idx]
def __len__(self) -> int:
return len(self._filepaths)
def _load_png(self, path: j3d.Path) -> np.ndarray:
"""Load image from path."""
image = mediapy.read_image(path)
assert image.shape == (1024, 1024), image.shape
# Take the top-left pixel of every 4x4 square. This isn't the smartest
# thing to do, but it's a lot faster than the commented-out code below.
image = image[::4, ::4]
# # We want to resize the image back to (256, 256). We do so by grouping
# # 2x2 squares of pixels together and taking the most common semantic
# # category from each.
# image = np.reshape(image, (1024, 1024, 1))
# image = block_to_depth(image, block_size=4)
#
# # MultiShapeNet-13 has 14 semantic categories.
# image = most_common_value_along_axis(image, max_value=14)
return image
@property
def _filepaths(self) -> List[j3d.Path]:
return list(self._image_dir.glob('*.png'))
def filter_images_per_scene(all_images: List[ImageSet],
scene_name: str) -> Dict[int, i32['h w']]:
result = {}
for image in all_images:
if image.scene_name == scene_name:
result[int(image.image_name)] = image.semantic
return result
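# Example sketch (hypothetical paths): load every DeepLab prediction from a
# work unit and keep only the semantic maps belonging to one scene.
#
#   loader = DeepLabSemanticMapLoader(j3d.Path('/tmp/work_unit'))
#   per_scene = filter_images_per_scene(loader.load_all(), scene_name='42')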
|
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[
[TestAction.create_mini_vm, 'vm1', 'cluster=cluster2'],
[TestAction.reboot_vm, 'vm1'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup1'],
[TestAction.create_mini_vm, 'vm2', 'cluster=cluster1', 'flag=thick'],
[TestAction.create_volume, 'volume1', 'cluster=cluster2', 'flag=scsi'],
[TestAction.resize_data_volume, 'volume1', 5*1024*1024],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.delete_volume, 'volume1'],
[TestAction.recover_volume, 'volume1'],
[TestAction.add_image, 'image1', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.delete_vm_backup, 'vm1-backup1'],
[TestAction.delete_image, 'image1'],
[TestAction.expunge_image, 'image1'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_volume_backup, 'volume1', 'volume1-backup2'],
[TestAction.stop_vm, 'vm1'],
[TestAction.destroy_vm, 'vm2'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.create_volume, 'volume2', 'cluster=cluster1', 'flag=scsi'],
[TestAction.resize_data_volume, 'volume2', 5*1024*1024],
[TestAction.create_mini_vm, 'vm3', 'cluster=cluster1'],
[TestAction.attach_volume, 'vm3', 'volume2'],
[TestAction.delete_volume, 'volume2'],
[TestAction.create_volume, 'volume3', 'cluster=cluster2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_volume_backup, 'volume3', 'volume3-backup3'],
[TestAction.stop_vm, 'vm1'],
[TestAction.delete_volume_backup, 'volume3-backup3'],
[TestAction.create_mini_vm, 'vm4', 'cluster=cluster2', 'flag=thick'],
[TestAction.expunge_volume, 'volume2'],
[TestAction.reboot_vm, 'vm4'],
[TestAction.create_vm_backup, 'vm3', 'vm3-backup4'],
[TestAction.migrate_vm, 'vm3'],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.delete_volume_backup, 'volume1-backup2'],
])
'''
The final status:
Running:['vm4']
Stopped:['vm1', 'vm3']
Enabled:['vm3-backup4']
attached:['volume1', 'volume3']
Detached:[]
Deleted:['vm2', 'vm1-backup1', 'volume3-backup3', 'volume1-backup2']
Expunged:['volume2', 'image1']
Ha:[]
Group:
vm_backup1:['vm3-backup4']---vm3@
'''
|
"""Models for activity references."""
import core.storage.base_model.gae_models as base_models
import feconf
from google.appengine.ext import ndb
class ActivityReferencesModel(base_models.BaseModel):
"""Storage model for a list of activity references.
The id of each model instance is the name of the list. This should be one
of the constants in feconf.ALL_ACTIVITY_REFERENCE_LIST_TYPES.
"""
# The types and ids of activities to show in the library page. Each item
# in this list is a dict with two keys: 'type' and 'id'.
activity_references = ndb.JsonProperty(repeated=True)
@classmethod
def get_or_create(cls, list_name):
"""This creates the relevant model instance, if it does not already
exist.
"""
if list_name not in feconf.ALL_ACTIVITY_REFERENCE_LIST_TYPES:
raise Exception(
'Invalid ActivityReferencesModel id: %s' % list_name)
entity = cls.get(list_name, strict=False)
if entity is None:
entity = cls(id=list_name, activity_references=[])
entity.put()
return entity
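# Usage sketch (the 'featured' list name is assumed to be one of the
# constants in feconf.ALL_ACTIVITY_REFERENCE_LIST_TYPES):
#
#   model = ActivityReferencesModel.get_or_create('featured')
#   model.activity_references.append({'type': 'exploration', 'id': 'exp_id'})
#   model.put()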
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import sys
from resource_management import *
from yarn_conf import configure
class MapReduce2Client(Script):
def install(self, env):
self.install_packages(env)
self.configure(env)
def configure(self, env):
import params
env.set_params(params)
configure()
def status(self, env):
raise ClientComponentHasNoStatus()
if __name__ == "__main__":
MapReduce2Client().execute()
|
"""EmPOWER Feed Class."""
from datetime import datetime, timedelta
from tornado.httpclient import HTTPClient
from empower.persistence import Session
from empower.persistence.persistence import TblFeed
FEED_STATUS_ON = "on"
FEED_STATUS_OFF = "off"
class Feed(object):
"""Power consumption feed originating from an Energino."""
def __init__(self, feed_id):
self.feed_id = feed_id
self.created = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
self.updated = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
self.private = False
self.__pnfdev = None
self.mngt = None
self.datastreams = {}
@property
def pnfdev(self):
"""Return the PNFDev."""
return self.__pnfdev
@pnfdev.setter
def pnfdev(self, pnfdev):
"""Set the PNFDev and update database."""
self.__pnfdev = pnfdev
session = Session()
feed = Session().query(TblFeed) \
.filter(TblFeed.feed_id == self.feed_id) \
.first()
if self.pnfdev:
feed.addr = self.pnfdev.addr
else:
feed.addr = None
session.commit()
@property
def is_on(self):
"""Return true if the switch is set to 0."""
if not self.mngt:
return None
if 'switch' in self.datastreams:
return self.datastreams['switch']['current_value'] == 0
@is_on.setter
def is_on(self, value):
"""Set the switch."""
if not self.mngt:
return
if self.is_on == value:
return
if value:
url = 'http://%s/arduino/datastreams/switch/%u' % (self.mngt[0], 0)
else:
url = 'http://%s/arduino/datastreams/switch/%u' % (self.mngt[0], 1)
HTTPClient().fetch(url)
def to_dict(self):
"""Return a JSON-serializable dictionary representing the Feed."""
last = datetime.strptime(self.updated, "%Y-%m-%dT%H:%M:%S.%fZ")
now = datetime.now()
delta = timedelta(seconds=30)
if now - last > delta:
status = 'dead'
else:
status = 'live'
out = {'id': self.feed_id,
'created': self.created,
'updated': self.updated,
'status': status,
'datastreams': self.datastreams.values(),
'feed': '/api/v1/feeds/%u.json' % (self.feed_id),
'mngt': self.mngt}
if self.pnfdev:
out[self.pnfdev.SOLO] = self.pnfdev.addr
return out
def update(self, datastreams):
"""Update the datastrem with new samples.
Args:
datastreams, list of datastream objects.
Returns:
None
Raises:
ValueError: if an invalid datastream is passed.
"""
self.updated = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
for incoming in datastreams:
if incoming['id'] in self.datastreams:
local = self.datastreams[incoming['id']]
local['at'] = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
local['current_value'] = incoming['current_value']
if local['max_value'] < local['current_value']:
local['max_value'] = local['current_value']
if local['min_value'] > local['current_value']:
local['min_value'] = local['current_value']
else:
self.datastreams[incoming['id']] = \
{'at': datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
'max_value': incoming['current_value'],
'min_value': incoming['current_value'],
'id': incoming['id'],
'current_value': incoming['current_value']}
def __str__(self):
return str(self.feed_id)
def __hash__(self):
return hash(self.feed_id)
def __eq__(self, other):
if isinstance(other, Feed):
return self.feed_id == other.feed_id
return False
def __ne__(self, other):
return not self.__eq__(other)
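# Illustrative sketch (made-up values): repeated updates track the current,
# min and max value of each datastream.
#
#   feed = Feed(1)
#   feed.update([{'id': 'power', 'current_value': 3.2}])
#   feed.update([{'id': 'power', 'current_value': 4.1}])
#   feed.datastreams['power']['max_value']  # -> 4.1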
|
__author__ = 'Ulric Qin'
from frame.store import db
class Bean(object):
_tbl = ''
_id = 'id'
_cols = ''
@classmethod
def insert(cls, data=None):
if not data:
raise ValueError('argument data is invalid')
size = len(data)
keys = data.keys()
safe_keys = ['`%s`' % k for k in keys]
sql = 'INSERT INTO `%s`(%s) VALUES(%s)' % (cls._tbl, ','.join(safe_keys), '%s' + ',%s' * (size - 1))
last_id = db.insert(sql, [data[key] for key in keys])
return last_id
@classmethod
def delete(cls, where=None, params=None):
sql = 'DELETE FROM `%s`' % cls._tbl
if not where:
return db.update(sql)
sql += ' WHERE ' + where
return db.update(sql, params)
@classmethod
def delete_one(cls, pk=None):
sql = 'DELETE FROM `%s` WHERE %s = %%s' % (cls._tbl, cls._id)
return db.update(sql, [pk])
@classmethod
def update(cls, clause=None, params=None):
sql = 'UPDATE `%s` SET %s' % (cls._tbl, clause)
return db.update(sql, params)
@classmethod
def update_dict(cls, data=None, where='', params=None):
if not data:
raise ValueError('argument data is invalid')
size = len(data)
keys = data.keys()
safe_keys = ['`%s`' % k for k in keys]
values = [data[key] for key in keys]
arr = ['%s=%%s' % key for key in safe_keys]
if not where:
return cls.update(','.join(arr), values)
else:
values.extend(params)
return cls.update(', '.join(arr) + ' WHERE ' + where, values)
@classmethod
def select(cls, cols=None, where=None, params=None, order=None, limit=None, page=None, offset=None):
if cols is None:
cols = cls._cols
if params is None:
params = []
sql = 'SELECT %s FROM `%s`' % (cols, cls._tbl)
if where:
sql = '%s WHERE %s' % (sql, where)
if order:
sql = '%s ORDER BY %s' % (sql, order)
if limit is not None:
sql = '%s LIMIT %s' % (sql, limit)
if offset is not None:
sql = '%s OFFSET %s' % (sql, offset)
if page is not None:
offset = (int(page) - 1) * int(limit)
if offset < 0:
offset = 0
sql = '%s OFFSET %s' % (sql, offset)
return db.query_all(sql, params)
@classmethod
def select_vs(cls, where=None, params=None, order=None, limit=None, page=None, offset=None):
rows = cls.select(where=where, params=params, order=order, limit=limit, page=page, offset=offset)
return [cls(*row) for row in rows]
@classmethod
def read(cls, where=None, params=None):
vs = cls.select_vs(where=where, params=params)
if vs:
return vs[0]
else:
return None
@classmethod
def get(cls, id_val):
if not id_val:
return None
return cls.read('%s = %%s' % cls._id, [id_val])
@classmethod
def column(cls, col=None, where=None, params=None, order=None, limit=None, page=None, offset=None):
rows = cls.select(col, where, params, order, limit, page, offset)
return [row[0] for row in rows]
@classmethod
def total(cls, where=None, params=None):
sql = 'SELECT COUNT(1) FROM `%s`' % cls._tbl
if not where:
ret = db.query_column(sql)
return ret[0]
sql += ' WHERE ' + where
ret = db.query_column(sql, params)
return ret[0]
@classmethod
def exists(cls, where=None, params=None):
return cls.total(where, params) > 0
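# Hypothetical subclass sketch: map a `host` table onto Bean. Column order in
# _cols must match the positional constructor arguments used by select_vs.
#
#   class Host(Bean):
#       _tbl = 'host'
#       _cols = 'id, hostname'
#
#       def __init__(self, _id, hostname):
#           self.id = _id
#           self.hostname = hostname
#
#   Host.insert({'hostname': 'web-01'})
#   hosts = Host.select_vs(where='hostname LIKE %s', params=['web%'])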
|
print "|--------------------------------------------|"
print "| Starting Perlin Noise Demo |"
print "|--------------------------------------------|"
scene.addAssetPath('mesh', 'mesh')
scene.addAssetPath('motion', 'ChrMaarten')
scene.addAssetPath('script', 'scripts')
scene.addAssetPath('script', 'behaviorsets')
scene.loadAssets()
print 'Configuring scene parameters and camera'
scene.setScale(1.0)
scene.setBoolAttribute('internalAudio', True)
scene.run('default-viewer.py')
camera = getCamera()
camera.setEye(0.16, 1.44, 1.73)
camera.setCenter(0.16, 0.94, -0.17)
camera.setUpVector(SrVec(0, 1, 0))
camera.setScale(1)
camera.setFov(1.0472)
camera.setFarPlane(100)
camera.setNearPlane(0.1)
camera.setAspectRatio(0.966897)
scene.getPawn('camera').setPosition(SrVec(0, -2, 0))
print 'Setting up joint map for Brad'
scene.run('zebra2-map.py')
zebra2Map = scene.getJointMapManager().getJointMap('zebra2')
bradSkeleton = scene.getSkeleton('ChrBrad.sk')
zebra2Map.applySkeleton(bradSkeleton)
zebra2Map.applyMotionRecurse('ChrBrad')
print 'Setting up Brad'
scene.run('BehaviorSetGestures.py')
setupBehaviorSet()
bradPosX = -45.0
for i in range(2):
baseName = 'ChrBrad%s' % i
brad = scene.createCharacter(baseName, '')
bradSkeleton = scene.createSkeleton('ChrBrad.sk')
brad.setSkeleton(bradSkeleton)
# Set position
bradPos = SrVec((bradPosX + (i * 100))/100, 0, 0)
brad.setPosition(bradPos)
brad.createStandardControllers()
# Gesture map setup
brad.setStringAttribute('gestureMap', 'ChrBrad')
brad.setBoolAttribute('gestureRequest.autoGestureTransition', True)
# Set deformable mesh
brad.setVec3Attribute('deformableMeshScale', .01, .01, .01)
brad.setStringAttribute('deformableMesh', 'ChrMaarten.dae')
# Play idle animation
bml.execBML(baseName, '<body posture="ChrBrad@Idle01"/>')
retargetBehaviorSet(baseName)
brad.setStringAttribute("displayType", "GPUmesh")
scene.getCharacter('ChrBrad0').setHPR(SrVec(17, 0, 0))
scene.getCharacter('ChrBrad1').setHPR(SrVec(-17, 0, 0))
lastTime = -5
import random
class PerlinNoiseDemo(SBScript):
def update(self, time):
global lastTime
timeDiff = time - lastTime
# When time's up, do action
if timeDiff >= 5:
lastTime = time
which = random.randrange(0, 3, 1)
# Perlin noise can be added by adding sbm:joint-range="l_shoulder" sbm:frequency="0.03" sbm:scale="0.02" at the end
# Perlin noise
if which == 0:
bml.execBML('ChrBrad0', '<gesture lexeme="DEICTIC" type="YOU" poststroke_hold="2" sbm:joint-range="l_shoulder l_elbow" sbm:frequency="0.05" sbm:scale="0.02"/>')
bml.execBML('ChrBrad1', '<gesture lexeme="DEICTIC" type="YOU" poststroke_hold="2"/>')
elif which == 1:
bml.execBML('ChrBrad0', '<gesture lexeme="METAPHORIC" type="OBLIGATION" mode="LEFT_HAND" poststroke_hold="2" sbm:joint-range="l_shoulder l_elbow" sbm:frequency="0.05" sbm:scale="0.02"/>')
bml.execBML('ChrBrad1', '<gesture lexeme="METAPHORIC" type="OBLIGATION" mode="LEFT_HAND" poststroke_hold="2"/>')
elif which == 2:
bml.execBML('ChrBrad0', '<gesture lexeme="DEICTIC" type="ME" poststroke_hold="2" sbm:joint-range="l_shoulder l_elbow" sbm:frequency="0.05" sbm:scale="0.02"/>')
bml.execBML('ChrBrad1', '<gesture lexeme="DEICTIC" type="ME" poststroke_hold="2"/>')
scene.removeScript('perlinnoisedemo')
perlinnoisedemo = PerlinNoiseDemo()
scene.addScript('perlinnoisedemo', perlinnoisedemo)
|
from tempest.api.compute import base
from tempest import exceptions
from tempest.test import attr
class InstanceActionsV3TestJSON(base.BaseV3ComputeTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
super(InstanceActionsV3TestJSON, cls).setUpClass()
cls.client = cls.servers_client
resp, server = cls.create_test_server(wait_until='ACTIVE')
cls.request_id = resp['x-compute-request-id']
cls.server_id = server['id']
@attr(type='gate')
def test_list_instance_actions(self):
# List actions of the provided server
resp, body = self.client.reboot(self.server_id, 'HARD')
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
resp, body = self.client.list_instance_actions(self.server_id)
self.assertEqual(200, resp.status)
self.assertTrue(len(body) == 2, str(body))
self.assertTrue(any([i for i in body if i['action'] == 'create']))
self.assertTrue(any([i for i in body if i['action'] == 'reboot']))
@attr(type='gate')
def test_get_instance_action(self):
# Get the action details of the provided server
resp, body = self.client.get_instance_action(self.server_id,
self.request_id)
self.assertEqual(200, resp.status)
self.assertEqual(self.server_id, body['instance_uuid'])
self.assertEqual('create', body['action'])
@attr(type=['negative', 'gate'])
def test_list_instance_actions_invalid_server(self):
# List actions of the invalid server id
self.assertRaises(exceptions.NotFound,
self.client.list_instance_actions, 'server-999')
@attr(type=['negative', 'gate'])
def test_get_instance_action_invalid_request(self):
# Get the action details of the provided server with invalid request
self.assertRaises(exceptions.NotFound, self.client.get_instance_action,
self.server_id, '999')
class InstanceActionsV3TestXML(InstanceActionsV3TestJSON):
_interface = 'xml'
|
"""
Copyright 2015 Parsely, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__version__ = '0.3-dev'
VERSION = (0, 3, 'dev')
|
"""Tests for ceilometer/central/manager.py"""
from unittest import mock
from oslotest import base
from ceilometer.hardware import discovery as hardware
from ceilometer.polling.discovery import endpoint
from ceilometer.polling.discovery import localnode
from ceilometer.polling.discovery import tenant as project
from ceilometer import service
class TestEndpointDiscovery(base.BaseTestCase):
def setUp(self):
super(TestEndpointDiscovery, self).setUp()
CONF = service.prepare_service([], [])
CONF.set_override('interface', 'publicURL',
group='service_credentials')
CONF.set_override('region_name', 'test-region-name',
group='service_credentials')
self.discovery = endpoint.EndpointDiscovery(CONF)
self.manager = mock.MagicMock()
self.catalog = (self.manager.keystone.session.auth.get_access.
return_value.service_catalog)
def test_keystone_called(self):
self.discovery.discover(self.manager, param='test-service-type')
expected = [mock.call(service_type='test-service-type',
interface='publicURL',
region_name='test-region-name')]
self.assertEqual(expected, self.catalog.get_urls.call_args_list)
def test_keystone_called_no_service_type(self):
self.discovery.discover(self.manager)
expected = [mock.call(service_type=None,
interface='publicURL',
region_name='test-region-name')]
self.assertEqual(expected,
self.catalog.get_urls
.call_args_list)
def test_keystone_called_no_endpoints(self):
self.catalog.get_urls.return_value = []
self.assertEqual([], self.discovery.discover(self.manager))
class TestLocalnodeDiscovery(base.BaseTestCase):
def setUp(self):
super(TestLocalnodeDiscovery, self).setUp()
CONF = service.prepare_service([], [])
self.discovery = localnode.LocalNodeDiscovery(CONF)
self.manager = mock.MagicMock()
def test_localnode_discovery(self):
self.assertEqual(['local_host'], self.discovery.discover(self.manager))
class TestProjectDiscovery(base.BaseTestCase):
def prepare_mock_data(self):
domain_heat = mock.MagicMock()
domain_heat.id = '2f42ab40b7ad4140815ef830d816a16c'
domain_heat.name = 'heat'
domain_heat.enabled = True
domain_heat.links = {
u'self': u'http://192.168.1.1/identity/v3/domains/'
u'2f42ab40b7ad4140815ef830d816a16c'}
domain_default = mock.MagicMock()
domain_default.id = 'default'
domain_default.name = 'Default'
domain_default.enabled = True
domain_default.links = {
u'self': u'http://192.168.1.1/identity/v3/domains/default'}
project_admin = mock.MagicMock()
project_admin.id = '2ce92449a23145ef9c539f3327960ce3'
project_admin.name = 'admin'
project_admin.parent_id = 'default'
project_admin.domain_id = 'default'
project_admin.is_domain = False
project_admin.enabled = True
project_admin.links = {
u'self': u'http://192.168.4.46/identity/v3/projects/'
u'2ce92449a23145ef9c539f3327960ce3'}
project_service = mock.MagicMock()
project_service.id = '9bf93b86bca04e3b815f86a5de083adc'
project_service.name = 'service'
project_service.parent_id = 'default'
project_service.domain_id = 'default'
project_service.is_domain = False
project_service.enabled = True
project_service.links = {
u'self': u'http://192.168.4.46/identity/v3/projects/'
u'9bf93b86bca04e3b815f86a5de083adc'}
project_demo = mock.MagicMock()
project_demo.id = '57d96b9af18d43bb9d047f436279b0be'
project_demo.name = 'demo'
project_demo.parent_id = 'default'
project_demo.domain_id = 'default'
project_demo.is_domain = False
project_demo.enabled = True
project_demo.links = {
u'self': u'http://192.168.4.46/identity/v3/projects/'
u'57d96b9af18d43bb9d047f436279b0be'}
self.domains = [domain_heat, domain_default]
self.default_domain_projects = [project_admin, project_service]
self.heat_domain_projects = [project_demo]
def side_effect(self, domain=None):
if not domain or domain.name == 'Default':
return self.default_domain_projects
elif domain.name == 'heat':
return self.heat_domain_projects
else:
return []
def setUp(self):
super(TestProjectDiscovery, self).setUp()
CONF = service.prepare_service([], [])
self.discovery = project.TenantDiscovery(CONF)
self.prepare_mock_data()
self.manager = mock.MagicMock()
self.manager.keystone.projects.list.side_effect = self.side_effect
def test_project_discovery(self):
self.manager.keystone.domains.list.return_value = self.domains
result = self.discovery.discover(self.manager)
self.assertEqual(len(result), 3)
self.assertEqual(self.manager.keystone.projects.list.call_count, 2)
class TestHardwareDiscovery(base.BaseTestCase):
class MockInstance(object):
addresses = {'ctlplane': [
{'addr': '0.0.0.0',
'OS-EXT-IPS-MAC:mac_addr': '01-23-45-67-89-ab'}
]}
id = 'resource_id'
image = {'id': 'image_id'}
flavor = {'id': 'flavor_id'}
expected = {
'resource_id': 'resource_id',
'resource_url': 'snmp://ro_snmp_user:password@0.0.0.0',
'mac_addr': '01-23-45-67-89-ab',
'image_id': 'image_id',
'flavor_id': 'flavor_id',
}
expected_usm = {
'resource_id': 'resource_id',
'resource_url': ''.join(['snmp://ro_snmp_user:password@0.0.0.0',
'?priv_proto=aes192',
'&priv_password=priv_pass']),
'mac_addr': '01-23-45-67-89-ab',
'image_id': 'image_id',
'flavor_id': 'flavor_id',
}
def setUp(self):
super(TestHardwareDiscovery, self).setUp()
self.CONF = service.prepare_service([], [])
self.discovery = hardware.NodesDiscoveryTripleO(self.CONF)
self.discovery.nova_cli = mock.MagicMock()
self.manager = mock.MagicMock()
def test_hardware_discovery(self):
self.discovery.nova_cli.instance_get_all.return_value = [
self.MockInstance()]
resources = self.discovery.discover(self.manager)
self.assertEqual(1, len(resources))
self.assertEqual(self.expected, resources[0])
def test_hardware_discovery_without_flavor(self):
instance = self.MockInstance()
instance.flavor = {}
self.discovery.nova_cli.instance_get_all.return_value = [instance]
resources = self.discovery.discover(self.manager)
self.assertEqual(0, len(resources))
def test_hardware_discovery_usm(self):
self.CONF.set_override('readonly_user_priv_proto', 'aes192',
group='hardware')
self.CONF.set_override('readonly_user_priv_password', 'priv_pass',
group='hardware')
self.discovery.nova_cli.instance_get_all.return_value = [
self.MockInstance()]
resources = self.discovery.discover(self.manager)
self.assertEqual(self.expected_usm, resources[0])
|
from furl import furl
from lxml import etree
from share.harvest import BaseHarvester
class DataOneHarvester(BaseHarvester):
VERSION = 1
def do_harvest(self, start_date, end_date):
end_date = end_date.format('YYYY-MM-DDT00:00:00', formatter='alternative') + 'Z'
start_date = start_date.format('YYYY-MM-DDT00:00:00', formatter='alternative') + 'Z'
url = furl(self.config.base_url).set(query_params={
'q': 'dateModified:[{} TO {}]'.format(start_date, end_date),
'start': 0,
'rows': 1
}).url
return self.fetch_records(url, start_date, end_date)
def fetch_records(self, url, start_date, end_date):
resp = self.requests.get(url)
doc = etree.XML(resp.content)
total_records = int(doc.xpath("//result/@numFound")[0])
records_processed = 0
while records_processed < total_records:
response = self.requests.get(furl(url).set(query_params={
'q': 'dateModified:[{} TO {}]'.format(start_date, end_date),
'start': records_processed,
'rows': 1000
}).url)
docs = etree.XML(response.content).xpath('//doc')
for doc in docs:
doc_id = doc.xpath("str[@name='id']")[0].text
doc = etree.tostring(doc)
yield (doc_id, doc)
records_processed += len(docs)
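# Usage sketch (assumes a configured harvester instance and date objects that
# support the `format(...)` call used above, as the SHARE framework provides):
#
#   for doc_id, raw_xml in harvester.do_harvest(start_date, end_date):
#       store(doc_id, raw_xml)  # `store` is a placeholder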
|
"""
MulticastQuerier - command ``find /sys/devices/virtual/net/ -name multicast_querier -print -exec cat {} \;``
============================================================================================================
This module provides processing for the output of the
``find -name multicast_querier ...`` command.
Sample output of this command looks like::
/sys/devices/virtual/net/br0/bridge/multicast_querier
0
/sys/devices/virtual/net/br1/bridge/multicast_querier
1
/sys/devices/virtual/net/br2/bridge/multicast_querier
0
The ``bri_val`` property returns a dictionary mapping each bridge interface
to its multicast_querier value as the parsing result::
{'br0': 0, 'br1': 1, 'br2': 0}
Examples:
>>> multicast_querier_content = '''
... /sys/devices/virtual/net/br0/bridge/multicast_querier
... 0
... /sys/devices/virtual/net/br1/bridge/multicast_querier
... 1
... /sys/devices/virtual/net/br2/bridge/multicast_querier
... 0
... '''.strip()
>>> from insights.tests import context_wrap
>>> from insights.parsers.multicast_querier import MulticastQuerier
>>> shared = {MulticastQuerier: MulticastQuerier(context_wrap(multicast_querier_content))}
>>> mq_results = MulticastQuerier(context_wrap(multicast_querier_content))
>>> mq_results.bri_val
{'br0': 0, 'br1': 1, 'br2': 0}
"""
from .. import Parser, parser
from insights.specs import multicast_querier
@parser(multicast_querier)
class MulticastQuerier(Parser):
"""
Parse the output of the command:
`find /sys/devices/virtual/net/ -name multicast_querier -print -exec cat {} \;`
Get a dictionary of "bridge interface" and the value of the parameter "multicast_querier"
"""
@property
def bri_val(self):
return self._mapping
def parse_content(self, content):
self._mapping = {}
for line in content:
mq_val = ''
if line.startswith('/sys/'):
bri_iface = line.split('/')[5]
else:
mq_val = int(line.strip())
self._mapping[bri_iface] = mq_val
return
|
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1Sysctl(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'value': 'str'
}
attribute_map = {
'name': 'name',
'value': 'value'
}
def __init__(self, name=None, value=None, local_vars_configuration=None): # noqa: E501
"""V1Sysctl - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._value = None
self.discriminator = None
self.name = name
self.value = value
@property
def name(self):
"""Gets the name of this V1Sysctl. # noqa: E501
Name of a property to set # noqa: E501
:return: The name of this V1Sysctl. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1Sysctl.
Name of a property to set # noqa: E501
:param name: The name of this V1Sysctl. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def value(self):
"""Gets the value of this V1Sysctl. # noqa: E501
Value of a property to set # noqa: E501
:return: The value of this V1Sysctl. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this V1Sysctl.
Value of a property to set # noqa: E501
:param value: The value of this V1Sysctl. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and value is None: # noqa: E501
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Sysctl):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Sysctl):
return True
return self.to_dict() != other.to_dict()
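# Illustrative sketch: round-trip a sysctl through the generated model.
#
#   sysctl = V1Sysctl(name="net.core.somaxconn", value="1024")
#   sysctl.to_dict()  # -> {'name': 'net.core.somaxconn', 'value': '1024'}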
|
"""Power operations"""
from .power_common import CommonPowerCommand
from .power_off import PowerOffCommand
from .power_on import PowerOnCommand
from .power_cycle import PowerCycleCommand
|
from model.contact import Contact
import random
def test_delete_contact_by_id(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname="igor"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
app.contact.delete_contact_by_id(contact.id)
new_contacts = db.get_contact_list()
assert len(old_contacts) - 1 == len(new_contacts)
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
|
def break_words(stuff):
"""This function will break up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
print words
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
print words
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Tales in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
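# Example session (Python 2; the ex25 module name is assumed):
#
#   >>> import ex25
#   >>> words = ex25.break_words("All good things come to those who wait.")
#   >>> ex25.sort_words(words)
#   ['All', 'come', 'good', 'things', 'those', 'to', 'wait.', 'who']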
|
"""
simple_service.py
Combine results from GA4GH and ExAC API to build
a simple web service.
"""
import flask
app = flask.Flask(__name__)
import ga4gh.client as client
import requests
@app.route('/')
def hello_world():
return 'Hello World!'
@app.route('/echo/<echostring>')
def echo_route(echostring):
return echostring
EXAC_BASE_URL = "http://exac.hms.harvard.edu/rest/"
GA4GH_BASE_URL = "http://1kgenomes.ga4gh.org/"
@app.route('/gene/<gene_name>')
def gene_route(gene_name):
# First, let's request variants in the gene from ExAC.
# Note that we aren't handling cases when the gene isn't found. The ExAC
# API uses redirects to locate the gene of interest. Better error handling
# is left as an exercise.
print("Looking for " + str(gene_name))
print("If this hangs forever ctrl-c to quit :)")
response = requests.get(
EXAC_BASE_URL + "awesome?query=" + gene_name + "&service=variants_in_gene")
exac_variants = response.json()
# Now we'll check to make sure we got something back.
print("Found " + str(len(exac_variants)) + " variants in " + gene_name)
# As in `combine_apis` we'll get all the variants from the GA4GH
# variant set.
# We can refine our search by getting the range of positions
# from the ExAC variants.
min_start = 2**32
max_start = 0
chrom = "1"
for variant in exac_variants:
if variant['pos'] > max_start:
max_start = variant['pos']
if variant['pos'] < min_start:
min_start = variant['pos']
chrom = variant['chrom']
print("Range: " + str(min_start) + ":" + str(max_start) + " on chrom " + chrom)
c = client.HttpClient(GA4GH_BASE_URL)
ga4gh_variants = [v for v in c.searchVariants(
c.searchVariantSets(c.searchDatasets().next().id).next().id,
start=min_start,
end=max_start,
referenceName=chrom)]
# We'll find if there are any matches and return them.
# Matches is a list of tuples, the first of each tuple
# being the GA4GH variant, and the second being the ExAC
# variant.
matches = []
for exac_variant in exac_variants:
for ga4gh_variant in ga4gh_variants:
# Note that GA4GH positions are 0-based so we add
# 1 to line it up with ExAC.
if (ga4gh_variant.start + 1) == exac_variant['pos']:
matches.append((ga4gh_variant.toJsonDict(), exac_variant))
print("Found " + str(len(matches)) + " matches.")
# You can point a web browser at this address to see some results:
# http://localhost:5000/gene/or4f5
# Now that we have a web service synthesizing the results
# from ExAC and GA4GH, you may use this web service in the same
# way we used ExAC or GA4GH in the hello_ examples.
# response = requests.get("http://localhost:5000/gene/or4f5")
# response_data = response.json()
# for result in response_data['matches']:
# print result
return flask.jsonify({"gene_name": gene_name, "matches": matches})
if __name__ == '__main__':
app.debug = True # helps us figure out if something went wrong
app.run() # starts the server and keeps it running
|
import os
import sys
import copy
import random
import shutil
import numpy as np
SRC_ROOT = "/media/yeephycho/My Passport/Normal_patch"
DST_ROOT = "/media/yeephycho/New Volume/NORMAL_600"
def main(arguments):
filenames = os.listdir(SRC_ROOT)
shuffle_list = copy.deepcopy(filenames)
random.shuffle(shuffle_list)
# for i in range(10):
# print(filenames[i])
# print(shuffle_list[i])
for i in range(6000):
src_path = os.path.join(SRC_ROOT, shuffle_list[i])
dst_path = os.path.join(DST_ROOT, shuffle_list[i])
shutil.copy(src_path, dst_path)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.ids_v1.services.ids import pagers
from google.cloud.ids_v1.types import ids
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import IDSTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import IDSGrpcAsyncIOTransport
from .client import IDSClient
class IDSAsyncClient:
"""The IDS Service"""
_client: IDSClient
DEFAULT_ENDPOINT = IDSClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = IDSClient.DEFAULT_MTLS_ENDPOINT
endpoint_path = staticmethod(IDSClient.endpoint_path)
parse_endpoint_path = staticmethod(IDSClient.parse_endpoint_path)
common_billing_account_path = staticmethod(IDSClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(
IDSClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(IDSClient.common_folder_path)
parse_common_folder_path = staticmethod(IDSClient.parse_common_folder_path)
common_organization_path = staticmethod(IDSClient.common_organization_path)
parse_common_organization_path = staticmethod(
IDSClient.parse_common_organization_path
)
common_project_path = staticmethod(IDSClient.common_project_path)
parse_common_project_path = staticmethod(IDSClient.parse_common_project_path)
common_location_path = staticmethod(IDSClient.common_location_path)
parse_common_location_path = staticmethod(IDSClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
IDSAsyncClient: The constructed client.
"""
return IDSClient.from_service_account_info.__func__(IDSAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
IDSAsyncClient: The constructed client.
"""
return IDSClient.from_service_account_file.__func__(IDSAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return IDSClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> IDSTransport:
"""Returns the transport used by the client instance.
Returns:
IDSTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(IDSClient).get_transport_class, type(IDSClient)
)
def __init__(
self,
*,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, IDSTransport] = "grpc_asyncio",
        client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the ids client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.IDSTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = IDSClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
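    # A construction sketch, assuming Application Default Credentials are
    # available in the environment (the endpoint override is illustrative):
    #
    #     from google.api_core.client_options import ClientOptions
    #
    #     client = IDSAsyncClient(
    #         client_options=ClientOptions(api_endpoint="ids.googleapis.com"),
    #     )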
async def list_endpoints(
self,
        request: Optional[Union[ids.ListEndpointsRequest, dict]] = None,
        *,
        parent: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListEndpointsAsyncPager:
r"""Lists Endpoints in a given project and location.
        .. code-block:: python

            from google.cloud import ids_v1

            async def sample_list_endpoints():
                # Create a client
                client = ids_v1.IDSAsyncClient()

                # Initialize request argument(s)
                request = ids_v1.ListEndpointsRequest(
                    parent="parent_value",
                )

                # Make the request
                page_result = await client.list_endpoints(request=request)

                # Handle the response
                async for response in page_result:
                    print(response)
Args:
request (Union[google.cloud.ids_v1.types.ListEndpointsRequest, dict]):
The request object.
parent (:class:`str`):
Required. The parent, which owns this
collection of endpoints.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.ids_v1.services.ids.pagers.ListEndpointsAsyncPager:
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = ids.ListEndpointsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_endpoints,
default_retry=retries.Retry(
initial=0.25,
maximum=32.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
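        # The default retry above implements exponential backoff: the first
        # retry waits 0.25 s, each subsequent wait grows by a factor of 1.3 up
        # to a 32 s cap, only ServiceUnavailable errors are retried, and the
        # attempt is abandoned after the 60 s deadline.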
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListEndpointsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_endpoint(
self,
        request: Optional[Union[ids.GetEndpointRequest, dict]] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ids.Endpoint:
r"""Gets details of a single Endpoint.
        .. code-block:: python

            from google.cloud import ids_v1

            async def sample_get_endpoint():
                # Create a client
                client = ids_v1.IDSAsyncClient()

                # Initialize request argument(s)
                request = ids_v1.GetEndpointRequest(
                    name="name_value",
                )

                # Make the request
                response = await client.get_endpoint(request=request)

                # Handle the response
                print(response)
Args:
request (Union[google.cloud.ids_v1.types.GetEndpointRequest, dict]):
The request object.
name (:class:`str`):
Required. The name of the endpoint to retrieve. Format:
``projects/{project}/locations/{location}/endpoints/{endpoint}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.ids_v1.types.Endpoint:
Endpoint describes a single IDS
endpoint. It defines a forwarding rule
to which packets can be sent for IDS
inspection.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = ids.GetEndpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_endpoint,
default_retry=retries.Retry(
initial=0.25,
maximum=32.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_endpoint(
self,
        request: Optional[Union[ids.CreateEndpointRequest, dict]] = None,
        *,
        parent: Optional[str] = None,
        endpoint: Optional[ids.Endpoint] = None,
        endpoint_id: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new Endpoint in a given project and
location.
        .. code-block:: python

            from google.cloud import ids_v1

            async def sample_create_endpoint():
                # Create a client
                client = ids_v1.IDSAsyncClient()

                # Initialize request argument(s)
                endpoint = ids_v1.Endpoint()
                endpoint.network = "network_value"
                endpoint.severity = "CRITICAL"

                request = ids_v1.CreateEndpointRequest(
                    parent="parent_value",
                    endpoint_id="endpoint_id_value",
                    endpoint=endpoint,
                )

                # Make the request
                operation = await client.create_endpoint(request=request)

                print("Waiting for operation to complete...")

                response = await operation.result()

                # Handle the response
                print(response)
Args:
request (Union[google.cloud.ids_v1.types.CreateEndpointRequest, dict]):
The request object.
parent (:class:`str`):
Required. The endpoint's parent.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
endpoint (:class:`google.cloud.ids_v1.types.Endpoint`):
Required. The endpoint to create.
This corresponds to the ``endpoint`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
endpoint_id (:class:`str`):
Required. The endpoint identifier. This will be part of
the endpoint's resource name. This value must start with
a lowercase letter followed by up to 62 lowercase
letters, numbers, or hyphens, and cannot end with a
hyphen. Values that do not match this pattern will
trigger an INVALID_ARGUMENT error.
This corresponds to the ``endpoint_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
                The result type for the operation will be
                :class:`google.cloud.ids_v1.types.Endpoint`, which describes a
                single IDS endpoint: a forwarding rule to which packets can be
                sent for IDS inspection.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, endpoint, endpoint_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = ids.CreateEndpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if endpoint is not None:
request.endpoint = endpoint
if endpoint_id is not None:
request.endpoint_id = endpoint_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_endpoint,
default_timeout=3600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
ids.Endpoint,
metadata_type=ids.OperationMetadata,
)
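        # The AsyncOperation future resolves to an ids.Endpoint once the
        # long-running creation finishes; callers obtain it with
        # ``await operation.result()``.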
# Done; return the response.
return response
async def delete_endpoint(
self,
        request: Optional[Union[ids.DeleteEndpointRequest, dict]] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a single Endpoint.
        .. code-block:: python

            from google.cloud import ids_v1

            async def sample_delete_endpoint():
                # Create a client
                client = ids_v1.IDSAsyncClient()

                # Initialize request argument(s)
                request = ids_v1.DeleteEndpointRequest(
                    name="name_value",
                )

                # Make the request
                operation = await client.delete_endpoint(request=request)

                print("Waiting for operation to complete...")

                response = await operation.result()

                # Handle the response
                print(response)
Args:
request (Union[google.cloud.ids_v1.types.DeleteEndpointRequest, dict]):
The request object.
name (:class:`str`):
Required. The name of the endpoint to
delete.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
                The result type for the operation will be
                :class:`google.protobuf.empty_pb2.Empty`, a generic empty
                message that you can re-use to avoid defining duplicated empty
                messages in your APIs. A typical example is to use it as the
                request or the response type of an API method. For instance:

                    service Foo {
                        rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
                    }

                The JSON representation for Empty is an empty JSON object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = ids.DeleteEndpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_endpoint,
default_timeout=3600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=ids.OperationMetadata,
)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
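    # The context-manager protocol above enables, for example:
    #
    #     async with IDSAsyncClient() as client:
    #         endpoint = await client.get_endpoint(name="...")
    #
    # (the ``name`` value is a placeholder for a full endpoint resource path).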
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-ids",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("IDSAsyncClient",)