gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
import sys
import os
import math
import shutil

# Results are written under $RESULTS/<fileInfo>/.
resultDir = os.environ.get('RESULTS')
if resultDir is None:  # fixed: compare to None with 'is', not '=='
    print("WARNING! $RESULTS not set! Attempt to write results will fail!\n")
# Expecting input botConc, topConc, rateConstFull, sysSize, analInterval, numStepsEquilib, numStepsSnapshot, numStepsAnal, numStepsReq, numPasses, timeInterval, fileCode
from KMCLib import *
from KMCLib.Backend import Backend
import numpy
from RateCalc import *
from DensHist import *

# Positional command-line parameters, in this order.
botConc = float(sys.argv[1])        # oxygen concentration at the bottom boundary
topConc = float(sys.argv[2])        # oxygen concentration at the top boundary
rateConstFull = float(sys.argv[3])  # base rate constant used by the custom rate calculator
sysWidth = int(sys.argv[4])         # lattice repetitions in x
sysLength = int(sys.argv[5])        # lattice repetitions in y (excluding boundary rows)
analInterval = int(sys.argv[6])
numStepsEquilib = int(sys.argv[7])  # KMC steps for the initial equilibration run
numStepsSnapshot = int(sys.argv[8])
numStepsAnal = int(sys.argv[9])     # KMC steps per analysis run
numStepsReq = int(sys.argv[10])     # KMC steps between analysis runs
numPasses = int(sys.argv[11])       # number of analysis passes
timeInterval = float(sys.argv[12])
fileInfo = sys.argv[13]             # subdirectory name for this run's results

resultsPlace = resultDir + "/" + fileInfo + "/"
# exist_ok avoids the check-then-create race of the old exists()/makedirs() pair.
os.makedirs(resultsPlace, exist_ok=True)
# Record the run parameters alongside the results for later reference.
with open(resultsPlace + 'settings', 'w') as f:
    f.write('BotConcentration = ' + str(botConc) + '\n')
    f.write('TopConcentration = ' + str(topConc) + '\n')
    f.write('FullRate = ' + str(rateConstFull) + '\n')
    f.write('SysWidth = ' + str(sysWidth) + '\n')
    f.write('SysLength = ' + str(sysLength) + '\n')
    f.write('TimeInterval = ' + str(timeInterval) + '\n')
    f.write('AnalInterval = ' + str(analInterval) + '\n')
    f.write('NumStepsEquilib = ' + str(numStepsEquilib) + '\n')
    f.write('NumStepsSnapshot = ' + str(numStepsSnapshot) + '\n')
    f.write('NumStepsAnal = ' + str(numStepsAnal) + '\n')
"""I've put this in the file to make command line input easier"""
# Load the configuration and interactions.
# We're in 2d
cell_vectors = [[1.0,0.0,0.0],
[0.0,1.0,0.0],
[0.0,0.0,1.0]]
# Only bothering with one set
basis_points = [[0.0, 0.0, 0.0]]
unit_cell = KMCUnitCell(cell_vectors=cell_vectors,
basis_points=basis_points)
# Define the lattice.
xRep = sysWidth
yRep = sysLength + 4
zRep = 1
numPoints = xRep*zRep*yRep
lattice = KMCLattice(unit_cell=unit_cell,
repetitions=(xRep,yRep,zRep),
periodic=(True, True, False))
# Generate the initial types. There's double-layered section of "To" at the top
# and "Bo" at the bottom; the bulk in between starts at the average concentration.
avConc = 0.5*(botConc+topConc)
types = []
for yIndex in range(0, yRep):
    # Pick the concentration and the occupied/vacant labels for this row.
    if yIndex < 2:
        conc, occupied, vacant = botConc, "BoO", "BoV"
    elif yIndex < yRep - 2:
        conc, occupied, vacant = avConc, "O", "V"
    else:
        conc, occupied, vacant = topConc, "ToO", "ToV"
    for xIndex in range(0, xRep):
        # One random draw per site, in the same x-then-y order as before.
        label = occupied if numpy.random.rand() < conc else vacant
        types.append((xIndex, yIndex, 0, 0, label))
# Setup the configuration.
configuration = KMCConfiguration(lattice=lattice,
                                 types=types,
                                 possible_types=["O","V","ToV","BoV", "ToO", "BoO"], default_type="V")
# Rates.
rateConstEmpty = 1.0
# Boundary spawn/despawn rates. By construction spawn*despawn == rateConstFull**4;
# the sqrt(conc/(1-conc)) factor presumably enforces the target boundary
# concentration in equilibrium — TODO confirm against the model derivation.
topSpawn = rateConstFull*rateConstFull*math.sqrt(topConc/(1.0-topConc))
botSpawn = rateConstFull*rateConstFull*math.sqrt(botConc/(1.0-botConc))
topDespawn = (rateConstFull**4)/topSpawn
botDespawn = (rateConstFull**4)/botSpawn
#
##
###
"""I've put the processes in here to make it easier to adjust them via command line arguments."""
# Fill the list of processes.
processes = []
# Only on the first set of basis_points for O/V
basis_sites = [0]
# (elements_before, elements_after, coordinates) for each process, listed in
# index order 0..11. The custom rate calculator below dispatches on the
# process number, so the order of this table must not change.
processTable = [
    # 0: bulk hop up
    (["O", "V"], ["V", "O"], [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
    # 1: bulk hop down
    (["O", "V"], ["V", "O"], [[0.0, 0.0, 0.0], [0.0, -1.0, 0.0]]),
    # 2: bulk hop left
    (["O", "V"], ["V", "O"], [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0]]),
    # 3: bulk hop right
    (["O", "V"], ["V", "O"], [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]),
    # 4: oxygen annihilation at the top boundary
    (["O", "ToV"], ["V", "ToV"], [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
    # 5: oxygen creation at the top boundary
    (["ToO", "V"], ["ToO", "O"], [[0.0, 0.0, 0.0], [0.0, -1.0, 0.0]]),
    # 6: oxygen annihilation at the bottom boundary
    (["O", "BoV"], ["V", "BoV"], [[0.0, 0.0, 0.0], [0.0, -1.0, 0.0]]),
    # 7: oxygen creation at the bottom boundary
    (["BoO", "V"], ["BoO", "O"], [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
    # 8: boundary-site oxygen creation at the bottom boundary
    (["BoV"], ["BoO"], [[0.0, 0.0, 0.0]]),
    # 9: boundary-site oxygen annihilation at the bottom boundary
    (["BoO"], ["BoV"], [[0.0, 0.0, 0.0]]),
    # 10: boundary-site oxygen creation at the top boundary
    (["ToV"], ["ToO"], [[0.0, 0.0, 0.0]]),
    # 11: boundary-site oxygen annihilation at the top boundary
    (["ToO"], ["ToV"], [[0.0, 0.0, 0.0]]),
]
# All processes get a placeholder rate of 1.0 here; the actual rates are
# supplied by the custom rate calculator attached below ("Will customise").
for elements_before, elements_after, coordinates in processTable:
    processes.append(KMCProcess(coordinates=coordinates,
                                elements_before=elements_before,
                                elements_after=elements_after,
                                basis_sites=basis_sites,
                                rate_constant=1.0))
# Create the interactions object.
interactions = KMCInteractions(processes, implicit_wildcards=True)

# Define the custom rates calculator, using the lol model as a template
class modelRates2d(KMCRateCalculatorPlugin):
    """Custom rate function for the 2d model used in the KMCLib paper."""

    def rate(self, geometry, elements_before, elements_after, rate_constant, process_number, global_coordinate):
        # Boundary spawn/despawn processes (indices 8-11 in the process table)
        # use the fixed concentration-dependent rates computed above.
        boundaryRates = {8: botSpawn, 9: botDespawn, 10: topSpawn, 11: topDespawn}
        if process_number in boundaryRates:
            return boundaryRates[process_number]
        # Bulk hops: the rate depends on the number of occupied sites
        # ("O", "ToO" or "BoO") in the local environment.
        numNeighbours = sum(1 for e in elements_before if e in ("O", "ToO", "BoO"))
        return math.pow(rateConstFull, numNeighbours - 1)

    def cutoff(self):
        # Overloaded base class API function
        return 1.0

interactions.setRateCalculator(rate_calculator=modelRates2d)
"""End of processes"""
###
##
#
# Create the model.
model = KMCLatticeModel(configuration, interactions)
compositionTracker = Composition(time_interval=timeInterval)
# Define the parameters; not entirely sure if these are sensible or not...
control_parameters_equilib = KMCControlParameters(number_of_steps=numStepsEquilib, analysis_interval=numStepsEquilib/100,
dump_interval=numStepsEquilib/10)
control_parameters_req = KMCControlParameters(number_of_steps=numStepsReq, analysis_interval=numStepsReq/100,
dump_interval=numStepsReq/10)
control_parameters_anal = KMCControlParameters(number_of_steps=numStepsAnal, analysis_interval=1,
dump_interval=numStepsAnal/10)
# Run the simulation - save trajectory to resultsPlace, which should by now exist
model.run(control_parameters_equilib, trajectory_filename=(resultsPlace+"equilib.traj"))

# Truncate the per-boundary flux files so each run starts from scratch;
# results are appended to them pass by pass below.
fluxFiles = ("inBot.dat", "outBot.dat", "inTop.dat", "outTop.dat")
for fluxFile in fluxFiles:
    open(resultsPlace+fluxFile, 'w').close()
if not os.path.exists(resultsPlace+"numHists"):
    os.makedirs(resultsPlace+"numHists")
# Accumulated occupation histogram over all passes, one bin per lattice site.
ovNumHist = [0.0] * numPoints

for passNum in range(0, numPasses):
    # Fresh analysis objects per pass; the process indices match the table above.
    processStatsOxInBot = RateCalc(processes=[7])
    processStatsOxOutBot = RateCalc(processes=[6])
    processStatsOxInTop = RateCalc(processes=[5])
    processStatsOxOutTop = RateCalc(processes=[4])
    numHist = DensHist(spec=["O"], inProc=[7, 5], outProc=[6, 4])
    # Decorrelate, then run the measured segment; trajectories are discarded.
    model.run(control_parameters_req, trajectory_filename="/dev/null")
    model.run(control_parameters_anal, trajectory_filename="/dev/null",
              analysis=[processStatsOxInBot, processStatsOxOutBot,
                        processStatsOxInTop, processStatsOxOutTop, numHist])
    for fluxFile, stats in zip(fluxFiles, (processStatsOxInBot, processStatsOxOutBot,
                                           processStatsOxInTop, processStatsOxOutTop)):
        with open(resultsPlace+fluxFile, 'a') as f:
            stats.printResults(f)
    # Write this pass's histogram to a temporary file, fold it into the
    # running total, then delete the temporary file.
    histPath = resultsPlace+"numHists/numHist"+str(passNum)+".dat"
    with open(histPath, 'w') as f:
        numHist.printResults(f)
    with open(histPath, 'r') as f:
        lines = f.readlines()
    for index in range(0, numPoints):
        ovNumHist[index] += float(lines[index].split()[1])
    os.remove(histPath)

# Dump the accumulated histogram and clean up the (now empty) scratch directory.
with open(resultsPlace+"ovNumHist.dat", 'w') as f:
    for index in range(0, numPoints):
        f.write(str(index)+" "+str(ovNumHist[index])+"\n")
shutil.rmtree(resultsPlace+"numHists", ignore_errors=True)
print("Process would appear to have succesfully terminated! How very suspicious...")
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from contextlib import contextmanager
from typing import List, Iterator, Optional, Dict, Tuple
from _py2tmp.coverage import SourceBranch
from _py2tmp.ir1 import ir1
from _py2tmp.ir1.free_variables import get_unique_free_variables_in_stmts
from _py2tmp.ir2 import ir2, get_return_type
class Writer:
    """Minimal interface shared by FunWriter and StmtWriter: anything that can
    map a source identifier to its obfuscated name."""
    def obfuscate_identifier(self, identifier: str) -> str: ... # pragma: no cover
class FunWriter(Writer):
    """Module-level writer that collects generated ir1 function definitions.

    Owns the identifier generator used for fresh and obfuscated names, and
    pre-defines the `is_error` (meta)function that generated code calls to
    test whether an ErrorOrVoid value holds an error.
    """
    def __init__(self, identifier_generator: Iterator[str]):
        self.identifier_generator = identifier_generator
        # Reference to the built-in is_error function defined just below.
        self.is_error_fun_ref = self.new_var(ir1.FunctionType(argtypes=(ir1.ErrorOrVoidType(),),
                                                              returns=ir1.BoolType()),
                                             is_global_function=True)
        self.function_defns = [self._create_is_error_fun_defn()]
        # Lazily maps each source identifier to a fresh name on first use;
        # subsequent lookups return the same name.
        self.obfuscated_identifiers_by_identifier: Dict[str, str] = defaultdict(lambda: self.new_id())

    def new_id(self) -> str:
        """Return a fresh unique identifier."""
        return next(self.identifier_generator)

    def obfuscate_identifier(self, identifier: str):
        """Return the stable obfuscated name for a source identifier."""
        return self.obfuscated_identifiers_by_identifier[identifier]

    def new_var(self, expr_type: ir1.ExprType, is_global_function: bool = False):
        """Create a VarReference with a fresh name of the given type."""
        return ir1.VarReference(expr_type=expr_type,
                                name=self.new_id(),
                                is_global_function=is_global_function,
                                is_function_that_may_throw=isinstance(expr_type, ir1.FunctionType))

    def write_function(self, fun_defn: ir1.FunctionDefn):
        """Append a finished function definition to the output module."""
        self.function_defns.append(fun_defn)

    def _create_is_error_fun_defn(self):
        # def is_error(x: ErrorOrVoid):
        #   v = Type('void')
        #   b = (x == v)
        #   b2 = not b
        #   return b2
        x_var = self.new_var(expr_type=ir1.ErrorOrVoidType())
        arg_decls = (ir1.FunctionArgDecl(expr_type=x_var.expr_type, name=x_var.name),)
        stmt_writer = StmtWriter(self,
                                 current_fun_return_type=ir1.BoolType(),
                                 current_fun_args=arg_decls,
                                 current_fun_name=self.is_error_fun_ref.name,
                                 try_except_contexts=[])
        v_var = stmt_writer.new_var_for_expr(ir1.AtomicTypeLiteral('void'))
        b_var = stmt_writer.new_var_for_expr(ir1.EqualityComparison(lhs=x_var, rhs=v_var))
        b2_var = stmt_writer.new_var_for_expr(ir1.NotExpr(b_var))
        stmt_writer.write_stmt(ir1.ReturnStmt(result=b2_var, error=None, source_branch=None))
        return ir1.FunctionDefn(name=self.is_error_fun_ref.name,
                                description='The is_error (meta)function',
                                args=arg_decls,
                                body=tuple(stmt_writer.stmts),
                                return_type=ir1.BoolType())
class TryExceptContext:
    """Records one active `except` clause while lowering a try/except block.

    caught_exception_type / caught_exception_name are the exception type being
    caught and the name it is bound to; except_fun_call_expr is the call to the
    generated function holding the handler body, invoked when a raised error
    matches caught_exception_type (see StmtWriter.new_var_for_expr_with_error_checking).
    """
    def __init__(self,
                 caught_exception_type: ir1.CustomType,
                 caught_exception_name: str,
                 except_fun_call_expr: ir1.FunctionCall):
        self.caught_exception_type = caught_exception_type
        self.caught_exception_name = caught_exception_name
        self.except_fun_call_expr = except_fun_call_expr
class StmtWriter(Writer):
    """Writer that accumulates the ir1 statements of one function body.

    Also tracks the enclosing function's name, arguments and return type plus
    the stack of active try/except contexts, which is what the generated
    error-checking code expands into isinstance dispatches.
    """
    def __init__(self,
                 fun_writer: FunWriter,
                 current_fun_name: str,
                 current_fun_args: Tuple[ir1.FunctionArgDecl, ...],
                 current_fun_return_type: Optional[ir1.ExprType],
                 try_except_contexts: List[TryExceptContext]):
        # Each except-handler's result is returned directly from the enclosing
        # function, so its type must match the function's return type.
        for context in try_except_contexts:
            assert context.except_fun_call_expr.expr_type == current_fun_return_type
        self.fun_writer = fun_writer
        self.current_fun_name = current_fun_name
        self.current_fun_args = current_fun_args
        # None/falsy when writing top-level statements (see
        # new_var_for_expr_with_error_checking's else branch).
        self.current_fun_return_type = current_fun_return_type
        self.stmts: List[ir1.Stmt] = []
        # Copied so this writer can push/pop contexts without affecting the caller's list.
        self.try_except_contexts = try_except_contexts.copy()

    def write_function(self, fun_defn: ir1.FunctionDefn):
        """Forward a generated top-level function to the module-level writer."""
        self.fun_writer.write_function(fun_defn)

    def write_stmt(self, stmt: ir1.Stmt):
        """Append a statement to this function body."""
        self.stmts.append(stmt)

    def new_id(self) -> str:
        """Return a fresh unique identifier."""
        return self.fun_writer.new_id()

    def obfuscate_identifier(self, identifier: str):
        """Return the stable obfuscated name for a source identifier."""
        return self.fun_writer.obfuscate_identifier(identifier)

    def new_var(self, expr_type: ir1.ExprType):
        """Create a VarReference with a fresh name of the given type."""
        return self.fun_writer.new_var(expr_type)

    def new_var_for_expr(self, expr: ir1.Expr):
        """Assign `expr` to a fresh variable and return a reference to it."""
        var = self.fun_writer.new_var(expr.expr_type)
        self.write_stmt(ir1.Assignment(lhs=var, rhs=expr, source_branch=None))
        return var

    def new_var_for_expr_with_error_checking(self, expr: ir1.Expr):
        """Assign `expr` (whose evaluation may yield an error) to a fresh
        variable and emit the error-propagation code after it.
        """
        if self.current_fun_return_type:
            # x, err = <expr>
            # b = is_error(err)
            # if b:
            #   b1 = isinstance(err, MyError1)
            #   if b1:
            #     e1 = err  # type: MyError1
            #     res1, err1 = except_handler_fun1(...)
            #     return res1, err1
            #   ...
            #   bN = isinstance(err, MyErrorN)
            #   if bN:
            #     eN = err  # type: MyErrorN
            #     resN, errN = except_handler_funN(...)
            #     return resN, errN
            #   return None, err
            x_var = self.fun_writer.new_var(expr.expr_type)
            error_var = self.fun_writer.new_var(ir1.ErrorOrVoidType())
            self.write_stmt(ir1.Assignment(lhs=x_var, lhs2=error_var, rhs=expr, source_branch=None))
            b_var = self.new_var_for_expr(ir1.FunctionCall(fun=self.fun_writer.is_error_fun_ref,
                                                           args=(error_var,)))
            outer_if_branch_writer = StmtWriter(self.fun_writer,
                                                self.current_fun_name,
                                                self.current_fun_args,
                                                self.current_fun_return_type,
                                                try_except_contexts=self.try_except_contexts)
            # One isinstance check per active except clause, in the order the
            # contexts were entered.
            for context in self.try_except_contexts:
                if_branch_writer = StmtWriter(self.fun_writer,
                                              self.current_fun_name,
                                              self.current_fun_args,
                                              self.current_fun_return_type,
                                              try_except_contexts=self.try_except_contexts)
                # Bind the caught error to the user's exception variable (obfuscated).
                if_branch_writer.write_stmt(ir1.Assignment(lhs=ir1.VarReference(expr_type=context.caught_exception_type,
                                                                                name=self.obfuscate_identifier(context.caught_exception_name),
                                                                                is_global_function=False,
                                                                                is_function_that_may_throw=False),
                                                           rhs=ir1.SafeUncheckedCast(var=error_var,
                                                                                     expr_type=context.caught_exception_type),
                                                           source_branch=None))
                # Call the handler function and return its result/error pair.
                res_i = if_branch_writer.new_var(expr_type=self.current_fun_return_type)
                err_i = if_branch_writer.new_var(expr_type=ir1.ErrorOrVoidType())
                if_branch_writer.write_stmt(ir1.Assignment(lhs=res_i,
                                                           lhs2=err_i,
                                                           rhs=context.except_fun_call_expr,
                                                           source_branch=None))
                if_branch_writer.write_stmt(ir1.ReturnStmt(result=res_i, error=err_i, source_branch=None))
                b_i = outer_if_branch_writer.new_var_for_expr(
                    ir1.IsInstanceExpr(error_var, context.caught_exception_type))
                outer_if_branch_writer.write_stmt(ir1.IfStmt(cond=b_i,
                                                             if_stmts=tuple(if_branch_writer.stmts),
                                                             else_stmts=()))
            # No handler matched: propagate the error to the caller.
            outer_if_branch_writer.write_stmt(ir1.ReturnStmt(result=None, error=error_var, source_branch=None))
            self.write_stmt(ir1.IfStmt(cond=b_var,
                                       if_stmts=tuple(outer_if_branch_writer.stmts),
                                       else_stmts=()))
            return x_var
        else:
            # This statement is at top-level.
            # x, err = <expr>
            x_var = self.fun_writer.new_var(expr.expr_type)
            error_var = self.fun_writer.new_var(ir1.ErrorOrVoidType())
            self.write_stmt(ir1.Assignment(lhs=x_var, lhs2=error_var, rhs=expr, source_branch=None))
            self.write_stmt(ir1.CheckIfError(error_var))
            return x_var

    @contextmanager
    def enter_try_except_context(self, context: TryExceptContext):
        """Push `context` for the duration of the with-block, then pop it."""
        self.try_except_contexts.append(context)
        yield
        context1 = self.try_except_contexts.pop()
        assert context1 is context
def type_to_ir1(expr_type: ir2.ExprType):
    """Map an ir2 type to its ir1 equivalent.

    Note that ir2 sets are represented as ir1 lists.
    """
    atomic_conversions = ((ir2.BoolType, ir1.BoolType),
                          (ir2.IntType, ir1.IntType),
                          (ir2.TypeType, ir1.TypeType),
                          (ir2.BottomType, ir1.BottomType))
    for ir2_class, ir1_class in atomic_conversions:
        if isinstance(expr_type, ir2_class):
            return ir1_class()
    if isinstance(expr_type, (ir2.ListType, ir2.SetType)):
        return ir1.ListType(elem_type=type_to_ir1(expr_type.elem_type))
    if isinstance(expr_type, ir2.FunctionType):
        converted_arg_types = tuple(type_to_ir1(arg)
                                    for arg in expr_type.argtypes)
        return ir1.FunctionType(argtypes=converted_arg_types,
                                returns=type_to_ir1(expr_type.returns))
    if isinstance(expr_type, ir2.CustomType):
        converted_arg_decls = tuple(ir1.CustomTypeArgDecl(name=arg.name,
                                                          expr_type=type_to_ir1(arg.expr_type))
                                    for arg in expr_type.arg_types)
        return ir1.CustomType(name=expr_type.name,
                              arg_types=converted_arg_decls,
                              constructor_source_branches=expr_type.constructor_source_branches)
    raise NotImplementedError('Unexpected type: %s' % str(expr_type.__class__))
def expr_to_ir1(expr: ir2.Expr, writer: StmtWriter) -> ir1.VarReference:
    """Lower an ir2 expression to ir1, returning a variable bound to its value.

    Dispatches on the concrete ir2 node class; handlers are tried in a fixed
    order matching the original if/elif chain.
    """
    handlers = ((ir2.VarReference, var_reference_to_ir1),
                (ir2.MatchExpr, match_expr_to_ir1),
                (ir2.BoolLiteral, bool_literal_to_ir1),
                (ir2.IntLiteral, int_literal_to_ir1),
                (ir2.AtomicTypeLiteral, atomic_type_literal_to_ir1),
                (ir2.PointerTypeExpr, pointer_type_expr_to_ir1),
                (ir2.ReferenceTypeExpr, reference_type_expr_to_ir1),
                (ir2.RvalueReferenceTypeExpr, rvalue_reference_type_expr_to_ir1),
                (ir2.ConstTypeExpr, const_type_expr_to_ir1),
                (ir2.ArrayTypeExpr, array_type_expr_to_ir1),
                (ir2.FunctionTypeExpr, function_type_expr_to_ir1),
                (ir2.TemplateInstantiationExpr, template_instantiation_expr_to_ir1),
                (ir2.TemplateMemberAccessExpr, template_member_access_expr_to_ir1),
                (ir2.ListExpr, list_expr_to_ir1),
                (ir2.SetExpr, set_expr_to_ir1),
                (ir2.FunctionCall, function_call_to_ir1),
                (ir2.EqualityComparison, equality_comparison_to_ir1),
                (ir2.InExpr, in_expr_to_ir1),
                (ir2.AttributeAccessExpr, attribute_access_expr_to_ir1),
                (ir2.AndExpr, and_expr_to_ir1),
                (ir2.OrExpr, or_expr_to_ir1),
                (ir2.NotExpr, not_expr_to_ir1),
                (ir2.IntUnaryMinusExpr, int_unary_minus_expr_to_ir1),
                (ir2.IntListSumExpr, int_list_sum_expr_to_ir1),
                (ir2.IntSetSumExpr, int_set_sum_expr_to_ir1),
                (ir2.BoolListAllExpr, bool_list_all_expr_to_ir1),
                (ir2.BoolSetAllExpr, bool_set_all_expr_to_ir1),
                (ir2.BoolListAnyExpr, bool_list_any_expr_to_ir1),
                (ir2.BoolSetAnyExpr, bool_set_any_expr_to_ir1),
                (ir2.IntComparisonExpr, int_comparison_expr_to_ir1),
                (ir2.IntBinaryOpExpr, int_binary_op_expr_to_ir1),
                (ir2.ListConcatExpr, list_concat_expr_to_ir1),
                (ir2.ListComprehension, list_comprehension_expr_to_ir1),
                (ir2.SetComprehension, set_comprehension_expr_to_ir1))
    for node_class, handler in handlers:
        if isinstance(expr, node_class):
            return handler(expr, writer)
    raise NotImplementedError('Unexpected expression: %s' % str(expr.__class__))
def type_pattern_expr_to_ir1(expr: ir2.Expr, writer: StmtWriter) -> ir1.PatternExpr:
    """Lower an ir2 expression used as a match-case type pattern.

    Dispatches on the concrete ir2 node class, in a fixed order matching the
    original if/elif chain.
    """
    # TODO: Re-enable this once it's possible to use bools in template instantiations.
    # (ir2.BoolLiteral, bool_literal_to_ir1_type_pattern),
    pattern_handlers = ((ir2.VarReference, var_reference_to_ir1_pattern),
                        (ir2.AtomicTypeLiteral, atomic_type_literal_to_ir1_type_pattern),
                        (ir2.PointerTypeExpr, pointer_type_expr_to_ir1_type_pattern),
                        (ir2.ReferenceTypeExpr, reference_type_expr_to_ir1_type_pattern),
                        (ir2.RvalueReferenceTypeExpr, rvalue_reference_type_expr_to_ir1_type_pattern),
                        (ir2.ConstTypeExpr, const_type_expr_to_ir1_type_pattern),
                        (ir2.ArrayTypeExpr, array_type_expr_to_ir1_type_pattern),
                        (ir2.FunctionTypeExpr, function_type_expr_to_ir1_type_pattern),
                        (ir2.TemplateInstantiationExpr, template_instantiation_expr_to_ir1_type_pattern),
                        (ir2.ListExpr, list_expr_to_ir1_type_pattern))
    for node_class, handler in pattern_handlers:
        if isinstance(expr, node_class):
            return handler(expr, writer)
    raise NotImplementedError('Unexpected expression: %s' % str(expr.__class__))
def function_arg_decl_to_ir1(decl: ir2.FunctionArgDecl, writer: Writer):
    """Convert an ir2 argument declaration, obfuscating the argument name."""
    converted_type = type_to_ir1(decl.expr_type)
    obfuscated_name = writer.obfuscate_identifier(decl.name)
    return ir1.FunctionArgDecl(expr_type=converted_type,
                               name=obfuscated_name)
def var_reference_to_ir1(var: ir2.VarReference, writer: StmtWriter):
    """Convert a variable reference; global function names are kept as-is,
    all other names are obfuscated."""
    if var.is_global_function:
        converted_name = var.name
    else:
        converted_name = writer.obfuscate_identifier(var.name)
    return ir1.VarReference(expr_type=type_to_ir1(var.expr_type),
                            name=converted_name,
                            is_global_function=var.is_global_function,
                            is_function_that_may_throw=var.is_function_that_may_throw)
def _select_arbitrary_forwarded_arg(args: Tuple[ir1.FunctionArgDecl, ...]):
    """Pick an argument to forward into a generated wrapper function.

    Prefers the first argument whose type is not a function type; falls back
    to the first argument otherwise.
    """
    selected_arg = next((arg for arg in args
                         if not isinstance(arg.expr_type, ir1.FunctionType)),
                        args[0])
    return ir1.VarReference(expr_type=selected_arg.expr_type,
                            name=selected_arg.name,
                            is_global_function=False,
                            is_function_that_may_throw=isinstance(selected_arg.expr_type, ir1.FunctionType))
def match_expr_to_ir1(match_expr: ir2.MatchExpr, writer: StmtWriter):
    """Lower an ir2 match expression.

    Each match case's body is moved into a freshly-named global (meta)function
    taking the case's free variables as arguments; the resulting ir1 MatchExpr
    cases just call those functions.
    """
    matched_vars = tuple(expr_to_ir1(expr, writer)
                         for expr in match_expr.matched_exprs)
    match_cases = []
    for match_case in match_expr.match_cases:
        match_case_writer = StmtWriter(writer.fun_writer,
                                       writer.current_fun_name,
                                       writer.current_fun_args,
                                       type_to_ir1(match_expr.expr_type),
                                       writer.try_except_contexts)
        match_case_var = expr_to_ir1(match_case.expr, match_case_writer)
        match_case_writer.write_stmt(ir1.ReturnStmt(result=match_case_var, error=None, source_branch=None))
        # The generated function's arguments are the free variables of the case body.
        forwarded_vars = get_unique_free_variables_in_stmts(match_case_writer.stmts)
        if not forwarded_vars:
            # No free variables: forward an arbitrary enclosing-function arg anyway
            # (presumably the generated function needs at least one parameter — see
            # _select_arbitrary_forwarded_arg).
            forwarded_vars = (_select_arbitrary_forwarded_arg(writer.current_fun_args),)
        match_fun_name = writer.new_id()
        arg_decls = tuple(ir1.FunctionArgDecl(expr_type=var.expr_type, name=var.name)
                          for var in forwarded_vars)
        writer.write_function(ir1.FunctionDefn(name=match_fun_name,
                                               description='(meta)function wrapping the code in a branch of a match expression from the function %s' % writer.current_fun_name,
                                               args=arg_decls,
                                               body=tuple(match_case_writer.stmts),
                                               return_type=match_case_var.expr_type))
        match_fun_ref = ir1.VarReference(expr_type=ir1.FunctionType(argtypes=tuple(var.expr_type
                                                                                   for var in forwarded_vars),
                                                                    returns=match_case_var.expr_type),
                                         name=match_fun_name,
                                         is_global_function=True,
                                         is_function_that_may_throw=True)
        match_cases.append(ir1.MatchCase(type_patterns=tuple(type_pattern_expr_to_ir1(type_pattern, writer)
                                                             for type_pattern in match_case.type_patterns),
                                         matched_var_names=tuple(writer.obfuscate_identifier(var_name)
                                                                 for var_name in match_case.matched_var_names),
                                         matched_variadic_var_names=tuple(writer.obfuscate_identifier(var_name)
                                                                          for var_name in match_case.matched_variadic_var_names),
                                         expr=ir1.FunctionCall(fun=match_fun_ref,
                                                               args=forwarded_vars),
                                         match_case_start_branch=match_case.match_case_start_branch,
                                         match_case_end_branch=match_case.match_case_end_branch))
    # The generated case functions are marked as may-throw, so the match result
    # needs error checking.
    return writer.new_var_for_expr_with_error_checking(ir1.MatchExpr(matched_vars, tuple(match_cases)))
# Leaf and type-expression converters: each lowers its sub-expressions (any
# statements they need are emitted into `writer`) and binds the resulting ir1
# expression to a fresh variable.

def bool_literal_to_ir1(literal: ir2.BoolLiteral, writer: StmtWriter):
    return writer.new_var_for_expr(ir1.BoolLiteral(value=literal.value))


def int_literal_to_ir1(literal: ir2.IntLiteral, writer: StmtWriter):
    return writer.new_var_for_expr(ir1.IntLiteral(value=literal.value))


def atomic_type_literal_to_ir1(literal: ir2.AtomicTypeLiteral, writer: StmtWriter):
    return writer.new_var_for_expr(ir1.AtomicTypeLiteral(cpp_type=literal.cpp_type))


def pointer_type_expr_to_ir1(expr: ir2.PointerTypeExpr, writer: StmtWriter):
    return writer.new_var_for_expr(ir1.PointerTypeExpr(expr_to_ir1(expr.type_expr, writer)))


def reference_type_expr_to_ir1(expr: ir2.ReferenceTypeExpr, writer: StmtWriter):
    return writer.new_var_for_expr(ir1.ReferenceTypeExpr(expr_to_ir1(expr.type_expr, writer)))


def rvalue_reference_type_expr_to_ir1(expr: ir2.RvalueReferenceTypeExpr, writer: StmtWriter):
    return writer.new_var_for_expr(ir1.RvalueReferenceTypeExpr(expr_to_ir1(expr.type_expr, writer)))


def const_type_expr_to_ir1(expr: ir2.ConstTypeExpr, writer: StmtWriter):
    return writer.new_var_for_expr(ir1.ConstTypeExpr(expr_to_ir1(expr.type_expr, writer)))


def array_type_expr_to_ir1(expr: ir2.ArrayTypeExpr, writer: StmtWriter):
    return writer.new_var_for_expr(ir1.ArrayTypeExpr(expr_to_ir1(expr.type_expr, writer)))


def function_type_expr_to_ir1(expr: ir2.FunctionTypeExpr, writer: StmtWriter):
    return writer.new_var_for_expr(ir1.FunctionTypeExpr(return_type_expr=expr_to_ir1(expr.return_type_expr, writer),
                                                        arg_list_expr=expr_to_ir1(expr.arg_list_expr, writer)))


def template_instantiation_expr_to_ir1(expr: ir2.TemplateInstantiationExpr, writer: StmtWriter):
    return writer.new_var_for_expr(ir1.TemplateInstantiationExpr(template_atomic_cpp_type=expr.template_atomic_cpp_type,
                                                                 arg_list_expr=expr_to_ir1(expr.arg_list_expr, writer)))


def template_member_access_expr_to_ir1(expr: ir2.TemplateMemberAccessExpr, writer: StmtWriter):
    return writer.new_var_for_expr(
        ir1.TemplateMemberAccessExpr(class_type_expr=expr_to_ir1(expr.class_type_expr, writer),
                                     member_name=expr.member_name,
                                     arg_list_expr=expr_to_ir1(expr.arg_list_expr, writer)))
def list_expr_to_ir1(list_expr: ir2.ListExpr, writer: StmtWriter):
    """Lower a list literal. List-extraction expressions are not expected here
    (the assert enforces it — presumably handled in an earlier stage)."""
    assert list_expr.list_extraction_expr is None
    elem_vars = tuple(expr_to_ir1(elem_expr, writer)
                      for elem_expr in list_expr.elem_exprs)
    return writer.new_var_for_expr(ir1.ListExpr(elem_type=type_to_ir1(list_expr.elem_type),
                                                elems=elem_vars))


def set_expr_to_ir1(set_expr: ir2.SetExpr, writer: StmtWriter):
    """Lower a set literal: start from an empty ir1 list and add the elements
    one at a time via AddToSetExpr (ir1 represents sets as lists)."""
    result = writer.new_var_for_expr(ir1.ListExpr(elem_type=type_to_ir1(set_expr.elem_type),
                                                  elems=()))
    elem_vars = tuple(expr_to_ir1(elem_expr, writer)
                      for elem_expr in set_expr.elem_exprs)
    for var in elem_vars:
        result = writer.new_var_for_expr(ir1.AddToSetExpr(set_expr=result,
                                                          elem_expr=var))
    return result
def function_call_to_ir1(call_expr: ir2.FunctionCall, writer: StmtWriter):
    """Lower a call: evaluate the callee, then the arguments in order, and add
    error checking when the callee may throw."""
    fun_var = expr_to_ir1(call_expr.fun_expr, writer)
    arg_vars = tuple(expr_to_ir1(arg_expr, writer)
                     for arg_expr in call_expr.args)
    call_ir1 = ir1.FunctionCall(fun=fun_var, args=arg_vars)
    if fun_var.is_function_that_may_throw:
        return writer.new_var_for_expr_with_error_checking(call_ir1)
    return writer.new_var_for_expr(call_ir1)
def equality_comparison_to_ir1(comparison_expr: ir2.EqualityComparison, writer: StmtWriter):
    """Lower an equality comparison; set operands use the dedicated
    SetEqualityComparison node, everything else plain EqualityComparison."""
    lhs_var = expr_to_ir1(comparison_expr.lhs, writer)
    rhs_var = expr_to_ir1(comparison_expr.rhs, writer)
    if isinstance(comparison_expr.lhs.expr_type, ir2.SetType):
        comparison_class = ir1.SetEqualityComparison
    else:
        comparison_class = ir1.EqualityComparison
    return writer.new_var_for_expr(comparison_class(lhs=lhs_var, rhs=rhs_var))
def in_expr_to_ir1(expr: ir2.InExpr, writer: StmtWriter):
    # Membership test; ir1 represents sets as lists, so IsInListExpr is used
    # regardless of the ir2 container type.
    return writer.new_var_for_expr(ir1.IsInListExpr(lhs=expr_to_ir1(expr.lhs, writer),
                                                    rhs=expr_to_ir1(expr.rhs, writer)))


def attribute_access_expr_to_ir1(attribute_access_expr: ir2.AttributeAccessExpr, writer: StmtWriter):
    # Field access; the result type is converted explicitly from the ir2 node.
    return writer.new_var_for_expr(ir1.AttributeAccessExpr(var=expr_to_ir1(attribute_access_expr.expr, writer),
                                                           attribute_name=attribute_access_expr.attribute_name,
                                                           expr_type=type_to_ir1(attribute_access_expr.expr_type)))
def and_expr_to_ir1(expr: ir2.AndExpr, writer: StmtWriter):
    """Lower short-circuit `and` into an if-statement.

    y = f() and g()

    becomes:

    if f():
        x = g()
    else:
        x = False
    y = x
    """
    lhs_var = expr_to_ir1(expr.lhs, writer)
    # The rhs is evaluated in a separate writer so its statements only run
    # when the lhs is true.
    if_branch_writer = StmtWriter(writer.fun_writer,
                                  writer.current_fun_name,
                                  writer.current_fun_args,
                                  writer.current_fun_return_type,
                                  writer.try_except_contexts)
    rhs_var = expr_to_ir1(expr.rhs, if_branch_writer)
    writer.write_stmt(ir1.IfStmt(cond=lhs_var,
                                 if_stmts=tuple(if_branch_writer.stmts),
                                 else_stmts=(ir1.Assignment(lhs=rhs_var,
                                                            rhs=ir1.BoolLiteral(value=False),
                                                            source_branch=None),)))
    return rhs_var
def or_expr_to_ir1(expr: ir2.OrExpr, writer: StmtWriter):
    """Lower short-circuit `or` into an if-statement.

    y = f() or g()

    becomes:

    if f():
        x = True
    else:
        x = g()
    y = x
    """
    lhs_var = expr_to_ir1(expr.lhs, writer)
    # The rhs is evaluated in a separate writer so its statements only run
    # when the lhs is false.
    else_branch_writer = StmtWriter(writer.fun_writer,
                                    writer.current_fun_name,
                                    writer.current_fun_args,
                                    writer.current_fun_return_type,
                                    writer.try_except_contexts)
    rhs_var = expr_to_ir1(expr.rhs, else_branch_writer)
    writer.write_stmt(ir1.IfStmt(cond=lhs_var,
                                 if_stmts=(ir1.Assignment(lhs=rhs_var,
                                                          rhs=ir1.BoolLiteral(value=True),
                                                          source_branch=None),),
                                 else_stmts=tuple(else_branch_writer.stmts)))
    return rhs_var
def not_expr_to_ir1(expr: ir2.NotExpr, writer: StmtWriter):
return writer.new_var_for_expr(ir1.NotExpr(expr_to_ir1(expr.expr, writer)))
def int_unary_minus_expr_to_ir1(expr: ir2.IntUnaryMinusExpr, writer: StmtWriter):
    """Lower an ir2 unary minus to an ir1 UnaryMinusExpr bound to a fresh var."""
    operand_var = expr_to_ir1(expr.expr, writer)
    return writer.new_var_for_expr(ir1.UnaryMinusExpr(operand_var))
def int_list_sum_expr_to_ir1(expr: ir2.IntListSumExpr, writer: StmtWriter):
    """Lower sum() over an int list."""
    return writer.new_var_for_expr(ir1.IntListSumExpr(expr_to_ir1(expr.list_expr, writer)))
def int_set_sum_expr_to_ir1(expr: ir2.IntSetSumExpr, writer: StmtWriter):
    """Lower sum() over an int set; ir1 reuses the list node for sets."""
    return writer.new_var_for_expr(ir1.IntListSumExpr(expr_to_ir1(expr.set_expr, writer)))
def bool_list_all_expr_to_ir1(expr: ir2.BoolListAllExpr, writer: StmtWriter):
    """Lower all() over a bool list."""
    return writer.new_var_for_expr(ir1.BoolListAllExpr(expr_to_ir1(expr.list_expr, writer)))
def bool_set_all_expr_to_ir1(expr: ir2.BoolSetAllExpr, writer: StmtWriter):
    """Lower all() over a bool set; ir1 reuses the list node for sets."""
    return writer.new_var_for_expr(ir1.BoolListAllExpr(expr_to_ir1(expr.set_expr, writer)))
def bool_list_any_expr_to_ir1(expr: ir2.BoolListAnyExpr, writer: StmtWriter):
    """Lower any() over a bool list."""
    return writer.new_var_for_expr(ir1.BoolListAnyExpr(expr_to_ir1(expr.list_expr, writer)))
def bool_set_any_expr_to_ir1(expr: ir2.BoolSetAnyExpr, writer: StmtWriter):
    """Lower any() over a bool set; ir1 reuses the list node for sets."""
    return writer.new_var_for_expr(ir1.BoolListAnyExpr(expr_to_ir1(expr.set_expr, writer)))
def int_comparison_expr_to_ir1(expr: ir2.IntComparisonExpr, writer: StmtWriter):
    """Lower an int comparison (<, <=, >, >=); the operator string is carried through."""
    return writer.new_var_for_expr(ir1.IntComparisonExpr(lhs=expr_to_ir1(expr.lhs, writer),
                                                         rhs=expr_to_ir1(expr.rhs, writer),
                                                         op=expr.op))
def int_binary_op_expr_to_ir1(expr: ir2.IntBinaryOpExpr, writer: StmtWriter):
    """Lower an int arithmetic binary op; the operator string is carried through."""
    return writer.new_var_for_expr(ir1.IntBinaryOpExpr(lhs=expr_to_ir1(expr.lhs, writer),
                                                       rhs=expr_to_ir1(expr.rhs, writer),
                                                       op=expr.op))
def list_concat_expr_to_ir1(expr: ir2.ListConcatExpr, writer: StmtWriter):
    """Lower a list `+` concatenation to an ir1 ListConcatExpr."""
    return writer.new_var_for_expr(ir1.ListConcatExpr(lhs=expr_to_ir1(expr.lhs, writer),
                                                      rhs=expr_to_ir1(expr.rhs, writer)))
def deconstructed_list_comprehension_expr_to_ir1(list_var: ir1.VarReference,
                                                 loop_var: ir2.VarReference,
                                                 result_elem_expr: ir2.Expr,
                                                 writer: StmtWriter,
                                                 loop_body_start_branch: SourceBranch,
                                                 loop_exit_branch: SourceBranch):
    """Lower a (list-shaped) comprehension whose source list var is already in ir1.

    The result-element expression is wrapped in a freshly generated helper
    (meta)function so that the ir1 ListComprehensionExpr only needs a single
    function call as its element expression.

    Note: the annotations reflect the actual call sites — `list_var` is the
    already-lowered ir1 var, while `loop_var` and `result_elem_expr` still come
    from ir2 (they are passed to var_reference_to_ir1/expr_to_ir1 below).
    """
    # [f(x, y) * 2
    #  for x in l]
    #
    # Becomes:
    #
    # def g(x, y):
    #   return f(x, y) * 2  # (in fact, this will be converted further)
    #
    # [g(x, y)
    #  for x in l]
    result_elem_type = type_to_ir1(result_elem_expr.expr_type)
    helper_fun_writer = StmtWriter(writer.fun_writer,
                                   current_fun_name=writer.current_fun_name,
                                   current_fun_args=writer.current_fun_args,
                                   current_fun_return_type=result_elem_type,
                                   # We can't forward the try_except_contexts here because the return type is different,
                                   # but it's ok because a list comprehension can't contain "raise" statements (while
                                   # of course it can throw indirectly).
                                   try_except_contexts=[])
    helper_fun_writer.write_stmt(ir1.ReturnStmt(result=expr_to_ir1(result_elem_expr, helper_fun_writer),
                                                error=None,
                                                source_branch=None))
    forwarded_vars = get_unique_free_variables_in_stmts(helper_fun_writer.stmts)
    if not forwarded_vars:
        # The helper must take at least one argument; forward an arbitrary one,
        # or a dummy 'void' literal when the enclosing function has no args.
        if writer.current_fun_args:
            forwarded_vars = [_select_arbitrary_forwarded_arg(writer.current_fun_args)]
        else:
            forwarded_vars = [writer.new_var_for_expr(expr=ir1.AtomicTypeLiteral('void'))]
    helper_fun_name = writer.new_id()
    writer.write_function(ir1.FunctionDefn(name=helper_fun_name,
                                           description='(meta)function wrapping the result expression in a list/set comprehension from the function %s' % writer.current_fun_name,
                                           args=tuple(ir1.FunctionArgDecl(expr_type=var.expr_type, name=var.name)
                                                      for var in forwarded_vars),
                                           body=tuple(helper_fun_writer.stmts),
                                           return_type=result_elem_type))
    helper_fun_call = ir1.FunctionCall(fun=ir1.VarReference(name=helper_fun_name,
                                                            expr_type=ir1.FunctionType(argtypes=tuple(var.expr_type
                                                                                                      for var in forwarded_vars),
                                                                                       returns=result_elem_type),
                                                            is_global_function=True,
                                                            is_function_that_may_throw=True),
                                       args=forwarded_vars)
    return writer.new_var_for_expr_with_error_checking(ir1.ListComprehensionExpr(list_var=list_var,
                                                                                 loop_var=var_reference_to_ir1(loop_var, writer),
                                                                                 result_elem_expr=helper_fun_call,
                                                                                 loop_body_start_branch=loop_body_start_branch,
                                                                                 loop_exit_branch=loop_exit_branch))
def list_comprehension_expr_to_ir1(expr: ir2.ListComprehension, writer: StmtWriter):
    """Lower an ir2 list comprehension: lower the source list, then delegate."""
    source_list_var = expr_to_ir1(expr.list_expr, writer)
    return deconstructed_list_comprehension_expr_to_ir1(list_var=source_list_var,
                                                        loop_var=expr.loop_var,
                                                        result_elem_expr=expr.result_elem_expr,
                                                        writer=writer,
                                                        loop_body_start_branch=expr.loop_body_start_branch,
                                                        loop_exit_branch=expr.loop_exit_branch)
def set_comprehension_expr_to_ir1(expr: ir2.SetComprehension, writer: StmtWriter):
    """Lower an ir2 set comprehension via the list-comprehension machinery.

    The source set is converted to a list, the list comprehension is lowered,
    and the resulting list is converted back to a set.
    """
    # {f(x, y) * 2
    #  for x in s}
    #
    # Becomes:
    #
    # l = set_to_list(s)
    # l2 = [f(x, y) * 2
    #       for x in l]  # (in fact, this will be converted further)
    # list_to_set(l2)
    s_var = expr_to_ir1(expr.set_expr, writer)
    l_var = writer.new_var_for_expr(ir1.SetToListExpr(s_var))
    l2_var = deconstructed_list_comprehension_expr_to_ir1(list_var=l_var,
                                                          loop_var=expr.loop_var,
                                                          result_elem_expr=expr.result_elem_expr,
                                                          writer=writer,
                                                          loop_body_start_branch=expr.loop_body_start_branch,
                                                          loop_exit_branch=expr.loop_exit_branch)
    return writer.new_var_for_expr(ir1.ListToSetExpr(l2_var))
def var_reference_to_ir1_pattern(var: ir2.VarReference, writer: StmtWriter):
    """Convert an ir2 VarReference into an ir1 VarReferencePattern.

    Global function names are preserved verbatim; local names are obfuscated
    through the writer, matching how non-pattern references are lowered.
    """
    if var.is_global_function:
        pattern_name = var.name
    else:
        pattern_name = writer.obfuscate_identifier(var.name)
    return ir1.VarReferencePattern(expr_type=type_to_ir1(var.expr_type),
                                   name=pattern_name,
                                   is_global_function=var.is_global_function,
                                   is_function_that_may_throw=var.is_function_that_may_throw)
def atomic_type_literal_to_ir1_type_pattern(expr: ir2.AtomicTypeLiteral, writer: StmtWriter):
    """Convert an atomic C++ type literal used in a pattern position."""
    return ir1.AtomicTypeLiteralPattern(expr.cpp_type)
# TODO: Re-enable this once it's possible to use bools in template instantiations.
# def bool_literal_to_ir1_type_pattern(expr: ir2.BoolLiteral, writer: StmtWriter):
#     return ir1.BoolLiteral(expr.value)
def pointer_type_expr_to_ir1_type_pattern(expr: ir2.PointerTypeExpr, writer: StmtWriter):
    """Convert a pointer type expression used in a pattern position."""
    return ir1.PointerTypePatternExpr(type_pattern_expr_to_ir1(expr.type_expr, writer))
def reference_type_expr_to_ir1_type_pattern(expr: ir2.ReferenceTypeExpr, writer: StmtWriter):
    """Convert an lvalue-reference type expression used in a pattern position."""
    return ir1.ReferenceTypePatternExpr(type_pattern_expr_to_ir1(expr.type_expr, writer))
def rvalue_reference_type_expr_to_ir1_type_pattern(expr: ir2.RvalueReferenceTypeExpr, writer: StmtWriter):
    """Convert an rvalue-reference type expression used in a pattern position."""
    return ir1.RvalueReferenceTypePatternExpr(type_pattern_expr_to_ir1(expr.type_expr, writer))
def const_type_expr_to_ir1_type_pattern(expr: ir2.ConstTypeExpr, writer: StmtWriter):
    """Convert a const-qualified type expression used in a pattern position."""
    return ir1.ConstTypePatternExpr(type_pattern_expr_to_ir1(expr.type_expr, writer))
def array_type_expr_to_ir1_type_pattern(expr: ir2.ArrayTypeExpr, writer: StmtWriter):
    """Convert an array type expression used in a pattern position."""
    return ir1.ArrayTypePatternExpr(type_pattern_expr_to_ir1(expr.type_expr, writer))
def function_type_expr_to_ir1_type_pattern(expr: ir2.FunctionTypeExpr, writer: StmtWriter):
    """Convert a function type expression used in a pattern position."""
    return ir1.FunctionTypePatternExpr(return_type_expr=type_pattern_expr_to_ir1(expr.return_type_expr, writer),
                                       arg_list_expr=type_pattern_expr_to_ir1(expr.arg_list_expr, writer))
def template_instantiation_expr_to_ir1_type_pattern(expr: ir2.TemplateInstantiationExpr, writer: StmtWriter):
    """Convert a template instantiation used in a pattern position."""
    # This is the only ListExpr that's allowed in a template instantiation in a pattern.
    assert isinstance(expr.arg_list_expr, ir2.ListExpr)
    arg_exprs = tuple(type_pattern_expr_to_ir1(arg_expr, writer)
                      for arg_expr in expr.arg_list_expr.elem_exprs)
    list_extraction_expr = expr.arg_list_expr.list_extraction_expr
    return ir1.TemplateInstantiationPatternExpr(template_atomic_cpp_type=expr.template_atomic_cpp_type,
                                                arg_exprs=arg_exprs,
                                                list_extraction_arg_expr=var_reference_to_ir1_pattern(list_extraction_expr, writer) if list_extraction_expr else None)
def list_expr_to_ir1_type_pattern(expr: ir2.ListExpr, writer: StmtWriter):
    """Convert a list expression used in a pattern position."""
    # NOTE(review): the extraction var here goes through var_reference_to_ir1, while the
    # template-instantiation pattern above uses var_reference_to_ir1_pattern — confirm
    # this asymmetry is intentional.
    return ir1.ListPatternExpr(elem_type=type_to_ir1(expr.elem_type),
                               elems=tuple(type_pattern_expr_to_ir1(elem_expr, writer)
                                           for elem_expr in expr.elem_exprs),
                               list_extraction_expr = var_reference_to_ir1(expr.list_extraction_expr, writer)
                               if expr.list_extraction_expr else None)
def assert_to_ir1(assert_stmt: ir2.Assert, writer: StmtWriter):
    """Lower an ir2 assert: the condition expression is lowered to a var first."""
    writer.write_stmt(ir1.Assert(var=expr_to_ir1(assert_stmt.expr, writer),
                                 message=assert_stmt.message,
                                 source_branch=assert_stmt.source_branch))
def pass_stmt_to_ir1(stmt: ir2.PassStmt, writer: StmtWriter):
    """Lower an ir2 pass statement, keeping its source-branch info."""
    writer.write_stmt(ir1.PassStmt(source_branch=stmt.source_branch))
def try_except_stmt_to_ir1(try_except_stmt: ir2.TryExcept,
                           then_stmts: Tuple[ir2.Stmt, ...],
                           writer: StmtWriter):
    """Lower a try/except statement into error-propagating function calls.

    `then_stmts` are the statements that follow the try/except in the enclosing
    body; they are wrapped in a generated "then" function so both the try body
    and the except handler can fall through into them. The except body is
    wrapped in its own generated function, and a TryExceptContext is registered
    so that matching `raise` statements in the try body become calls to it
    (see raise_stmt_to_ir1).
    """
    # try:
    #   x = f()
    #   y = g()
    # except MyError as e:
    #   y = e.x
    #   if b:
    #     return 5
    # z = y + 3
    # return z
    #
    # Becomes:
    #
    # def then_fun(y):
    #   z = y + 3
    #   return z
    #
    # def except_fun(e, b):
    #   y = e.x
    #   if b:
    #     return 5
    #   x0, err0 = then_fun(y)
    #   b0 = is_error(err0)
    #   if b0:
    #     return None, err0
    #   return x0, None
    #
    # x, f_err = f()
    # f_b = is_error(f_err)
    # if f_b:
    #   b0 = is_instance_of_MyError(f_err)
    #   if b0:
    #     e = f_err  # type: MyError
    #     res, err = except_fun(...)
    #     return res, err
    #   return None, f_err
    # y, g_err = g()
    # g_b = is_error(g_err)
    # if g_b:
    #   b0 = is_instance_of_MyError(g_err)
    #   if b0:
    #     e = g_err  # type: MyError
    #     res, err = except_fun(...)
    #     return res, err
    #   return None, g_err
    # res, err = then_fun()
    # return res, err
    if then_stmts:
        # Wrap the trailing statements in a generated "then" function.
        then_stmts_writer = StmtWriter(writer.fun_writer,
                                       writer.current_fun_name,
                                       writer.current_fun_args,
                                       writer.current_fun_return_type,
                                       writer.try_except_contexts)
        stmts_to_ir1(then_stmts, then_stmts_writer)
        then_fun_forwarded_vars = get_unique_free_variables_in_stmts(then_stmts_writer.stmts)
        if not then_fun_forwarded_vars:
            # The generated function must take at least one argument.
            then_fun_forwarded_vars = [_select_arbitrary_forwarded_arg(writer.current_fun_args)]
        then_fun_defn = ir1.FunctionDefn(name=writer.new_id(),
                                         description='(meta)function wrapping the code after a try-except statement from the function %s' % writer.current_fun_name,
                                         args=tuple(ir1.FunctionArgDecl(expr_type=var.expr_type, name=var.name)
                                                    for var in then_fun_forwarded_vars),
                                         body=tuple(then_stmts_writer.stmts),
                                         return_type=writer.current_fun_return_type)
        writer.write_function(then_fun_defn)
        then_fun_ref = ir1.VarReference(expr_type=ir1.FunctionType(argtypes=tuple(arg.expr_type
                                                                                  for arg in then_fun_defn.args),
                                                                   returns=then_fun_defn.return_type),
                                        name=then_fun_defn.name,
                                        is_global_function=True,
                                        is_function_that_may_throw=True)
        then_fun_call_expr = ir1.FunctionCall(fun=then_fun_ref, args=then_fun_forwarded_vars)
    else:
        then_fun_call_expr = None
    # Wrap the except body in a generated "except" function; if there is a
    # "then" function and the except body can fall off the end, chain into it.
    except_stmts_writer = StmtWriter(writer.fun_writer,
                                     writer.current_fun_name,
                                     writer.current_fun_args,
                                     writer.current_fun_return_type,
                                     writer.try_except_contexts)
    except_stmts_writer.write_stmt(ir1.PassStmt(source_branch=try_except_stmt.except_branch))
    stmts_to_ir1(try_except_stmt.except_body, except_stmts_writer)
    if then_fun_call_expr and not get_return_type(try_except_stmt.except_body).always_returns:
        except_stmts_writer.write_stmt(
            ir1.ReturnStmt(result=except_stmts_writer.new_var_for_expr_with_error_checking(then_fun_call_expr),
                           error=None,
                           source_branch=None))
    except_fun_forwarded_vars = get_unique_free_variables_in_stmts(except_stmts_writer.stmts)
    if not except_fun_forwarded_vars:
        except_fun_forwarded_vars = [_select_arbitrary_forwarded_arg(writer.current_fun_args)]
    except_fun_defn = ir1.FunctionDefn(name=writer.new_id(),
                                       description='(meta)function wrapping the code in an except block from the function %s' % writer.current_fun_name,
                                       args=tuple(ir1.FunctionArgDecl(expr_type=var.expr_type, name=var.name)
                                                  for var in except_fun_forwarded_vars),
                                       body=tuple(except_stmts_writer.stmts),
                                       return_type=writer.current_fun_return_type)
    writer.write_function(except_fun_defn)
    except_fun_ref = ir1.VarReference(expr_type=ir1.FunctionType(argtypes=tuple(arg.expr_type
                                                                                for arg in except_fun_defn.args),
                                                                 returns=except_fun_defn.return_type),
                                      name=except_fun_defn.name,
                                      is_global_function=True,
                                      is_function_that_may_throw=True)
    except_fun_call_expr = ir1.FunctionCall(fun=except_fun_ref, args=except_fun_forwarded_vars)
    # Lower the try body with the handler registered, so matching raises route
    # to except_fun; then fall through into the "then" function if needed.
    with writer.enter_try_except_context(TryExceptContext(type_to_ir1(try_except_stmt.caught_exception_type),
                                                          try_except_stmt.caught_exception_name,
                                                          except_fun_call_expr)):
        writer.write_stmt(ir1.PassStmt(source_branch=try_except_stmt.try_branch))
        stmts_to_ir1(try_except_stmt.try_body, writer)
    if then_fun_call_expr and not get_return_type(try_except_stmt.try_body).always_returns:
        writer.write_stmt(ir1.ReturnStmt(result=writer.new_var_for_expr_with_error_checking(then_fun_call_expr),
                                         error=None,
                                         source_branch=None))
def assignment_to_ir1(assignment: ir2.Assignment, writer: StmtWriter):
    """Lower a simple assignment: rhs expression first, then the Assignment stmt."""
    writer.write_stmt(ir1.Assignment(lhs=var_reference_to_ir1(assignment.lhs, writer),
                                     rhs=expr_to_ir1(assignment.rhs, writer),
                                     source_branch=assignment.source_branch))
def unpacking_assignment_to_ir1(assignment: ir2.UnpackingAssignment, writer: StmtWriter):
    """Lower a tuple-unpacking assignment, keeping the runtime error message."""
    writer.write_stmt(ir1.UnpackingAssignment(lhs_list=tuple(var_reference_to_ir1(var, writer)
                                                             for var in assignment.lhs_list),
                                              rhs=expr_to_ir1(assignment.rhs, writer),
                                              error_message=assignment.error_message,
                                              source_branch=assignment.source_branch))
def return_stmt_to_ir1(return_stmt: ir2.ReturnStmt, writer: StmtWriter):
    """Lower an ir2 return into an ir1 ReturnStmt with no error component."""
    result_var = expr_to_ir1(return_stmt.expr, writer)
    writer.write_stmt(ir1.ReturnStmt(result=result_var,
                                     error=None,
                                     source_branch=return_stmt.source_branch))
def raise_stmt_to_ir1(raise_stmt: ir2.RaiseStmt, writer: StmtWriter):
    """Lower a raise statement.

    If an enclosing try/except context catches this exception type, the raise
    becomes a call to that context's generated handler function; otherwise the
    exception is returned as the error component of a ReturnStmt.
    """
    exception_expr = expr_to_ir1(raise_stmt.expr, writer)
    for context in writer.try_except_contexts:
        if context.caught_exception_type == exception_expr.expr_type:
            # try:
            #   raise f(x)
            # except MyError as e:
            #   ...
            #
            # Becomes:
            #
            # def handler(e, ...) :
            #   ...
            #
            # e = f(x)
            # result, err = handler(e, ...)
            # return result, err
            exception_var = ir1.VarReference(expr_type=exception_expr.expr_type,
                                             name=writer.obfuscate_identifier(context.caught_exception_name),
                                             is_global_function=False,
                                             is_function_that_may_throw=False)
            writer.write_stmt(ir1.Assignment(lhs=exception_var, rhs=exception_expr, source_branch=raise_stmt.source_branch))
            handler_result_var = writer.new_var(context.except_fun_call_expr.expr_type)
            handler_error_var = writer.new_var(ir1.ErrorOrVoidType())
            writer.write_stmt(ir1.Assignment(lhs=handler_result_var,
                                             lhs2=handler_error_var,
                                             rhs=context.except_fun_call_expr,
                                             source_branch=None))
            writer.write_stmt(ir1.ReturnStmt(result=handler_result_var,
                                             error=handler_error_var,
                                             source_branch=None))
            break
    else:
        # No enclosing handler for this type: propagate the exception upward.
        writer.write_stmt(ir1.ReturnStmt(result=None,
                                         error=exception_expr,
                                         source_branch=raise_stmt.source_branch))
def if_stmt_to_ir1(if_stmt: ir2.IfStmt, writer: StmtWriter):
    """Lower an if/else: each branch is lowered into its own StmtWriter so that
    branch-local statements don't leak into the enclosing statement list."""
    cond_var = expr_to_ir1(if_stmt.cond_expr, writer)
    if_branch_writer = StmtWriter(writer.fun_writer,
                                  writer.current_fun_name,
                                  writer.current_fun_args,
                                  writer.current_fun_return_type,
                                  writer.try_except_contexts)
    stmts_to_ir1(if_stmt.if_stmts, if_branch_writer)
    else_branch_writer = StmtWriter(writer.fun_writer,
                                    writer.current_fun_name,
                                    writer.current_fun_args,
                                    writer.current_fun_return_type,
                                    writer.try_except_contexts)
    stmts_to_ir1(if_stmt.else_stmts, else_branch_writer)
    writer.write_stmt(ir1.IfStmt(cond=cond_var,
                                 if_stmts=tuple(if_branch_writer.stmts),
                                 else_stmts=tuple(else_branch_writer.stmts)))
def stmts_to_ir1(stmts: Tuple[ir2.Stmt, ...], writer: StmtWriter):
    """Dispatch each ir2 statement to its converter.

    TryExcept is special: the converter also receives all remaining statements
    (they become the generated "then" function), so we return immediately after
    it — the tail must not be lowered a second time.
    """
    for index, stmt in enumerate(stmts):
        if isinstance(stmt, ir2.IfStmt):
            if_stmt_to_ir1(stmt, writer)
        elif isinstance(stmt, ir2.Assignment):
            assignment_to_ir1(stmt, writer)
        elif isinstance(stmt, ir2.UnpackingAssignment):
            unpacking_assignment_to_ir1(stmt, writer)
        elif isinstance(stmt, ir2.ReturnStmt):
            return_stmt_to_ir1(stmt, writer)
        elif isinstance(stmt, ir2.RaiseStmt):
            raise_stmt_to_ir1(stmt, writer)
        elif isinstance(stmt, ir2.Assert):
            assert_to_ir1(stmt, writer)
        elif isinstance(stmt, ir2.TryExcept):
            try_except_stmt_to_ir1(stmt, stmts[index + 1:], writer)
            return
        elif isinstance(stmt, ir2.PassStmt):
            pass_stmt_to_ir1(stmt, writer)
        else:
            raise NotImplementedError('Unexpected statement: %s' % str(stmt.__class__))
def function_defn_to_ir1(function_defn: ir2.FunctionDefn, writer: FunWriter):
    """Lower a whole ir2 function definition and register it with the FunWriter."""
    return_type = type_to_ir1(function_defn.return_type)
    arg_decls = tuple(function_arg_decl_to_ir1(arg, writer) for arg in function_defn.args)
    # Top-level function bodies start with no enclosing try/except contexts.
    stmt_writer = StmtWriter(writer, function_defn.name, arg_decls, return_type, try_except_contexts=[])
    stmts_to_ir1(function_defn.body, stmt_writer)
    writer.write_function(ir1.FunctionDefn(name=function_defn.name,
                                           description='',
                                           args=arg_decls,
                                           body=tuple(stmt_writer.stmts),
                                           return_type=return_type))
def module_to_ir1(module: ir2.Module, identifier_generator: Iterator[str]):
    """Lower an entire ir2 module to an ir1 Module.

    Output body order: custom type definitions, the check-if-error definition,
    all lowered functions (including generated helpers), module-level assertion
    statements, and finally the module's pass statements.
    """
    writer = FunWriter(identifier_generator)
    for function_defn in module.function_defns:
        function_defn_to_ir1(function_defn, writer)
    # Module-level assertions are lowered outside any function context.
    stmt_writer = StmtWriter(writer, current_fun_name='', current_fun_args=(), current_fun_return_type=None, try_except_contexts=[])
    for assertion in module.assertions:
        assert_to_ir1(assertion, stmt_writer)
    custom_types_defns = [type_to_ir1(expr_type) for expr_type in module.custom_types]
    check_if_error_defn = ir1.CheckIfErrorDefn(tuple((type_to_ir1(expr_type), expr_type.exception_message)
                                                     for expr_type in module.custom_types if expr_type.is_exception_class))
    pass_stmts = tuple(ir1.PassStmt(stmt.source_branch)
                       for stmt in module.pass_stmts)
    return ir1.Module(body=(*custom_types_defns, check_if_error_defn, *writer.function_defns, *stmt_writer.stmts, *pass_stmts),
                      public_names=frozenset(module.public_names),)
|
|
"""
Driver classes for cisco devices.
- IOS - driver for cisco IOS operating system
"""
import logging
from nocexec.drivers.base import NOCExecDriver
from nocexec.exception import SSHClientError, TelnetClientError, \
NOCExecError, SSHClientExecuteCmdError, TelnetClientExecuteCmdError
LOG = logging.getLogger('nocexec.drivers.cisco')
__all__ = ['IOSError', 'IOSCommandError', 'IOS']
class IOSError(NOCExecError):
    """Base class for all exceptions raised by the Cisco IOS driver."""
    pass
class IOSCommandError(IOSError):
    """Raised when a command sent to the device fails to execute."""
    pass
class IOS(NOCExecDriver):  # pylint: disable=too-many-instance-attributes
    """
    A driver class for connecting to Cisco IOS devices using the SSH or Telnet
    protocol and executing commands.

    :param device: domain or ip address of device (default: "")
    :param login: username for authorization on device (default: "")
    :param password: password for authorization on device (default: "")
    :param port: port number for connection (default: 22)
    :param timeout: timeout waiting for connection (default: 5)
    :param protocol: use protocol ('ssh' or 'telnet') for
                     connection (default: "ssh")
    :type device: string
    :type login: string
    :type password: string
    :type port: int
    :type timeout: int
    :type protocol: string

    :Example:

    >>> from nocexec.drivers.cisco import IOS
    >>> with IOS("192.168.0.1", "user", "password") as cli:
    ...     for l in cli.view("show system mtu"):
    ...         print(l)
    ['', 'System MTU size is 1500 bytes', 'System Jumbo MTU size is 1500
    bytes', 'System Alternate MTU size is 1500 bytes', 'Routing MTU size is
    1500 bytes']

    .. seealso:: :class:`nocexec.drivers.juniper.JunOS` and
                 :class:`nocexec.drivers.extreme.XOS`
    .. note::
        raises exceptions inherited from IOSError exception
    """

    def __init__(self, *args, **kwargs):
        super(IOS, self).__init__(*args, **kwargs)
        # Prompt patterns; both are refined with the device hostname once it
        # is learned in _prepare_shell().
        self._shell_prompt = "#"
        self._config_prompt = r"\(config.*?\)#"
        # Cached mode flags to avoid re-sending enable/configure commands.
        self._priv_mode = False
        self._config_mode = False

    def _prepare_shell(self):
        # Disable clipaging, check permission level and fill hostname.
        shell_ends = ['>', '#']
        self.cli.connection.sendline("terminal length 0")
        answ = self.cli.connection.expect(shell_ends)
        self._hostname = self.cli.connection.before.splitlines()[-1]
        # NOTE(review): the shell prompt is set to hostname + "#" even when the
        # matched end was '>' (user mode) — confirm this is intentional, since
        # view() waits on this prompt.
        self._shell_prompt = self._hostname + "#"
        self._config_prompt = self._hostname + self._config_prompt
        # expect() returns the index of the matched pattern: 0 -> '>' (user
        # mode), 1 -> '#' (privileged), so a truthy index means privileged.
        self._priv_mode = bool(answ)

    def _enable_privileged(self):
        # Enter privileged EXEC mode; returns True on success, False on error.
        try:
            self.cli.execute("enable", wait=[self._hostname + "#"])
            self._shell_prompt = self._hostname + "#"
            self._priv_mode = True
            return True
        except (SSHClientExecuteCmdError, TelnetClientExecuteCmdError):
            return False

    def _disable_privileged(self):
        # Leave privileged EXEC mode; returns True on success, False on error.
        try:
            self.cli.execute("disable", wait=[self._hostname + ">"])
            self._shell_prompt = self._hostname + ">"
            self._priv_mode = False
            return True
        except (SSHClientExecuteCmdError, TelnetClientExecuteCmdError):
            return False

    def _enter_config(self):
        # Enter global configuration mode, elevating privileges first if
        # needed; returns True on success, False on error.
        if not self._priv_mode and not self._enable_privileged():
            LOG.error("unregistered mode is used")
            return False
        if self._config_mode:
            return True
        try:
            self.cli.execute("configure terminal",
                             wait=[self._config_prompt])
            self._config_mode = True
            return True
        except (SSHClientExecuteCmdError, TelnetClientExecuteCmdError) as err:
            LOG.error("enter config mode error: %s", str(err))
            return False

    def _exit_config(self):
        # Leave configuration mode ("end"); returns True on success.
        if not self._config_mode:
            return True
        try:
            self.cli.execute("end", wait=[self._shell_prompt])
            self._config_mode = False
            return True
        except (SSHClientExecuteCmdError, TelnetClientExecuteCmdError) as err:
            # Fixed copy-paste bug: this previously logged "enter config mode
            # error" which made failures here indistinguishable from
            # _enter_config failures in the logs.
            LOG.error("exit config mode error: %s", str(err))
            return False

    def connect(self):
        """
        Connection to the device via the specified protocol.

        .. note::
            raises an exception IOSError if a connection error occurs.
        """
        super(IOS, self).init_client()
        try:
            self.cli.connect()
        except (SSHClientError, TelnetClientError) as err:
            raise IOSError(err)
        self._prepare_shell()

    def edit(self, command):
        """
        Running a command on the Cisco IOS device in configuration mode with
        the expected result and error handling. Before executing the command,
        the access level is checked and the configuration mode is enabled.

        :param command: sent command
        :type command: string

        :returns: list of lines with the result of the command execution
        :rtype: list of lines

        .. warning:: command is required argument
        .. note::
            raises an exception IOSError if connection not established.
            raises an exception IOSCommandError if an error occurs.
        """
        if self.cli is None:
            raise IOSError("no connection to the device")
        if not self._enter_config():
            raise IOSCommandError("can not enter configuration mode")
        try:
            result = self.cli.execute(
                command=command, wait=[self._config_prompt])
        except (SSHClientExecuteCmdError, TelnetClientExecuteCmdError) as err:
            raise IOSCommandError(err)
        return result

    def view(self, command):
        """
        Running a command on the Cisco IOS device in view mode with
        the expected result and error handling. Before executing the command,
        the configuration mode is disabled.

        :param command: sent command
        :type command: string

        :returns: list of lines with the result of the command execution
        :rtype: list of lines

        .. warning:: command is required argument
        .. note::
            raises an exception IOSError if connection not established.
            raises an exception IOSCommandError if an error occurs.
        """
        if self.cli is None:
            raise IOSError("no connection to the device")
        if not self._exit_config():
            raise IOSCommandError("can not exit the configuration mode")
        try:
            result = self.cli.execute(
                command=command, wait=[self._shell_prompt])
        except (SSHClientExecuteCmdError, TelnetClientExecuteCmdError) as err:
            raise IOSCommandError(err)
        return result

    def save(self):
        """
        Saving the configuration on the device

        :returns: True if configuration saved, False if not
        :rtype: bool

        .. note::
            not raises exceptions.
        """
        if self.cli is None:
            return False
        if not self._exit_config():
            return False
        try:
            self.cli.execute(command="write memory", wait=[r"\[OK\]"])
            return True
        except (SSHClientExecuteCmdError, TelnetClientExecuteCmdError) as err:
            LOG.error("CiscoIOS save configuration on device '%s' error: %s",
                      self._device, str(err))
            return False
|
|
# Copyright 2013 Viewfinder Inc. All Rights Reserved.
"""Tests for merge accounts operation.
"""
__author__ = 'andy@emailscrubbed.com (Andy Kimball)'
import mock
from copy import deepcopy
from viewfinder.backend.base import message, util
from viewfinder.backend.db.activity import Activity
from viewfinder.backend.db.contact import Contact
from viewfinder.backend.db.db_client import DBKey
from viewfinder.backend.db.follower import Follower
from viewfinder.backend.db.identity import Identity
from viewfinder.backend.db.operation import Operation
from viewfinder.backend.db.viewpoint import Viewpoint
from viewfinder.backend.op.merge_accounts_op import MergeAccountsOperation
from viewfinder.backend.services.email_mgr import TestEmailManager
from viewfinder.backend.www.test import auth_test, service_base_test
class MergeAccountsTestCase(service_base_test.ServiceBaseTestCase):
  def setUp(self):
    """Create test assets, two single-recipient viewpoints, and confirmed cookies for users #2 and #3."""
    super(MergeAccountsTestCase, self).setUp()
    self._CreateSimpleTestAssets()

    # Create a viewpoint for user #2 and one for user #3.
    self._vp_id, self._ep_id = self._ShareSimpleTestAssets([self._user2.user_id])
    self._vp_id2, self._ep_id2 = self._ShareSimpleTestAssets([self._user3.user_id])

    # Create confirmed cookies (confirm_time set so merges are authorized).
    self._confirmed_cookie2 = self._tester.GetSecureUserCookie(user_id=self._user2.user_id,
                                                               device_id=self._device_ids[1],
                                                               user_name=self._user2.name,
                                                               confirm_time=util._TEST_TIME)
    self._confirmed_cookie3 = self._tester.GetSecureUserCookie(user_id=self._user3.user_id,
                                                               device_id=self._device_ids[2],
                                                               user_name=self._user3.name,
                                                               confirm_time=util._TEST_TIME)

  def testMergeWithCookie(self):
    """Test basic merge using source cookie."""
    # Merge user #3 into user #2; user #2 should now follow user #3's viewpoint too.
    self._tester.MergeAccounts(self._cookie2, source_user_cookie=self._confirmed_cookie3)
    self.assertEqual(len(self._tester.QueryFollowed(self._cookie2)['viewpoints']), 3)

    # Remove cookie so that base class won't try to validate its user's assets (and fail).
    self._cookies.remove(self._cookie3)

  def testMergeWithIdentity(self):
    """Test basic merge using source identity."""
    # Merge user #3 into user #2.
    source_identity_dict = self._TestGenerateMergeToken('Email:%s' % self._user3.email,
                                                        user_cookie=self._cookie2,
                                                        error_if_linked=False)
    self._tester.MergeAccounts(self._cookie2, source_identity_dict=source_identity_dict)
    self.assertEqual(len(self._tester.QueryFollowed(self._cookie2)['viewpoints']), 3)

    # Link a previously unlinked email to user #2.
    source_identity_dict = self._TestGenerateMergeToken('Email:another-email@emailscrubbed.com',
                                                        user_cookie=self._cookie2,
                                                        error_if_linked=True)
    self._tester.MergeAccounts(self._cookie2, source_identity_dict=source_identity_dict)
    self.assertEqual(self._tester.ListIdentities(self._cookie2),
                     ['Email:another-email@emailscrubbed.com', 'Email:user3@emailscrubbed.com', 'FacebookGraph:2'])

    # Remove cookie so that base class won't try to validate its user's assets (and fail).
    self._cookies.remove(self._cookie3)
  @mock.patch.object(MergeAccountsOperation, '_FOLLOWER_LIMIT', 2)
  def testMergeMultipleViewpoints(self):
    """Test merge with multiple source viewpoints.

    _FOLLOWER_LIMIT is patched down to 2 to force the merge operation to
    process the source viewpoints in multiple batches.
    """
    self._ShareSimpleTestAssets([self._user3.user_id])
    self._ShareSimpleTestAssets([self._user3.user_id])
    self._ShareSimpleTestAssets([self._user3.user_id])

    # Merge user #3 into user #2.
    self._tester.MergeAccounts(self._cookie2, source_user_cookie=self._confirmed_cookie3)
    self.assertEqual(len(self._tester.QueryFollowed(self._cookie2)['viewpoints']), 6)

    # Remove cookie so that base class won't try to validate its user's assets (and fail).
    self._cookies.remove(self._cookie3)

  def testMergeOverlappingViewpoints(self):
    """Test merge where some source viewpoints are already followed by the target user."""
    self._ShareSimpleTestAssets([self._user2.user_id, self._user3.user_id])
    self._ShareSimpleTestAssets([self._user2.user_id, self._user3.user_id])

    # Merge user #2 into user #3; shared viewpoints must not be double-counted.
    self._tester.MergeAccounts(self._cookie3, source_user_cookie=self._confirmed_cookie2)
    self.assertEqual(len(self._tester.QueryFollowed(self._cookie3)['viewpoints']), 5)

    # Remove cookie so that base class won't try to validate its user's assets (and fail).
    self._cookies.remove(self._cookie2)

  def testMergeRemovedSourceViewpoint(self):
    """Test merge where a source viewpoint has been removed."""
    vp_id, ep_id = self._ShareSimpleTestAssets([self._user2.user_id])
    self._tester.RemoveViewpoint(self._cookie2, vp_id)

    # Merge user #2 into user #3; the removed viewpoint should not carry over.
    self._tester.MergeAccounts(self._cookie3, source_user_cookie=self._confirmed_cookie2)
    self.assertEqual(len(self._tester.QueryFollowed(self._cookie3)['viewpoints']), 3)

    # Remove cookie so that base class won't try to validate its user's assets (and fail).
    self._cookies.remove(self._cookie2)
  def testMergeRemovedTargetViewpoint(self):
    """Test merge where the target user has removed the target viewpoint.

    Covers both the revivable (RemoveViewpoint) and un-revivable
    (RemoveFollowers) cases; the REMOVED/UNREVIVABLE labels must survive merge.
    """
    # ------------------------------
    # RemoveViewpoint case (revivable).
    # ------------------------------
    vp_id, _ = self._ShareSimpleTestAssets([self._user2.user_id, self._user3.user_id])
    self._tester.RemoveViewpoint(self._cookie3, vp_id)
    self._tester.MergeAccounts(self._cookie3, source_user_cookie=self._confirmed_cookie2)

    response_dict = self._tester.QueryFollowed(self._cookie3)
    vp_dict = util.GetSingleListItem([vp_dict for vp_dict in response_dict['viewpoints']
                                      if vp_dict['viewpoint_id'] == vp_id])
    self.assertIn(Follower.REMOVED, vp_dict['labels'])

    # Remove cookie so that base class won't try to validate its user's assets (and fail).
    self._cookies.remove(self._cookie2)

    # ------------------------------
    # RemoveFollowers case (un-revivable).
    # ------------------------------
    vp_id, _ = self._ShareSimpleTestAssets([self._user3.user_id])
    self._tester.RemoveFollowers(self._cookie, vp_id, [self._user.user_id])
    self._tester.MergeAccounts(self._cookie, source_user_cookie=self._confirmed_cookie3)

    response_dict = self._tester.QueryFollowed(self._cookie)
    vp_dict = util.GetSingleListItem([vp_dict for vp_dict in response_dict['viewpoints']
                                      if vp_dict['viewpoint_id'] == vp_id])
    self.assertIn(Follower.REMOVED, vp_dict['labels'])
    self.assertIn(Follower.UNREVIVABLE, vp_dict['labels'])

    # Remove cookie so that base class won't try to validate its user's assets (and fail).
    self._cookies.remove(self._cookie3)

  def testMergeOldFollower(self):
    """Bug 468: Test merge with follower that never had adding_user_id set."""
    # Simulate followers in prod db that never had adding_user_id set.
    self._UpdateOrAllocateDBObject(Follower, user_id=self._user3.user_id, viewpoint_id=self._vp_id2,
                                   adding_user_id=None)

    # Merge user #3 into user #2; the merged follower must keep adding_user_id=None.
    self._tester.MergeAccounts(self._cookie2, source_user_cookie=self._confirmed_cookie3)
    follower = self._RunAsync(Follower.Query, self._client, self._user2.user_id, self._vp_id2, None)
    self.assertIsNone(follower.adding_user_id)

    # Remove cookie so that base class won't try to validate its user's assets (and fail).
    self._cookies.remove(self._cookie3)
  def testRemovedAddingUser(self):
    """Test merge with follower whose adding_user_id has been removed."""
    # Share to second viewpoint.
    vp_id, _ = self._tester.ShareNew(self._cookie2, [(self._ep_id, self._photo_ids)], [self._user3.user_id])

    # Now remove user #1 from first viewpoint.
    self._tester.RemoveFollowers(self._cookie, self._vp_id, [self._user.user_id])

    # Merge user #2 into user #3; adding_user_id must still point at user #1.
    self._tester.MergeAccounts(self._cookie3, source_user_cookie=self._confirmed_cookie2)
    response_dict = self._tester.QueryViewpoints(self._cookie3, [self._tester.CreateViewpointSelection(self._vp_id)])
    self.assertEqual(response_dict['viewpoints'][0]['followers'],
                     [{'follower_id': 1, 'labels': ['removed', 'unrevivable'], 'follower_timestamp': util._TEST_TIME},
                      {'follower_id': 2, 'adding_user_id': 1, 'follower_timestamp': util._TEST_TIME},
                      {'follower_id': 3, 'adding_user_id': 1, 'follower_timestamp': util._TEST_TIME}])

    # Remove cookie so that base class won't try to validate its user's assets (and fail).
    self._cookies.remove(self._cookie2)

  def testLinkUnboundIdentity(self):
    """Link an identity that exists, but is not bound to any user."""
    identity_key = 'Email:new.user@emailscrubbed.com'
    self._UpdateOrAllocateDBObject(Identity, key=identity_key)
    source_identity_dict = self._TestGenerateMergeToken(identity_key, user_cookie=self._cookie3)
    self._tester.MergeAccounts(self._cookie3, source_identity_dict=source_identity_dict)

  def testLinkAfterUnlink(self):
    """Test linking an identity after it was unlinked from another user."""
    # Link a phone to user #3.
    identity_key = 'Phone:+12345678901'
    source_identity_dict = self._TestGenerateMergeToken(identity_key, user_cookie=self._cookie3)
    self._tester.MergeAccounts(self._cookie3, source_identity_dict=source_identity_dict)

    # Unlink the phone.
    self._tester.UnlinkIdentity(self._cookie3, identity_key)

    # Now link it to user #1.
    source_identity_dict = self._TestGenerateMergeToken(identity_key, user_cookie=self._cookie)
    self._tester.MergeAccounts(self._cookie, source_identity_dict=source_identity_dict)
    self.assertEqual(self._tester.ListIdentities(self._cookie),
                     [u'Email:user1@emailscrubbed.com', u'Phone:+12345678901'])
def testLinkWithContacts(self):
    """Test link of an identity which another user has as a contact."""
    # Create contact for user #1.
    identity_key = 'Email:foo@emailscrubbed.com'
    contact_dict = Contact.CreateContactDict(self._user.user_id,
                                             [(identity_key, None)],
                                             util._TEST_TIME,
                                             Contact.GMAIL)
    self._UpdateOrAllocateDBObject(Contact, **contact_dict)

    # Link the identity to user #2 and verify that user #1 is notified.
    source_identity_dict = self._TestGenerateMergeToken(identity_key, user_cookie=self._cookie2)
    self._tester.MergeAccounts(self._cookie2, source_identity_dict=source_identity_dict)
    # scan_forward=False queries notifications newest-first; slot 0 is the latest.
    response_dict = self._tester.QueryNotifications(self._cookie, 1, scan_forward=False)
    self.assertEqual(response_dict['notifications'][0]['name'], 'link identity')
def testMergeToken(self):
    """Test the /merge_token auth API."""
    # ------------------------------
    # Generate email as a mobile client.
    # ------------------------------
    source_identity_dict = self._TestGenerateMergeToken('Email:mobile-user@bar.com', user_cookie=self._cookie2)
    # The token email is captured by the test email manager rather than sent.
    email = TestEmailManager.Instance().emails['mobile-user@bar.com'][0]
    self.assertEqual(email['toname'], self._user2.name)
    self.assertIn('Hello %s' % self._user2.name, email['html'])
    self.assertIn('Hello %s' % self._user2.name, email['text'])
    self.assertIn('link mobile-user@bar.com', email['html'])
    self.assertIn(source_identity_dict['access_token'], email['html'])
    self.assertIn(source_identity_dict['access_token'], email['text'])

    # ------------------------------
    # Generate email as a web client.
    # ------------------------------
    source_identity_dict = self._TestGenerateMergeToken('Email:web-user@bar.com', user_cookie=self._cookie3)
    email = TestEmailManager.Instance().emails['web-user@bar.com'][0]
    self.assertIn(source_identity_dict['access_token'], email['html'])
    self.assertIn(source_identity_dict['access_token'], email['text'])

    # ------------------------------
    # ERROR: Use non-canonical identity.
    # ------------------------------
    self.assertRaisesHttpError(400, self._TestGenerateMergeToken, 'Phone:456-7890', user_cookie=self._cookie2)

    # ------------------------------
    # ERROR: Try to call without being logged in.
    # ------------------------------
    self.assertRaisesHttpError(403, self._TestGenerateMergeToken, 'Email:foo@bar.com', user_cookie=None)

    # ------------------------------
    # ERROR: Try to use unsupported identity type.
    # ------------------------------
    self.assertRaisesHttpError(400,
                               self._TestGenerateMergeToken,
                               'Facebook:1234',
                               user_cookie=self._cookie2)

    # ------------------------------
    # ERROR: Raise error if an identity is already linked to a user when "error_if_linked" is true.
    # ------------------------------
    self.assertRaisesHttpError(403,
                               self._TestGenerateMergeToken,
                               'Email:%s' % self._user3.email,
                               user_cookie=self._cookie,
                               error_if_linked=True)

    # ------------------------------
    # ERROR: Try to use merge token with /verify/viewfinder.
    # ------------------------------
    source_identity_dict = self._TestGenerateMergeToken('Email:web-user@bar.com', user_cookie=self._cookie3)
    verify_url = self._tester.GetUrl('/verify/viewfinder')
    request_dict = {'headers': {'version': message.MAX_SUPPORTED_MESSAGE_VERSION,
                                'synchronous': True},
                    'identity': source_identity_dict['identity'],
                    'access_token': source_identity_dict['access_token']}
    self.assertRaisesHttpError(400,
                               auth_test._SendAuthRequest,
                               self._tester,
                               verify_url,
                               'POST',
                               request_dict=request_dict)
    # NOTE(review): mirrors a server-side expiry of the token after the failed
    # verify attempt (expires=0 in the model) -- confirm against the server op.
    self._validator.ValidateUpdateDBObject(Identity, key=source_identity_dict['identity'], expires=0)
def testAccessToken(self):
    """Use valid and invalid access tokens with merge_accounts."""
    # ------------------------------
    # ERROR: Try to use invalid token.
    # ------------------------------
    source_identity_dict = self._TestGenerateMergeToken('Email:foo@bar.com', user_cookie=self._cookie3)
    # Corrupt only the token; the identity itself remains valid.
    bad_source_identity_dict = deepcopy(source_identity_dict)
    bad_source_identity_dict['access_token'] = 'unknown'
    self.assertRaisesHttpError(403,
                               self._tester.MergeAccounts,
                               self._cookie3,
                               source_identity_dict=bad_source_identity_dict)

    # ------------------------------
    # Use valid token, which should succeed.
    # ------------------------------
    self._tester.MergeAccounts(self._cookie3, source_identity_dict=source_identity_dict)

    # ------------------------------
    # Use valid token, which should fail, since tokens are single-use.
    # ------------------------------
    self.assertRaisesHttpError(403,
                               self._tester.MergeAccounts,
                               self._cookie3,
                               source_identity_dict=source_identity_dict)
def testMergeSkipSystem(self):
    """Test that merge will skip system viewpoints."""
    # Prepare system viewpoint.
    self._MakeSystemViewpoint(self._vp_id2)

    # Merge user #3 into user #2.
    self._tester.MergeAccounts(self._cookie2, source_user_cookie=self._confirmed_cookie3)

    # Ensure that user #2 was not added as a follower to the system viewpoint.
    follower = self._RunAsync(Follower.Query, self._client, self._user2.user_id, self._vp_id2, None, must_exist=False)
    self.assertIsNone(follower)

    # Remove cookie so that base class won't try to validate its user's assets (and fail).
    self._cookies.remove(self._cookie3)
@mock.patch.object(Operation, 'FAILPOINTS_ENABLED', True)
def testMergeIdempotency(self):
    """Force op failure in order to test idempotency."""
    # Do not use tester framework, as notifications are not idempotent (by-design), and unlike other
    # operations, the MergeAccountsOperation interleaves notifications with db mutations.
    self._validate = False
    request_dict = {'activity': self._tester.CreateActivityDict(self._cookie2),
                    'source_user_cookie': self._confirmed_cookie3}
    self._tester.SendRequest('merge_accounts', self._cookie2, request_dict)

    # Despite the injected failpoints, the completed op leaves user #2
    # following 3 viewpoints and owning 2 identities.
    actual_dict = self._tester.SendRequest('query_followed', self._cookie2, {})
    self.assertEqual(len(actual_dict['viewpoints']), 3)
    actual_dict = self._tester.SendRequest('list_identities', self._cookie2, {})
    self.assertEqual(len(actual_dict['identities']), 2)

    # The merged-away account can no longer authenticate.
    self.assertRaisesHttpError(401, self._tester.SendRequest, 'list_identities', self._cookie3, {})
@mock.patch.object(Operation, 'FAILPOINTS_ENABLED', True)
def testLinkIdempotency(self):
    """Force op failure in order to test idempotency."""
    # With failpoints enabled, the link operation is restarted internally;
    # the tester still validates the final state.
    source_identity_dict = self._TestGenerateMergeToken('Email:test@test.com', user_cookie=self._cookie2)
    self._tester.MergeAccounts(self._cookie2, source_identity_dict=source_identity_dict)
def testNoMergeSource(self):
    """ERROR: Try to merge without a user cookie or identity source."""
    # Neither source_user_cookie nor source_identity supplied => 400 bad request.
    self.assertRaisesHttpError(400,
                               self._tester.MergeAccounts,
                               self._confirmed_cookie2)
def testMergeIntoSelf(self):
    """ERROR: Try to merge account into itself."""
    # Target and source cookies identify the same user => 400 bad request.
    self.assertRaisesHttpError(400,
                               self._tester.MergeAccounts,
                               self._confirmed_cookie2,
                               source_user_cookie=self._confirmed_cookie2)
def testBadCookie(self):
    """ERROR: Pass bad source user cookie."""
    # An undecodable source cookie is rejected with 403 forbidden.
    self.assertRaisesHttpError(403, self._tester.MergeAccounts, self._cookie, source_user_cookie='BADF00D')
def testMergeTerminatedAccount(self):
    """ERROR: Try to merge a terminated user account."""
    self._tester.TerminateAccount(self._confirmed_cookie3)
    # A terminated source account cannot be merged => 400 bad request.
    self.assertRaisesHttpError(400,
                               self._tester.MergeAccounts,
                               self._cookie,
                               source_user_cookie=self._confirmed_cookie3)

    # Remove cookie so that base class won't try to validate its user's assets (and fail).
    self._cookies.remove(self._cookie3)
def testMergeProspectiveUser(self):
    """Merge from a prospective user account."""
    prospective_user, vp_id, ep_id = self._CreateProspectiveUser()

    # ------------------------------
    # Merge using confirmed cookie.
    # ------------------------------
    # Passing confirm_time marks the cookie as recently confirmed.
    prospective_cookie = self._tester.GetSecureUserCookie(user_id=prospective_user.user_id,
                                                          device_id=prospective_user.webapp_dev_id,
                                                          user_name=None,
                                                          viewpoint_id=vp_id,
                                                          confirm_time=util._TEST_TIME)
    self._tester.MergeAccounts(self._cookie, source_user_cookie=prospective_cookie)

    # ------------------------------
    # ERROR: Try to merge using unconfirmed cookie.
    # ------------------------------
    # Without confirm_time the cookie is unconfirmed => 403 forbidden.
    prospective_cookie = self._tester.GetSecureUserCookie(user_id=prospective_user.user_id,
                                                          device_id=prospective_user.webapp_dev_id,
                                                          user_name=None,
                                                          viewpoint_id=vp_id)
    self.assertRaisesHttpError(403, self._tester.MergeAccounts, self._cookie, source_user_cookie=prospective_cookie)
def testMergeWithContacts(self):
    """Test merge in which another user has the source user as a contact."""
    # Create user #1 contacts for user #2 and user #3.
    exp_contacts = []
    for user, identity in zip(self._users[1:], self._identities[1:]):
        contact_dict = Contact.CreateContactDict(self._user.user_id,
                                                 [(identity.key, None)],
                                                 util._TEST_TIME,
                                                 Contact.GMAIL)
        # After the merge, both contact identities are expected to resolve
        # to user #3 (the merge target).
        exp_contacts.append({'contact_id': contact_dict['contact_id'],
                             'contact_source': contact_dict['contact_source'],
                             'identities': [{'identity': identity.key, 'user_id': 3}]})
        self._UpdateOrAllocateDBObject(Contact, **contact_dict)

    # Merge user #2 into user #3. Contact's user id should be updated.
    self._tester.MergeAccounts(self._cookie3, source_user_cookie=self._confirmed_cookie2)
    self.assertEqual(sorted(self._tester.QueryContacts(self._cookie)['contacts']), sorted(exp_contacts))

    # Remove cookie so that base class won't try to validate its user's assets (and fail).
    self._cookies.remove(self._cookie2)
def _TestGenerateMergeToken(self, identity_key, user_cookie, error_if_linked=None):
    """Invokes the merge_token auth API that triggers the email of a Viewfinder access token.

    Validates that an identity was created. Returns a source_identity_dict that can be passed
    directly to merge_accounts.
    """
    url = self._tester.GetUrl('/merge_token/viewfinder')
    request_dict = {'headers': {'version': message.MAX_SUPPORTED_MESSAGE_VERSION,
                                'synchronous': True},
                    'identity': identity_key}
    # Only include the flag when the caller asked for it, so the server's
    # default behavior is exercised otherwise.
    util.SetIfNotNone(request_dict, 'error_if_linked', error_if_linked)
    auth_test._SendAuthRequest(self._tester, url, 'POST', user_cookie=user_cookie, request_dict=request_dict)

    # Read back the identity the server created/updated.
    identity = self._RunAsync(Identity.Query, self._client, identity_key, None)

    # Validate the identity.
    self._validator.ValidateUpdateDBObject(Identity,
                                           key=identity_key,
                                           authority='Viewfinder',
                                           user_id=identity.user_id,
                                           access_token=identity.access_token,
                                           expires=identity.expires)

    return {'identity': identity.key, 'access_token': identity.access_token}
def _TestMergeAccounts(tester, user_cookie, request_dict):
    """Called by the ServiceTester in order to test merge_accounts service API call.

    Sends the request, then replays the expected effects of the operation
    against the model validator: either a full user merge (when
    'source_user_cookie' is supplied, or the source identity is bound to a
    user) or a simple identity link (unbound source identity).
    """
    validator = tester.validator
    target_user_id, device_id = tester.GetIdsFromCookie(user_cookie)

    # Send merge_accounts request.
    actual_dict = tester.SendRequest('merge_accounts', user_cookie, request_dict)
    op_dict = tester._DeriveNotificationOpDict(target_user_id, device_id, request_dict)

    # Determine the source user: directly from the cookie, or by resolving the
    # source identity to its owning user (None when the identity is unbound).
    source_user_cookie = request_dict.get('source_user_cookie')
    if source_user_cookie is not None:
        source_user_id, _ = tester.GetIdsFromCookie(source_user_cookie)
    else:
        source_identity_key = request_dict['source_identity']['identity']
        identity = validator.GetModelObject(Identity, source_identity_key)
        source_user_id = identity.user_id

    # If source user account exists, then validate merge.
    if source_user_id is not None:
        # Validate merge users case.

        # Validate that target user is added to all viewpoints followed by the source user.
        for source_follower in validator.QueryModelObjects(Follower, predicate=lambda f: f.user_id == source_user_id):
            viewpoint_id = source_follower.viewpoint_id

            # Skip removed followers.
            if source_follower.IsRemoved():
                continue

            # Skip default and system viewpoints.
            viewpoint = validator.GetModelObject(Viewpoint, DBKey(viewpoint_id, None))
            if viewpoint.IsDefault() or viewpoint.IsSystem():
                continue

            # Skip viewpoints that target already follows.
            db_key = DBKey(target_user_id, viewpoint_id)
            target_follower = validator.GetModelObject(Follower, db_key, must_exist=False)
            if target_follower is not None:
                continue

            validator.ValidateFollower(user_id=target_user_id,
                                       viewpoint_id=viewpoint_id,
                                       timestamp=op_dict['op_timestamp'],
                                       labels=[Follower.CONTRIBUTE],
                                       last_updated=op_dict['op_timestamp'],
                                       adding_user_id=source_follower.adding_user_id,
                                       viewed_seq=None)

            # Validate activity and notifications for the viewpoint merge.
            # Rebuild the per-viewpoint activity id the same way the server
            # does: same timestamp/device/client id, viewpoint-specific suffix.
            activity_id = request_dict['activity']['activity_id']
            truncated_ts, device_id, (client_id, server_id) = Activity.DeconstructActivityId(activity_id)
            activity_id = Activity.ConstructActivityId(truncated_ts, device_id, (client_id, viewpoint_id))
            activity_dict = {'name': 'merge_accounts',
                             'activity_id': activity_id,
                             'timestamp': request_dict['activity']['timestamp'],
                             'target_user_id': target_user_id,
                             'source_user_id': source_user_id}

            def _GetInvalidate(follower_id):
                # The merge target needs a full viewpoint invalidation; other
                # followers only need to re-fetch the follower list.
                if follower_id == target_user_id:
                    return validator.CreateViewpointInvalidation(viewpoint_id)
                else:
                    return {'viewpoints': [{'viewpoint_id': viewpoint_id, 'get_followers': True}]}

            validator.ValidateFollowerNotifications(viewpoint_id,
                                                    activity_dict,
                                                    op_dict,
                                                    _GetInvalidate)

            # Validate all followers are friends.
            all_followers = validator.QueryModelObjects(Follower, predicate=lambda f: f.viewpoint_id == viewpoint_id)
            validator.ValidateFriendsInGroup([f.user_id for f in all_followers])

            validator.ValidateViewpointAccounting(viewpoint_id)

        # Validate that all identities have been moved from the source to the target user.
        for identity in validator.QueryModelObjects(Identity, predicate=lambda i: i.user_id == source_user_id):
            # Validate Identity objects.
            validator.ValidateUpdateDBObject(Identity, key=identity.key, user_id=target_user_id, expires=0)

            # Validate Contact objects.
            validator.ValidateRewriteContacts(identity.key, op_dict)

            # Validate contact notifications.
            validator.ValidateContactNotifications('merge identities', identity.key, op_dict)

        # Validate target user notification.
        validator.ValidateUserNotification('merge users', target_user_id, op_dict)

        # Validate the account termination.
        validator.ValidateTerminateAccount(source_user_id, op_dict, merged_with=target_user_id)
    else:
        # Validate link identity case.

        # Validate Identity object.
        validator.ValidateUpdateDBObject(Identity, key=source_identity_key, user_id=target_user_id, expires=0)

        # Validate Contact objects.
        validator.ValidateRewriteContacts(source_identity_key, op_dict)

        # Validate contact notifications.
        validator.ValidateContactNotifications('link identity', identity.key, op_dict)

        # Validate target user notification.
        validator.ValidateUserNotification('link user', target_user_id, op_dict)

    tester._CompareResponseDicts('merge_accounts', target_user_id, request_dict, {}, actual_dict)
    return actual_dict
|
|
"""
Filename: plot_interhemispheric_heat_difference_timeseries.py
Author: Damien Irving, irving.damien@gmail.com
Description: Plot ensemble interhemispheric difference timeseries
"""
# Import general Python modules
import sys, os, pdb, re
import argparse
import numpy
import iris
from iris.experimental.equalise_cubes import equalise_attributes
import iris.plot as iplt
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib as mpl
import seaborn
import cmdline_provenance as cmdprov
# Import my modules
# Walk up from the current working directory until the 'ocean-analysis' repo
# root is found, then put its 'modules' directory on the import path.
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
    repo_dir = os.path.join(repo_dir, directory)
    if directory == 'ocean-analysis':
        break

modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)

try:
    import general_io as gio
except ImportError:
    raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
# Module-level lookup tables and global plot styling.

# Line colours keyed by variable name.
var_colors = {'ohc': 'blue', 'hfds': 'orange', 'rndt': 'red'}

# Line colours keyed by experiment name.
exp_colors = {'historical-rcp85': 'black', 'historical': 'black', 'GHG-only': 'red',
              'AA-only': 'blue', '1pctCO2': 'orange'}

# Long variable names as they appear in the input data files.
names = {'thetao': 'Sea Water Potential Temperature',
         'ohc': 'ocean heat content',
         'hfds': 'Downward Heat Flux at Sea Water Surface',
         'rndt': 'TOA Incoming Net Radiation'}

# Short titles used for panel headings.
titles = {'rndt': 'netTOA', 'hfds': 'OHU', 'ohc': 'OHC', 'thetao': 'Average Ocean Temperature'}

plot_variables = {'thetao': ' Average Ocean Temperature',
                  'ohc': 'OHC',
                  'hfds': 'OHU',
                  'rndt': 'netTOA'}

# NOTE(review): per-model offsets added to the time axis when plotting the
# ensemble spread -- units/meaning undocumented here; only these two models
# are supported (KeyError otherwise). Confirm before extending.
scale_factors = {'CanESM2': -54567.5, 'CCSM4': -729817.5}

linestyles = {'historical-rcp85': 'solid', 'historical': 'solid', 'GHG-only': '--', 'AA-only': ':'}

# Figure layout (nrows, ncols) keyed by number of panels.
grid_configs = {1: (1, 1), 2: (1, 2), 3: (1, 3), 4: (2, 2)}

# Global plot styling.
seaborn.set(style='ticks')
mpl.rcParams['axes.labelsize'] = 24
mpl.rcParams['axes.titlesize'] = 28
mpl.rcParams['xtick.labelsize'] = 24
mpl.rcParams['ytick.labelsize'] = 24
mpl.rcParams['legend.fontsize'] = 20
def equalise_time_axes(cube_list):
    """Force every cube onto the time coordinate of the first cube.

    All cubes must already have the same number of time points; each cube's
    time points, bounds, units and attributes are overwritten in place so
    that the cubes can later be merged into one ensemble cube.

    Returns a new CubeList containing the (mutated) input cubes.
    """
    iris.util.unify_time_units(cube_list)
    template = cube_list[0].coord('time')
    matched = iris.cube.CubeList([])
    for cube in cube_list:
        time_coord = cube.coord('time')
        assert len(time_coord.points) == len(template.points)
        # Copy every aspect of the reference time axis onto this cube.
        for attr_name in ('points', 'bounds', 'units', 'attributes'):
            setattr(time_coord, attr_name, getattr(template, attr_name))
        matched.append(cube)
    return matched
def ensemble_aggregation(cube_list, operator):
    """Aggregate the ensemble along the ensemble_member coordinate.

    Args:
      cube_list: iris CubeList with one cube per ensemble member
      operator: 'mean' or 'median'

    Returns:
      (aggregate cube, spread cube) where the spread holds the 25th and
      75th percentiles, or None when there is only a single member.
    """
    if len(cube_list) > 1:
        equalise_attributes(cube_list)
        equalise_time_axes(cube_list)
        merged = cube_list.merge_cube()
        aggregator = {'mean': iris.analysis.MEAN, 'median': iris.analysis.MEDIAN}[operator]
        agg_cube = merged.collapsed('ensemble_member', aggregator)
        spread_cube = merged.collapsed('ensemble_member', iris.analysis.PERCENTILE, percent=[25, 75])
        return agg_cube, spread_cube
    return cube_list[0], None
def calc_anomaly(cube):
    """Return a copy of *cube* expressed as an anomaly from its first value."""
    baseline = cube.data[0]
    result = cube.copy()
    result.data = result.data - baseline
    return result
def get_simulation_attributes(cube):
    """Extract (model, experiment, mip) identifiers from a cube's attributes.

    The mip string is built as r<realization>i1p<physics_version>, and the
    CMIP5 name 'historicalMisc' is reported as 'historicalAA'.
    """
    attrs = cube.attributes
    model = attrs['model_id']
    experiment = attrs['experiment_id']
    mip = 'r%si1p%s' % (attrs['realization'], attrs['physics_version'])
    if experiment == 'historicalMisc':
        experiment = 'historicalAA'
    return model, experiment, mip
def calc_hemispheric_value(sh_file, nh_file, val_type, var, time_constraint, ensemble_number):
    """Calculate the interhemispheric difference timeseries.

    Args:
      sh_file, nh_file: southern / northern hemisphere input files
      val_type: 'diff' (NH minus SH), 'sh', 'nh' or 'sum'
      var: variable key into the module-level `names` dict
      time_constraint: iris constraint restricting the time period loaded
      ensemble_number: value stored in the 'ensemble_member' aux coordinate

    Returns:
      (metric cube tagged with ensemble_member, history attribute of nh_file)
    """
    # Temperature is given as a hemispheric mean; heat variables as a sum.
    agg = 'mean' if var =='thetao' else 'sum'

    nh_name = names[var] + ' nh ' + agg
    nh_cube = iris.load_cube(nh_file, nh_name & time_constraint)
    nh_cube.var_name = nh_cube.var_name.replace('_', '-')
    nh_attributes = get_simulation_attributes(nh_cube)
    nh_anomaly = calc_anomaly(nh_cube)
    nh_anomaly.data = nh_anomaly.data.astype(numpy.float32)

    sh_name = names[var] + ' sh ' + agg
    sh_cube = iris.load_cube(sh_file, sh_name & time_constraint)
    sh_cube.var_name = sh_cube.var_name.replace('_', '-')
    sh_attributes = get_simulation_attributes(sh_cube)
    sh_anomaly = calc_anomaly(sh_cube)
    sh_anomaly.data = sh_anomaly.data.astype(numpy.float32)

    # Both hemispheres must come from the same model/experiment/run.
    assert nh_attributes == sh_attributes

    metric = nh_cube.copy()
    # NOTE(review): this branch looks unreachable -- names[var] above would
    # raise KeyError for 'ohc-adjusted' before execution reaches here. Confirm.
    if var == 'ohc-adjusted':
        globe_data = nh_cube.data + sh_cube.data
        globe_anomaly = sh_anomaly.data + nh_anomaly.data
        nh_mean = nh_cube.data.mean()
        globe_mean = globe_data.mean()
        constant = 1 - 2*(nh_mean/globe_mean)
        diff = nh_anomaly.data - sh_anomaly.data
        metric.data = diff + (globe_anomaly * constant)
    else:
        if val_type == 'diff':
            metric.data = nh_anomaly.data - sh_anomaly.data
        elif val_type == 'sh':
            metric.data = sh_anomaly.data
        elif val_type == 'nh':
            metric.data = nh_anomaly.data
        elif val_type == 'sum':
            metric.data = nh_anomaly.data + sh_anomaly.data

    # Tag with the ensemble member number so cubes can later be merged.
    new_aux_coord = iris.coords.AuxCoord(ensemble_number, long_name='ensemble_member', units='no_unit')
    metric.add_aux_coord(new_aux_coord)
    metric.cell_methods = ()

    return metric, nh_cube.attributes['history']
def set_plot_features(inargs, ax, plotnum, var, nvars):
    """Apply axis limits, labels, panel letter and legend to one panel.

    Args:
      inargs: parsed command-line arguments
      ax: matplotlib Axes for this panel
      plotnum: zero-based panel index
      var: variable plotted in this panel ('rndt', 'hfds', 'ohc' or 'thetao')
      nvars: total number of panels in the figure
    """
    # y-limits: heat variables are entered on the command line in units of 10^24 J.
    if inargs.ylim_uptake and var in ['rndt', 'hfds']:
        ylower, yupper = inargs.ylim_uptake
        plt.ylim(ylower * 1e24, yupper * 1e24)
    elif inargs.ylim_ohc and var == 'ohc':
        ylower, yupper = inargs.ylim_ohc
        plt.ylim(ylower * 1e24, yupper * 1e24)
    elif inargs.ylim_temperature and var == 'thetao':
        ylower, yupper = inargs.ylim_temperature
        plt.ylim(ylower, yupper)

    # In a 2x2 grid, the top row (panels 0 and 1) omits the x-axis label.
    if not (nvars == 4 and plotnum in [0, 1]):
        ax.set_xlabel('Year')
    if var in ['rndt', 'hfds', 'ohc']:
        ax.set_ylabel('NH minus SH (Joules)')
        plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0), useMathText=True)
        ax.yaxis.major.formatter._useMathText = True
    else:
        ax.set_ylabel('NH minus SH (K)')
    ax.tick_params(top='off')

    # Panel letters only make sense for multi-panel figures.
    if nvars > 1:
        panel_labels = {0: '(a)', 1: '(b)', 2: '(c)', 3: '(d)'}
        ax.text(0.93, 0.97, panel_labels[plotnum], transform=ax.transAxes,
                fontsize=24, va='top')

    # BUG FIX: the original branched on (var == 'thetao' or inargs.metric in
    # ['sh', 'nh']) but called plt.legend(loc=2) in BOTH branches, so the
    # conditional was redundant and has been removed.
    plt.legend(loc=2)

    if inargs.hline:
        ax.axhline(y=0, color='0.5', linestyle='--', linewidth=0.5)
def get_plot_vars(inargs):
    """Determine which variables were supplied on the command line.

    Args:
      inargs: parsed arguments with toa_files, ohu_files, ohc_files and
        thetao_files attributes (each a possibly-empty list of file lists).

    Returns:
      (plot_vars, plot_files): parallel lists of variable names and their
      file lists, in the fixed order rndt, hfds, ohc, thetao; variables
      with no input files are omitted.
    """
    # Renamed from `vars`, which shadowed the builtin of the same name.
    candidates = [('rndt', inargs.toa_files),
                  ('hfds', inargs.ohu_files),
                  ('ohc', inargs.ohc_files),
                  ('thetao', inargs.thetao_files)]
    plot_vars = []
    plot_files = []
    for var_name, file_list in candidates:
        if file_list:
            plot_vars.append(var_name)
            plot_files.append(file_list)
    return plot_vars, plot_files
def main(inargs):
    """Run the program."""
    plot_vars, plot_files = get_plot_vars(inargs)
    nvars = len(plot_vars)
    nrows, ncols = grid_configs[nvars]
    fig = plt.figure(figsize=[11 * ncols, 7 * nrows])
    gs = gridspec.GridSpec(nrows, ncols, wspace=0.27, hspace=0.25)
    axes = []
    for index in range(nvars):
        axes.append(plt.subplot(gs[index]))

    # Each experiment type is loaded over its own time period.
    time_constraints = {'historical-rcp85': gio.get_time_constraint(inargs.rcp_time),
                        'historical': gio.get_time_constraint(inargs.historical_time),
                        'GHG-only': gio.get_time_constraint(inargs.historical_time),
                        'AA-only': gio.get_time_constraint(inargs.historical_time),
                        '1pctCO2': gio.get_time_constraint(inargs.pctCO2_time)}

    for experiment_num, experiment in enumerate(inargs.experiment_list):
        time_constraint = time_constraints[experiment]
        ensemble_agg_dict = {}
        ensemble_spread_dict = {}
        for var_index, var_files in enumerate(plot_files):
            var = plot_vars[var_index]
            cube_list = iris.cube.CubeList([])
            for model_num, model_files in enumerate(var_files):
                # Each model's file list holds (SH, NH) pairs, one pair per experiment.
                start = experiment_num * 2
                sh_file, nh_file = model_files[start: start+2]
                value, history = calc_hemispheric_value(sh_file, nh_file, inargs.metric, var, time_constraint, model_num)
                if model_num == 0:
                    first_model = value.attributes['model_id']
                cube_list.append(value)
                # Optionally overlay thin per-model curves.
                if inargs.individual:
                    plt.sca(axes[var_index])
                    iplt.plot(value, color=exp_colors[experiment], linewidth=0.3)
            ensemble_agg_dict[var], ensemble_spread_dict[var] = ensemble_aggregation(cube_list, inargs.ensagg)
            plt.sca(axes[var_index])
            iplt.plot(ensemble_agg_dict[var], label=experiment, color=exp_colors[experiment])
            if inargs.spread and ensemble_spread_dict[var]:
                # Shade the 25-75 percentile range. NOTE(review): scale_factors
                # applies a hard-coded per-model offset to the time values --
                # presumably a calendar/epoch correction; confirm before reuse.
                time_values = ensemble_spread_dict[var][0, ::].coord('time').points + scale_factors[first_model]
                upper_bound = ensemble_spread_dict[var][0, ::].data
                lower_bound = ensemble_spread_dict[var][-1, ::].data
                iplt.plt.fill_between(time_values, upper_bound, lower_bound, facecolor=exp_colors[experiment], alpha=0.15)

    if inargs.title:
        plt.suptitle('interhemispheric difference in accumulated heat')

    for index, var in enumerate(plot_vars):
        plt.sca(axes[index])
        if nvars > 1:
            plt.title(titles[var])
        set_plot_features(inargs, axes[index], index, var, nvars)

    # NOTE(review): reaching into plt.savefig.__globals__ for the default dpi
    # is fragile -- matplotlib exposes it directly as plt.rcParams['figure.dpi'].
    dpi = inargs.dpi if inargs.dpi else plt.savefig.__globals__['rcParams']['figure.dpi']
    print('dpi =', dpi)
    plt.savefig(inargs.outfile, bbox_inches='tight', dpi=dpi)

    # nh_file/history deliberately refer to the last file processed above; its
    # history attribute seeds the provenance log written alongside the figure.
    log_text = cmdprov.new_log(infile_history={nh_file: history}, git_repo=repo_dir)
    log_file = re.sub('.png', '.met', inargs.outfile)
    cmdprov.write_log(log_file, log_text)
if __name__ == '__main__':

    extra_info ="""
author:
Damien Irving, irving.damien@gmail.com
"""

    description = 'Plot ensemble interhemispheric heat difference boxplot for OHC, hfds and rndt'
    # argument_default=SUPPRESS only affects options without an explicit
    # default; every option below supplies one.
    parser = argparse.ArgumentParser(description=description,
                                     epilog=extra_info,
                                     argument_default=argparse.SUPPRESS,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument("outfile", type=str, help="output file")
    parser.add_argument("experiment_list", type=str, nargs='*', choices=('historical', 'historical-rcp85', 'GHG-only', 'AA-only', '1pctCO2'),
                        help="experiments to plot")

    parser.add_argument("--metric", type=str, default='diff', choices=('diff', 'nh', 'sh', 'sum'),
                        help="Metric to plot (hemispheric values or difference) [default=diff]")

    # File lists use action='append': each --*_files flag adds one model's files.
    parser.add_argument("--toa_files", type=str, nargs='*', action='append', default=[],
                        help="netTOA files in this order: exp1 NH, exp1 SH, exp2 NH, exp2 SH, etc")
    parser.add_argument("--ohu_files", type=str, nargs='*', action='append', default=[],
                        help="OHU files in this order: exp1 NH, exp1 SH, exp2 NH, exp2 SH, etc")
    parser.add_argument("--ohc_files", type=str, nargs='*', action='append', default=[],
                        help="OHC files in this order: exp1 NH, exp1 SH, exp2 NH, exp2 SH, etc")
    parser.add_argument("--thetao_files", type=str, nargs='*', action='append', default=[],
                        help="thetao files in this order: exp1 NH, exp1 SH, exp2 NH, exp2 SH, etc")

    parser.add_argument("--ylim_uptake", type=float, nargs=2, default=None,
                        help="y limits for netTOA and OHU plots (x 10^24)")
    parser.add_argument("--ylim_ohc", type=float, nargs=2, default=None,
                        help="y limits for OHC plots (x 10^24)")
    parser.add_argument("--ylim_temperature", type=float, nargs=2, default=None,
                        help="y limits for ocean temperature plots")

    parser.add_argument("--ensagg", type=str, default='median', choices=('mean', 'median'),
                        help="Ensemble mean or median [default=median]")

    parser.add_argument("--dpi", type=float, default=None,
                        help="Figure resolution in dots per square inch [default=auto]")
    parser.add_argument("--title", action="store_true", default=False,
                        help="Include a plot title [default=False]")
    parser.add_argument("--individual", action="store_true", default=False,
                        help="Show curves for individual models [default=False]")
    parser.add_argument("--spread", action="store_true", default=False,
                        help="Plot shading for ensemble spread [default=False]")

    parser.add_argument("--historical_time", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'),
                        default=('1861-01-01', '2005-12-31'),
                        help="Time period for historical experiments [default = entire]")
    parser.add_argument("--rcp_time", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'),
                        default=('1861-01-01', '2100-12-31'),
                        help="Time period for rcp experiments [default = entire]")
    parser.add_argument("--pctCO2_time", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'),
                        default=('1861-01-01', '2000-12-31'),
                        help="Time period for 1pctCO2 experiment [default = entire]")

    parser.add_argument("--hline", action="store_true", default=False,
                        help="Plot a horizontal guideline [default: false]")

    args = parser.parse_args()
    main(args)
|
|
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Ontology Engineering Group
http://www.oeg-upm.net/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2016 Ontology Engineering Group.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import calendar
import json
import os
import ssl
import sys
import time
from datetime import datetime
from decimal import Decimal
from functools import wraps
from urllib2 import urlopen, HTTPError, Request
from wsgiref.handlers import format_date_time
import overpy
import shapely
from LatLon import LatLon
from concurrent.futures import ThreadPoolExecutor
from flask import Flask, request, jsonify
from flask_caching import Cache
from fuzzywuzzy import process
from overpy import exception
from overpy.exception import DataIncomplete
from redis_cache import cache_it_json, SimpleCache
from shapely.errors import TopologicalError
from shapely.geometry import Point, Polygon, LineString, MultiPoint, MultiPolygon
from shapely.ops import nearest_points
from shapely.wkt import dumps
from geo_pass import geocoding, debug, error, ZSimpleCache
from geo_pass.poly import ways2poly
__author__ = 'Fernando Serena'

# Cache configuration, overridable via environment variables.
MAX_AGE = int(os.environ.get('MAX_AGE', 86400))  # cache entry TTL in seconds (default: one day)
CACHE_LIMIT = int(os.environ.get('CACHE_LIMIT', 100000))  # maximum number of cached entries
CACHE_REDIS_HOST = os.environ.get('CACHE_REDIS_HOST', '127.0.0.1')
CACHE_REDIS_DB = int(os.environ.get('CACHE_REDIS_DB', 0))
CACHE_REDIS_PORT = int(os.environ.get('CACHE_REDIS_PORT', 6379))

app = Flask(__name__)
# Flask-Caching view cache, backed by the same Redis instance.
cache = Cache(app, config={
    'CACHE_TYPE': 'redis',
    'CACHE_KEY_PREFIX': 'geo',
    'CACHE_REDIS_HOST': CACHE_REDIS_HOST,
    'CACHE_REDIS_DB': CACHE_REDIS_DB,
    'CACHE_REDIS_PORT': CACHE_REDIS_PORT
})
class Overpass(overpy.Overpass):
    """overpy.Overpass client extended with Redis caching, optional JWT auth
    and a small thread pool for issuing HTTP requests.

    NOTE: this module targets Python 2 (urllib2, iteritems, list-returning
    filter/map).
    """

    def __init__(self, url=None, cache=None, jwt=None):
        super(Overpass, self).__init__(url=url)
        self.cache = cache  # redis SimpleCache used to memoise query responses
        self.jwt = jwt      # optional token sent as the Authorization header
        self.pool = ThreadPoolExecutor(max_workers=4)  # bounds concurrent requests

    def parse_json(self, data, encoding="utf-8"):
        """
        Parse raw response from Overpass service.

        :param data: Raw JSON Data
        :type data: String or Bytes
        :param encoding: Encoding to decode byte string
        :type encoding: String
        :return: Result object
        :rtype: overpy.Result
        """
        try:
            elements = data.get('elements', [{}])[0]
            # 'count' responses carry no geometry: return the tag counts directly.
            if elements.get('type', '') == 'count':
                return {k: int(v) for k, v in elements['tags'].iteritems()}
            else:
                members = elements.get('members', [])
        except IndexError:
            # 'elements' was present but an empty list.
            members = None
        if members:
            # Keep members that either lack a geometry key or have a non-empty
            # geometry, then strip null points from the geometries that remain.
            members = filter(lambda m: m.get('geometry', True), members)
            data['elements'][0]['members'] = members
            for m in members:
                if 'geometry' in m:
                    geometry = filter(lambda p: p, m['geometry'])
                    m['geometry'] = geometry
        return overpy.Result.from_json(data, api=self)

    def __request(self, query):
        # Issue the (possibly authenticated) HTTP request on the worker pool
        # and map HTTP status codes onto overpy exceptions.
        debug("querying:" + query)
        if self.url.startswith('https'):
            # NOTE(review): pins TLSv1 specifically -- presumably required by
            # the deployed server; confirm this is still necessary.
            gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        else:
            gcontext = None
        if self.jwt:
            request = Request(self.url, headers={"Authorization": self.jwt})
        else:
            request = self.url
        future = self.pool.submit(urlopen, request, query, context=gcontext)
        try:
            f = future.result()
        except HTTPError as e:
            # HTTPError still exposes a readable body and status code.
            f = e

        # Read the whole response in fixed-size chunks.
        response = f.read(self.read_chunk_size)
        while True:
            data = f.read(self.read_chunk_size)
            if len(data) == 0:
                break
            response = response + data
        f.close()

        if f.code == 200:
            if overpy.PY2:
                http_info = f.info()
                content_type = http_info.getheader("content-type")
            else:
                content_type = f.getheader("Content-Type")

            if content_type == "application/json":
                if isinstance(response, bytes):
                    response = response.decode("utf-8")
                response = json.loads(response)
                return response

            raise exception.OverpassUnknownContentType(content_type)

        if f.code == 400:
            # Extract human-readable error messages from the HTML error body.
            msgs = []
            for msg in self._regex_extract_error_msg.finditer(response):
                tmp = self._regex_remove_tag.sub(b"", msg.group("msg"))
                try:
                    tmp = tmp.decode("utf-8")
                except UnicodeDecodeError:
                    tmp = repr(tmp)
                msgs.append(tmp)

            raise exception.OverpassBadRequest(
                query,
                msgs=msgs
            )

        if f.code == 429:
            raise exception.OverpassTooManyRequests

        if f.code == 504:
            raise exception.OverpassGatewayTimeout

        raise exception.OverpassUnknownHTTPStatusCode(f.code)

    def query(self, query, cache=True, expire=MAX_AGE, maxsize=536870912):
        """Run an Overpass query, optionally memoised in Redis.

        :param cache: when True, responses are cached for `expire` seconds
        :param maxsize: server-side memory limit in bytes (default 512 MB)
        """
        if not isinstance(query, bytes):
            query = query.encode("utf-8")
        query = "[maxsize:{}][out:json];\n".format(maxsize) + query
        if cache:
            # Normalise whitespace around statements so equivalent queries
            # share a single cache entry.
            opt_query = ';'.join([st.strip('\n').strip(' ') for st in query.split(';')])
            try:
                response = cache_it_json(cache=self.cache, expire=expire)(self.__request)(opt_query)
            except Exception as e:
                raise ValueError(e.message)
        else:
            response = self.__request(query)
        # Round-trip through JSON so floats become Decimal for parse_json.
        response_str = json.dumps(response)
        try:
            return self.parse_json(json.loads(response_str, parse_float=Decimal))
        except AttributeError:
            # Response was not dict-shaped (e.g. cached junk); treat as no result.
            return None
# Process-level Redis caches in adjacent databases:
#   cache_proc (db+1, 'pr'): memoises the query_* helper functions below.
#   cache_q    (db+2, 'q'):  memoises raw Overpass responses for the client.
cache_proc = SimpleCache(hashkeys=True, host=CACHE_REDIS_HOST, port=CACHE_REDIS_PORT,
                         db=CACHE_REDIS_DB + 1, namespace='pr', limit=10000000, expire=MAX_AGE)
cache_q = ZSimpleCache(hashkeys=True, host=CACHE_REDIS_HOST, port=CACHE_REDIS_PORT,
                       db=CACHE_REDIS_DB + 2, namespace='q', limit=100000, expire=MAX_AGE)
# Shared Overpass client; URL and JWT come from the environment.
api = Overpass(url=os.environ.get('OVERPASS_API_URL', 'http://127.0.0.1:5000/api/interpreter'),
               cache=cache_q, jwt=os.environ.get('JWT_TOKEN', None))
def make_cache_key(*args, **kwargs):
    """Flask cache key: request path + sorted query args, utf-8 encoded.

    NOTE(review): the original joined with the literal separator 'u'
    ("'u'.join(...)"), which looks like a misplaced-quote typo for
    u''.join(...); fixed here so the key is a plain concatenation.
    """
    path = request.path
    qargs = dict(request.args.items())
    args = u''.join([u'{}{}'.format(k, qargs[k]) for k in sorted(qargs.keys())])
    return (path + args).encode('utf-8')
def make_around_cache_key(*args, **kwargs):
    """Cache key for 'around' lookups: 'around' + sorted query args, utf-8."""
    query_args = dict(request.args.items())
    parts = ['{}{}'.format(key, query_args[key]) for key in sorted(query_args)]
    return ('around' + ''.join(parts)).encode('utf-8')
def make_tags_cache_key(*args, **kwargs):
    """Cache key that depends only on the request path (query args ignored)."""
    return request.path.encode('utf-8')
def way_distance(result, way, point):
    """Distance from *point* (LatLon) to the nearest resolvable node of *way*.

    Nodes missing from *result* are skipped; when no node resolves the
    function returns float('inf') (the original returned sys.maxint, which
    shadowed the builtin min() with a local of the same name and is
    Python-2-only).
    """
    min_d = float('inf')
    for nid in way._node_ids:
        try:
            node = result.get_node(nid, resolve_missing=False)
            node_ll = LatLon(node.lat, node.lon)
            node_d = point.distance(node_ll)
            if node_d < min_d:
                min_d = node_d
        except DataIncomplete:
            # node not present in the result set; ignore it
            pass
    return min_d
def node_distance(node, point):
    """Great-circle distance from *point* (LatLon) to an overpy node."""
    return point.distance(LatLon(node.lat, node.lon))
def is_road(way):
    """Return True when the element dict *way* carries a highway/footway tag."""
    way_tags = way.get('tag', {})
    return any(key in way_tags for key in ('highway', 'footway'))
def is_building(way):
    """Return True when *way* is an amenity or a building not tagged building=no."""
    way_tags = way.get('tag', {})
    if 'amenity' in way_tags:
        return True
    return way_tags.get('building', 'no') != 'no'
# --- Overpass helper queries; results cached in redis via cache_it_json ---
@cache_it_json(cache=cache_proc)
def query_way_buildings(id):
    """Buildings (id + center) within 20m of way *id*, excluding the way itself."""
    debug('Way buildings of ' + str(id))
    query = """
    way({});
    way(around:20)[building]["building"!~"no"];
    out center;""".format(id)
    result = api.query(query, cache=False)
    # drop the seed way itself from the result set
    all_way_buildings = filter(lambda w: w.id != int(id), result.ways)
    return map(lambda x: {'id': x.id, 'center_lat': float(x.center_lat), 'center_lon': float(x.center_lon)},
               all_way_buildings)
@cache_it_json(cache=cache_proc)
def query_way_elms(id):
    """Highway/footway nodes within 20m of way *id* as plain dicts."""
    debug('Way elements of ' + str(id))
    query = """
    way({});
    (
    node(around:20)[highway];
    node(around:20)[footway];
    );
    out body;""".format(id)
    result = api.query(query, cache=False)
    return map(lambda x: {'id': x.id, 'lat': float(x.lat), 'lon': float(x.lon), 'tags': x.tags}, result.nodes)
@cache_it_json(cache=cache_proc)
def query_building_ways(id):
    """Ids of highway/footway ways near building *id* that list it as a nearby building."""
    debug('Building ways of ' + str(id))
    def filter_way(w):
        # keep only ways whose own 20m building query includes *id*
        buildings = query_way_buildings(w.id)
        return any([b for b in buildings if b['id'] == int(id)])
    result = api.query("""
    way({});
    way(around:20) -> .aw;
    (
    way.aw[highway];
    way.aw[footway];
    );
    out ids;
    """.format(id), cache=False)
    all_near_ways = result.ways
    building_ways = filter(lambda w: filter_way(w), all_near_ways)
    return map(lambda x: {'id': x.id}, filter(lambda w: w.id != int(id), building_ways))
@cache_it_json(cache=cache_proc)
def query_building_elms(way):
    """POI nodes (shop/amenity/tourism) lying inside the envelope of building *way*."""
    debug('Building elements of ' + str(way['id']))
    result = api.query("""way({}); out geom; >; out body;""".format(way['id']))
    points = [(float(result.get_node(n).lon), float(result.get_node(n).lat)) for n in way['nd']]
    # envelope is a cheap stand-in for the exact building footprint
    shape = Polygon(points).envelope
    result = api.query("""
    way({});
    node(around:5) -> .na;
    (
    node.na[shop];
    node.na[amenity];
    node.na[tourism];
    );
    out;
    """.format(way['id']), cache=False)
    elms = []
    for n in result.nodes:
        n_point = Point(float(n.lon), float(n.lat))
        if n_point.within(shape):
            elms.append({'id': n.id, 'lat': float(n.lat), 'lon': float(n.lon), 'tags': n.tags})
    return elms
@cache_it_json(cache=cache_proc)
def query_surrounding_buildings(id):
    """Buildings (id + center) whose nodes lie within 10m of the nodes of way *id*."""
    debug('Surrounding buildings of ' + str(id))
    result = api.query("""
    way({});
    out center;
    > -> .wn;
    node(around:10) -> .ar;
    node.ar.wn;
    <;
    out center;""".format(id), cache=False)
    return map(lambda x: {'id': x.id, 'center_lat': float(x.center_lat), 'center_lon': float(x.center_lon)},
               filter(lambda w: w.id != int(id) and 'building' in w.tags, result.ways))
@cache_it_json(cache=cache_proc)
def query_intersect_ways(id):
    """Map each highway/footway sharing nodes with way *id* to the shared node ids."""
    debug('Intersecting ways of ' + str(id))
    result = api.query("""
    way({});
    >;
    out body;
    < -> .wall;
    (
    way.wall[highway];
    way.wall[footway];
    );
    out geom;""".format(id), cache=False)
    node_ids = set(map(lambda x: x.id, result.nodes))
    ways = filter(lambda w: w.id != int(id), result.ways)
    intersections = {}
    for w in ways:
        # shared nodes mark a physical intersection of the two ways
        cuts = set(w._node_ids).intersection(node_ids)
        if cuts:
            intersections[w.id] = list(cuts)
    return intersections
@cache_it_json(cache=cache_proc)
def query_node_building(node):
    """Return {'id': way_id} of the first nearby building whose envelope contains *node*, else None."""
    result = api.query("""
    node({});
    way(around:10)[building]["building"!~"no"];
    (._; >;);
    out geom;""".format(node['id']), cache=False)
    point = Point(node['lon'], node['lat'])
    for w in result.ways:
        geom = w.nodes
        points = map(lambda n: [float(n.lon), float(n.lat)], geom)
        shape = Polygon(points)
        # envelope check is cheaper than exact polygon containment
        if point.within(shape.envelope):
            return {'id': w.id}
@cache_it_json(cache=cache_proc)
def query_around(id, way=True, lat=None, lon=None, radius=None):
    """Ids of notable ways/nodes around (lat, lon) within *radius* meters of element *id*."""
    # `type` intentionally mirrors the Overpass element keyword (way/node)
    type = 'way' if way else 'node'
    result = api.query("""
    {}({});
    (node(around:{},{},{}); <;) -> .all;
    (node.all[highway];
    node.all[shop];
    node.all[amenity];
    node.all[tourism];
    way.all[highway];
    way.all[footway];
    way.all[building];
    );
    out ids;
    """.format(type, id, radius, lat, lon), cache=False)
    elements = list(result.ways) + list(result.nodes)
    return map(lambda x: x.id, elements)
@cache_it_json(cache=cache_proc)
def query_nodes(*nodes):
    """Fetch all of the given node ids in a single Overpass request."""
    q_nodes = map(lambda x: 'node({});'.format(x), nodes)
    result = api.query("""
    ({});
    out;""".format('\n'.join(q_nodes)), cache=False)
    return list(result.nodes)
def transform(f):
    """Decorator: convert the overpy element list returned by *f* into a plain dict.

    NOTE(review): the wrapper iterates over the whole result list but keeps
    writing into the same dict, so only the last element fully survives --
    the decorated functions (g_way, g_node) appear to return single-element
    lists; confirm before reusing this elsewhere.
    """
    def transform_value(v):
        # datetimes are not JSON-serializable; store UNIX timestamps instead
        if isinstance(v, datetime):
            return calendar.timegm(v.timetuple())
        return v
    @wraps(f)
    def wrapper(*args, **kwargs):
        data = {}
        for elm in f(*args, **kwargs):
            data['id'] = elm.id
            if elm.tags:
                data['tag'] = elm.tags
            if elm.attributes:
                data.update({k: transform_value(elm.attributes[k]) for k in elm.attributes})
            if isinstance(elm, overpy.Way):
                data['nd'] = elm._node_ids
                if is_building(data):
                    if elm.center_lat:
                        data['center'] = {'lat': float(elm.center_lat), 'lon': float(elm.center_lon)}
            else:
                # non-way elements (nodes) carry their own coordinates
                data['lat'] = float(elm.lat)
                data['lon'] = float(elm.lon)
        return data
    return wrapper
@cache_it_json(cache=cache_proc)
@transform
def g_way(id):
    """Fetch way *id* (with center coordinates) as a plain dict via @transform."""
    center = api.query("""
    way({});
    out center;
    """.format(id))
    return list(center.ways)
def g_coord_area(lat, lon):
    """Ids of the administrative area(s) containing (lat, lon).

    Only areas tagged boundary=administrative at the highest admin_level
    found are returned.  Returns [] when no covering area has an
    admin_level tag (the original raised ValueError from max() on an
    empty sequence in that case).
    """
    result = api.query("""
    is_in({},{});
    out tags;
    """.format(lat, lon))
    admin_levels = [int(a.tags['admin_level']) for a in result.areas if 'admin_level' in a.tags]
    if not admin_levels:
        return []
    max_admin_level = max(admin_levels)
    return [a.id for a in result.areas if 'admin_level' in a.tags and
            a.tags.get('boundary', '') == 'administrative' and int(a.tags['admin_level']) == max_admin_level]
@cache_it_json(cache=cache_proc)
def g_way_geom(id):
    """Node chain of way *id* as a list of (lon, lat) float tuples."""
    debug('Geometry of ' + str(id))
    geom = api.query("""
    way({});
    (._; >;);
    out geom;
    """.format(id), cache=False)
    return map(lambda n: (float(n.lon), float(n.lat)), list(geom.ways).pop().nodes)
def relation_multipolygon(rels):
    """Merge the 'outer' members of the relations *rels* into a MultiPolygon."""
    rel_members = [[m for m in r.members if m.geometry and m.role == 'outer'] for r in rels]
    all_members = reduce(lambda x, y: x + y, rel_members, [])
    # ways2poly stitches member ways into closed rings plus leftover open lines
    poly, incomplete = ways2poly(all_members)
    geoms = []
    for p in poly:
        geoms.append(Polygon([(n.lon, n.lat) for n in p])) # .simplify(0.000001))
    for l in incomplete:
        # open line fragments are force-closed into polygons
        geoms.append(Polygon(LineString([(n.lon, n.lat) for n in l]))) # .simplify(0.000001))
    return MultiPolygon(geoms)
@cache_it_json(cache=cache_proc)
def g_area_geom(id):
    """Outer ring(s) of area *id* as lists of (lon, lat) tuples.

    Tries, in order: the area's pivot relation, a relation matched by the
    area's distinctive tags, and finally the pivot way's node chain.
    Returns [] when the area has no 'name' tag or the query fails.
    """
    debug('Area geometry of ' + str(id))
    def transform_tag_value(k, v):
        # admin_level is numeric in Overpass filters; everything else is quoted
        if k == 'admin_level':
            return int(v)
        return u'"{}"'.format(v)
    result = api.query("""area({});out;""".format(id))
    area_tags = result.areas[0].tags
    area_tags = {key: transform_tag_value(key, v) for key, v in area_tags.items() if
                 key in ['type', 'name:en', 'name', 'boundary', 'wikidata', 'is_in']}
    if 'name' not in area_tags:
        return []
    # first attempt: the relation that pivots this area
    geom = api.query(u"""
    area({});
    rel(pivot);
    out geom;
    """.format(id), cache=False)
    if geom is None:
        return []
    tag_filters = u''.join(
        [u'["{}"={}]'.format(key, value) for key, value in area_tags.items()])
    if not geom.relations:
        # fallback: match a relation by the area's own tags
        geom = api.query(u"""
        rel{};
        out geom;
        """.format(tag_filters), cache=False)
    if geom.relations:
        area_poly = relation_multipolygon(geom.relations)
    else:
        # last resort: the pivot way's node chain
        geom = api.query(u"""
        area({});
        way(pivot);
        out geom;
        >;
        out skel qt;
        """.format(id), cache=False)
        all_points = list(geom.ways).pop().nodes
        area_poly = Polygon(map(lambda n: (float(n.lon), float(n.lat)), all_points))
    if isinstance(area_poly, Polygon):
        area_poly = MultiPolygon([area_poly])
    return [map(lambda x: tuple(x[0]), zip(p.exterior.coords)) for p in area_poly]
def get_area_multipolygon(id):
    """Build a shapely MultiPolygon from the cached geometry of area *id*."""
    rings = g_area_geom(str(id))
    return MultiPolygon([Polygon(ring) for ring in rings])
@cache_it_json(cache=cache_proc)
@transform
def g_node(id):
    """Fetch node *id* as a plain dict via @transform."""
    result = api.query("""
    node({});
    out;
    """.format(id))
    return list(result.nodes)
@cache_it_json(cache=cache_proc)
def g_node_position(id):
    """Return the (lat, lon) float pair of node *id*."""
    r = api.query("""
    node({});
    out;
    """.format(id))
    node = list(r.nodes).pop()
    return float(node.lat), float(node.lon)
def nodes_in_buffer(nodes, buffer):
    """Return True when any node id in *nodes* falls inside the shapely *buffer*."""
    for node_id in nodes:
        lat, lon = g_node_position(node_id)
        if Point(float(lon), float(lat)).within(buffer):
            return True
    return False
def filter_building(way, b, polygons):
    """Return True when building *b* should be dropped from *way*'s listing.

    Buffers the nearest point of *b* to the way, intersects the buffer
    boundary with the way, and checks whether any OTHER building polygon
    crosses the resulting triangle -- apparently filtering buildings that
    are blocked from the way by another building (TODO confirm intent).
    """
    near_way_points = nearest_points(polygons[b['id']], way)
    b_near_way = near_way_points[0]
    buff = b_near_way.buffer(b_near_way.distance(near_way_points[1]) + 0.00001, mitre_limit=1.0)
    # flatten the buffer vertically (y scaled by 0.75)
    buff = shapely.affinity.scale(buff, 1.0, 0.75)
    n_intersect = buff.boundary.intersection(way)
    n_intersect = [n_intersect] if isinstance(n_intersect, Point) else list(n_intersect)
    n_intersect = MultiPoint(n_intersect)
    filtered = True
    if n_intersect:
        nearest = nearest_points(polygons[b['id']], n_intersect)
        mp = MultiPoint(list(n_intersect) + [nearest[0]])
        try:
            mp = Polygon(mp)
            # Python 2 tuple-parameter lambda (invalid syntax on Python 3)
            filtered = any(
                filter(lambda (bid, p): bid != b['id'] and p.boundary.intersects(mp.boundary),
                       polygons.items()))
        except ValueError:
            # too few points to form a polygon; keep filtered = True
            pass
    return filtered
def get_way_attrs(id, tags, buffer):
    """Assemble the attribute dict served for way *id*.

    :param tags: truthy -> return only basic tags, skip related-element queries.
    :param buffer: optional shapely geometry restricting related elements.
    Roads get 'buildings', 'intersect' and 'contains'; buildings get
    'contains', 'ways', 'surrounding_buildings', 'center' and 'areas'.
    """
    way = g_way(id)
    if is_road(way):
        way['type'] = 'way'
        if not tags:
            all_w_buildings = query_way_buildings(id)
            w_buildings = filter(lambda x: not buffer or Point(x['center_lon'], x['center_lat']).within(buffer),
                                 all_w_buildings)
            shape = LineString(g_way_geom(id))
            all_building_polygons = {b['id']: Polygon(g_way_geom(b['id'])) for b in w_buildings}
            # drop buildings that filter_building marks as blocked
            w_buildings = filter(lambda b: not filter_building(shape, b, all_building_polygons), w_buildings)
            way['buildings'] = map(lambda x: 'way/{}'.format(x['id']),
                                   w_buildings)
            all_intersects = query_intersect_ways(id)
            # Python 2 tuple-parameter lambda over (way_id, shared_node_ids)
            w_intersects = filter(lambda (w, cuts): not buffer or nodes_in_buffer(cuts, buffer), all_intersects.items())
            way['intersect'] = map(lambda x: 'way/{}'.format(x[0]), w_intersects)
            for node in query_way_elms(id):
                p = Point(node['lon'], node['lat'])
                if not buffer or p.within(buffer):
                    if 'contains' not in way:
                        way['contains'] = []
                    n_key = _elm_key(node, MATCH_TAGS)
                    if n_key is None:
                        n_key = 'node'
                    way['contains'].append({
                        'id': 'node/{}'.format(node['id']),
                        'type': n_key
                    })
    elif is_building(way):
        way['type'] = 'building'
        if not tags:
            for node in query_building_elms(way):
                p = Point(node['lon'], node['lat'])
                if not buffer or p.within(buffer):
                    if 'contains' not in way:
                        way['contains'] = []
                    n_key = _elm_key(node, MATCH_TAGS)
                    if n_key is None:
                        n_key = 'node'
                    way['contains'].append({
                        'id': 'node/{}'.format(node['id']),
                        'type': n_key
                    })
            way['ways'] = map(lambda x: 'way/{}'.format(x['id']), query_building_ways(id))
        surr = []
        way['surrounding_buildings'] = surr
        for w in query_surrounding_buildings(id):
            if w['id'] == int(id):
                # the seed way itself supplies the center coordinates
                way['center'] = {'lat': w['center_lat'], 'lon': w['center_lon']}
            else:
                p = Point(w['center_lon'], w['center_lat'])
                if not buffer or p.within(buffer):
                    surr.append('way/{}'.format(w['id']))
        way['areas'] = map(lambda aid: 'area/{}'.format(aid),
                           g_coord_area(way['center']['lat'], way['center']['lon']))
    # raw node-id list is internal; never expose it to API clients
    if 'nd' in way:
        del way['nd']
    return way
@app.route('/way/<id>')
@cache.cached(timeout=MAX_AGE, key_prefix=make_cache_key)
def get_way(id):
    """GET /way/<id>: way attributes, optionally limited to an area or radius."""
    lat = request.args.get('lat')
    lng = request.args.get('lng')
    area = request.args.get('area')
    tags = request.args.get('tags')
    tags = tags is not None
    buffer = None
    if area:
        try:
            int(area)
        except ValueError:
            # non-numeric area: fuzzy-match the name to an area id
            area = match_area_id(area)
        buffer = get_area_multipolygon(area)
    elif any([lat, lng]):
        lat = float(lat)
        lng = float(lng)
        radius = float(request.args.get('radius', 100))
        poi = LatLon(lat, lng)
        # convert the metric radius to degrees via an offset point due east
        r_p = poi.offset(90, radius / 1000.0)
        buffer = Point(lng, lat).buffer(abs((float(r_p.lon) - float(poi.lon))), resolution=5, mitre_limit=1.0)
        buffer = shapely.affinity.scale(buffer, 1.0, 0.75)
    way = get_way_attrs(id, tags, buffer)
    response = jsonify(way)
    response.headers['Cache-Control'] = 'max-age={}'.format(MAX_AGE)
    response.headers['Last-Modified'] = format_date_time(time.mktime(datetime.now().timetuple()))
    return response
@app.route('/way/<id>/geom')
@cache.cached(timeout=MAX_AGE)
def get_way_geom(id):
    """GET /way/<id>/geom: WKT geometry (polygon for buildings, line otherwise)."""
    points = g_way_geom(id)
    way = g_way(id)
    shape = Polygon(points) if is_building(way) else LineString(points)
    response = jsonify({'wkt': dumps(shape)})
    response.headers['Cache-Control'] = 'max-age={}'.format(MAX_AGE)
    response.headers['Last-Modified'] = format_date_time(time.mktime(datetime.now().timetuple()))
    return response
def _process_rel_member(m):
if m['type'] == 'node':
mid = 'node/{}'.format(m['ref'])
else:
mid = 'way/{}'.format(m['ref'])
return {'id': mid, 'role': m['role']}
@app.route('/node/<id>')
@cache.cached(timeout=MAX_AGE, key_prefix=make_tags_cache_key)
def get_node(id):
    """GET /node/<id>: node attributes plus containing building and areas."""
    debug('Getting node ' + str(id))
    node = g_node(id)
    tags = request.args.get('tags')
    tags = tags is not None
    n_key = _elm_key(node, MATCH_TAGS)
    node['type'] = n_key
    if not tags:
        n_building = query_node_building(node)
        if n_building:
            node['building'] = 'way/{}'.format(n_building['id'])
        node['areas'] = map(lambda aid: 'area/{}'.format(aid),
                            g_coord_area(node['lat'], node['lon']))
    response = jsonify(node)
    response.headers['Cache-Control'] = 'max-age={}'.format(MAX_AGE)
    response.headers['Last-Modified'] = format_date_time(time.mktime(datetime.now().timetuple()))
    return response
@app.route('/node/<id>/geom')
@cache.cached(timeout=MAX_AGE, key_prefix=make_tags_cache_key)
def get_node_geom(id):
    """GET /node/<id>/geom: node position as a WKT point."""
    node = g_node(id)
    point = Point(node['lon'], node['lat'])
    response = jsonify({'wkt': dumps(point)})
    response.headers['Cache-Control'] = 'max-age={}'.format(MAX_AGE)
    response.headers['Last-Modified'] = format_date_time(time.mktime(datetime.now().timetuple()))
    return response
def search_sub_admin_areas(sub_admin_level, geoms):
    """Search Overpass for administrative boundaries nested inside *geoms*.

    Starting just below *sub_admin_level*, the admin_level is increased
    until at least one boundary relation or way is found (level 5,
    religious admins, is skipped; the search stops after level 10).

    :returns: (list of overpy elements, admin_level that produced them).
    """
    cur_admin_level = sub_admin_level
    all_sub_areas_result = {}
    while not all_sub_areas_result and cur_admin_level < 11:
        cur_admin_level += 1
        if cur_admin_level == 5:
            cur_admin_level += 1  # skip religious admins (level 5)
        if not isinstance(geoms, MultiPolygon):
            geoms = MultiPolygon([geoms])
        for geom in geoms:
            # the Overpass poly filter takes "lat lon" pairs of the convex hull
            geom_points = map(lambda x: tuple(x[0]), zip(geom.convex_hull.exterior.coords))
            lat_lng_points = ['{} {}'.format(p[1], p[0]) for p in geom_points]
            poly_str = 'poly:"{}"'.format(' '.join(lat_lng_points))
            sub_areas_result = api.query("""
            (
            rel[boundary=administrative][type][admin_level={}][name]({});
            way[boundary=administrative][type][admin_level={}][name]({});
            );
            out tags;
            out center;
            """.format(cur_admin_level, poly_str, cur_admin_level, poly_str))
            # de-duplicate across the individual polygons of *geoms*
            # (the original called r_id.format(r.id)/w_id.format(w.id), a
            # no-op on placeholder-free strings; dropped here)
            for r in sub_areas_result.relations:
                r_id = 'r{}'.format(r.id)
                if r_id not in all_sub_areas_result:
                    all_sub_areas_result[r_id] = r
            for w in sub_areas_result.ways:
                w_id = 'w{}'.format(w.id)
                if w_id not in all_sub_areas_result:
                    all_sub_areas_result[w_id] = w
    return all_sub_areas_result.values(), cur_admin_level
@cache_it_json(cache=cache_proc)
def area_sfc(id):
    """Cached surface area (in squared degrees) of administrative area *id*."""
    return get_area_multipolygon(id).area
@cache_it_json(cache=cache_proc)
def query_sub_areas(id, admin_level):
    """Ids of administrative sub-areas contained (>90% overlap) in area *id*.

    Iteratively searches deeper admin levels, subtracting each accepted
    sub-area from the remaining ("diff") geometry, until the leftover area
    drops below 10% of the original or the level budget is exhausted.
    """
    def next_admin_level():
        # bound how far below *admin_level* the search is allowed to go
        if admin_level <= 2:
            return sub_admin_level < 4
        elif admin_level < 8:
            return sub_admin_level - admin_level <= 3
        elif sub_admin_level < 11:
            return sub_admin_level - admin_level <= 2
        return False
    area_names = g_area_names()
    contained_areas = set()
    admin_level = int(admin_level)
    if admin_level >= 0:
        geoms = get_area_multipolygon(id)
        diff_geoms = get_area_multipolygon(id)
        # simplified copy keeps the difference operations affordable
        area_factor = simplify_factor(geoms, max=0.1)
        simpl_geoms = geoms.simplify(area_factor)
        sub_admins_ids = set()
        sub_admin_level = admin_level
        geoms_area = geoms.area
        while next_admin_level() and (diff_geoms.area > geoms_area * 0.1 or not contained_areas):
            bounds = diff_geoms.bounds
            if len(bounds) != 4:
                break
            sub_admins, sub_admin_level = search_sub_admin_areas(sub_admin_level, diff_geoms)
            sub_area_ids = set()
            for a in sub_admins:
                if a.id not in sub_admins_ids:
                    sub_admins_ids.add(a.id)
                else:
                    continue
                # only accept names that map unambiguously to one area id
                matching_sub_ids = filter(lambda an: an['l'] == sub_admin_level, area_names.get(a.tags['name'], {}))
                if len(matching_sub_ids) == 1:
                    sub_area_ids.add(matching_sub_ids[0]['id'])
            sub_area_ids = filter(lambda a: a not in contained_areas, sub_area_ids)
            simpl_geoms_area = simpl_geoms.area
            for sub_area_id in sub_area_ids:
                sub_area_multipoly = get_area_multipolygon(sub_area_id)
                sub_area = area_sfc(sub_area_id)
                # BUGFIX: reset per candidate; the original kept diff/
                # area_reduction from the previous candidate when both
                # difference attempts failed (or hit NameError first time).
                diff = None
                try:
                    diff = simpl_geoms.difference(sub_area_multipoly.convex_hull)
                    area_reduction = simpl_geoms_area - diff.area
                except TopologicalError:
                    try:
                        diff = geoms.difference(sub_area_multipoly)
                        area_reduction = geoms_area - diff.area
                    except TopologicalError as e:
                        error(e.message)
                        continue  # both attempts failed; skip this candidate
                if isinstance(diff, Polygon) or isinstance(diff, MultiPolygon):
                    try:
                        overlap_rate = area_reduction / sub_area
                    except ZeroDivisionError:
                        pass
                    else:
                        if overlap_rate > 0.9:
                            contained_areas.add(sub_area_id)
                            try:
                                diff_geoms = diff_geoms.difference(sub_area_multipoly)
                            except Exception:
                                # retry polygon by polygon when shapely rejects
                                # the multipolygon as a whole
                                for sub_area_poly in sub_area_multipoly:
                                    try:
                                        diff_geoms = diff_geoms.difference(sub_area_poly)
                                    except Exception as e:
                                        pass
    return list(contained_areas)
@cache_it_json(cache=cache_proc)
def g_area(id):
    """Return a dict describing administrative area *id*.

    The 'type' gets a suffix for known admin levels, the tag dict keeps only
    'name', 'name:es' and 'name:it' among the name:* keys, and 'contains'
    lists the ids of contained sub-areas.
    """
    result = api.query("""area({});out;""".format(id))
    area = result.areas.pop()
    admin_level = int(area.tags.get('admin_level', -1))
    spec_type = None
    if admin_level == 4:
        spec_type = 'province'
    elif admin_level == 8:
        spec_type = 'municipality'
    elif admin_level == 2:
        spec_type = 'country'
    type = 'area'
    if spec_type:
        type = ':'.join([type, spec_type])
    debug('Sub areas of ' + area.tags['name'])
    contained_areas = query_sub_areas(id, admin_level)
    # NOTE(review): the original tested "k == 'name:es'" twice; the duplicate
    # is dropped here (possibly 'name:en' was intended -- TODO confirm).
    return {'type': type,
            'id': id,
            'tag': {k: v for k, v in area.tags.iteritems() if
                    not k.startswith('name') or k in ('name', 'name:es', 'name:it')},
            'contains': contained_areas}
@app.route('/area/<id>')
@cache.cached(timeout=MAX_AGE, key_prefix=make_cache_key)
def get_area(id):
    """GET /area/<id>: area description with contained sub-area references."""
    area_dict = g_area(id)
    area_dict['contains'] = map(lambda a: 'area/{}'.format(a), area_dict['contains'])
    response = jsonify(area_dict)
    response.headers['Cache-Control'] = 'max-age={}'.format(MAX_AGE)
    response.headers['Last-Modified'] = format_date_time(time.mktime(datetime.now().timetuple()))
    return response
def simplify_factor(multipolygon, max=0.01):
    """Simplification tolerance proportional to the total exterior vertex
    count of *multipolygon*, capped at *max* (parameter name kept for
    keyword callers)."""
    total_nodes = sum(len(poly.exterior.coords) for poly in multipolygon)
    factor = 0.0000013 * total_nodes
    return factor if factor <= max else max
@app.route('/area/<id>/geom')
@cache.cached(timeout=MAX_AGE)
def get_area_geom(id):
    """GET /area/<id>/geom: simplified area geometry as WKT."""
    multipolygon = get_area_multipolygon(id)
    # tolerance scales with vertex count so big areas stay cheap to serve
    simpl_factor = simplify_factor(multipolygon)
    multipolygon = multipolygon.simplify(simpl_factor)
    response = jsonify({'wkt': dumps(multipolygon)})
    response.headers['Cache-Control'] = 'max-age={}'.format(MAX_AGE)
    response.headers['Last-Modified'] = format_date_time(time.mktime(datetime.now().timetuple()))
    return response
def _elm_key(elm, match=set()):
key = None
tags = elm.get('tags', elm.get('tag'))
matching_tags = list(set(match).intersection(set(tags.keys())))
matching_tags = sorted(matching_tags, key=lambda x: x == 'building')
try:
tag = matching_tags.pop()
key = '{}:{}'.format(tag, tags[tag])
except IndexError:
pass
return key
# Tag keys that may determine an element's 'tag:value' type string.
MATCH_TAGS = {'shop', 'highway', 'amenity', 'building', 'tourism'}
@cache_it_json(cache=cache_proc)
def g_area_names():
    """Map area name -> [{'id': str, 'l': admin_level}, ...] for all admin areas."""
    all_areas = api.query("""
    area[boundary=administrative][type][name][admin_level];
    out body;
    """)
    area_map = {}
    for area in all_areas.areas:
        try:
            # fall back to the Spanish name when the generic one is absent
            name = area.tags.get('name', area.tags.get('name:es'))
            if name not in area_map:
                area_map[name] = []
            area_map[name].append({'id': str(area.id), 'l': int(area.tags['admin_level'])})
        except KeyError:
            pass
    return area_map
@cache_it_json(cache=cache_proc, expire=3600)
def match_area_id(name):
    """Fuzzy-match *name* against known area names and return the best area id.

    Implicitly returns None when the best score is <= 50.
    """
    area_names = g_area_names()
    match, score = process.extractOne(name, area_names.keys())
    if score > 50:
        areas = area_names[match]
        # prefer the coarsest (lowest) admin level among equally-named areas
        min_admin_level = min(map(lambda x: x['l'], areas))
        selected_matchings = filter(lambda x: x['l'] == min_admin_level, areas)
        final_match = selected_matchings.pop()
        return final_match['id']
@cache_it_json(cache=cache_proc)
def get_area_buildings(id):
    """List buildings inside the free (non-delegated) geometry of area *id*."""
    area_geoms = get_free_area(str(id))
    buildings = []
    for points in area_geoms:
        # Overpass poly filter takes "lat lon" pairs
        lat_lng_points = ['{} {}'.format(p[1], p[0]) for p in points]
        geo_filter = 'poly:"{}"'.format(' '.join(lat_lng_points))
        query_str = """
        way[building]["building"!~"no"]({});
        out tags;
        """.format(geo_filter)
        results = api.query(query_str, maxsize=134217728, expire=86400)
        for i, way in enumerate(results.ways):
            elm = {'id': 'way/{}'.format(way.id)} # way_distance(osm_result, way, poi)}
            key = _elm_key({'tags': way.tags}, MATCH_TAGS)
            if key is None:
                key = 'way'
            if 'name' in way.tags:
                elm['name'] = way.tags['name']
            elm['type'] = 'way:' + key
            buildings.append(elm)
    return buildings
#@cache_it_json(cache=cache_proc)
def get_free_area(id):
    """Outer rings of area *id* minus the geometry of its contained sub-areas."""
    area = g_area(str(id))
    free_mp = get_area_multipolygon(id)
    if area['contains']:
        sub_mp_list = [get_area_multipolygon(sub_area) for sub_area in area['contains']]
        for smp in sub_mp_list:
            try:
                free_mp = free_mp.difference(smp)
            except TopologicalError:
                # keep the undiffed geometry when shapely rejects the shapes
                pass
    if isinstance(free_mp, Polygon):
        free_mp = MultiPolygon([free_mp])
    return [map(lambda x: tuple(x[0]), zip(p.exterior.coords)) for p in free_mp]
def area_type(admin_level):
    """Map an OSM admin_level to this API's area type string.

    4 -> 'area:province', 8 -> 'area:municipality', 2 -> 'area:country';
    any other level yields plain 'area'.  (The original bound a local named
    `type`, shadowing the builtin; replaced by a dispatch dict.)
    """
    suffixes = {4: 'province', 8: 'municipality', 2: 'country'}
    suffix = suffixes.get(admin_level)
    return 'area:' + suffix if suffix else 'area'
@app.route('/elements')
@cache.cached(timeout=MAX_AGE, key_prefix=make_cache_key)
def get_geo_elements():
    """GET /elements: list ways/buildings/nodes/areas near a point or in an area.

    Query params: limit, filter (repeatable), area OR location OR lat+lng,
    radius, restrict.  Responds {'center': {...}, 'results': [...]}.
    """
    limit = request.args.get('limit', None)
    if limit:
        limit = int(limit)
    elm_filters = request.args.getlist('filter')
    elms = []
    area = request.args.get('area')
    restrict = request.args.get('restrict', None)
    restrict = restrict is not None
    if area:
        try:
            int(area)
        except ValueError:
            # non-numeric area: fuzzy-match the name to an area id
            area = match_area_id(area)
    else:
        try:
            # try a geocoded 'location' first, then explicit lat/lng
            location = request.args.get('location')
            ll = geocoding(location)
            lat, lng = ll['lat'], ll['lng']
        except Exception:
            try:
                lat = float(request.args.get('lat'))
                lng = float(request.args.get('lng'))
            except TypeError:
                response = jsonify({'message': 'Bad arguments'})
                response.status_code = 400
                return response
    geo_filters = []
    if area:
        area_mp = get_area_multipolygon(area)
        center_tuple = list(area_mp.representative_point().coords)[0]
        center = LatLon(*reversed(center_tuple))
        # area geometry minus already-catalogued sub-areas
        free_area = get_free_area(area)
        if 'area' in elm_filters:
            area_dict = g_area(str(area))
            admin_level = int(area_dict['tag']['admin_level'])
            if restrict:
                elms = [{'type': area_type(admin_level), 'id': 'area/{}'.format(area)}]
            else:
                subareas = [g_area(sa) for sa in area_dict['contains']]
                elms = [{'type': area_type(sa['tag']['admin_level']), 'id': 'area/{}'.format(sa['id'])} for sa in
                        subareas]
        for points in free_area:
            # lat, lng = points[0][1], points[0][0]
            lat_lng_points = ['{} {}'.format(p[1], p[0]) for p in points]
            geo_filter = 'poly:"{}"'.format(' '.join(lat_lng_points))
            geo_filters.append(geo_filter)
    else:
        radius = int(request.args.get('radius', 200))
        center = LatLon(lat, lng)
        geo_filter = 'around:{},{},{}'.format(radius, lat, lng)
        geo_filters.append(geo_filter)
    for geo_filter in geo_filters:
        if elm_filters:
            # build one union query restricted to the requested element kinds
            way_query_union = []
            node_query_union = []
            if 'building' in elm_filters:
                way_query_union.append('way.wa[building]["building"!~"no"]')
            if 'way' in elm_filters:
                way_query_union.append('way.wa[highway]')
                way_query_union.append('way.wa[footway]')
            if 'shop' in elm_filters:
                node_query_union.append('node.na[shop]')
            if 'amenity' in elm_filters:
                node_query_union.append('node.na[amenity]')
            if 'highway' in elm_filters:
                node_query_union.append('node.na[highway]')
            if 'tourism' in elm_filters:
                node_query_union.append('node.na[tourism]')
            query_str = ''
            if way_query_union:
                query_str += 'way({}) -> .wa;\n(\n'.format(geo_filter)
                query_str += ';\n'.join(way_query_union)
                query_str += ';\n);\nout tags;'
            if node_query_union:
                query_str += 'node({}) -> .na;\n(\n'.format(geo_filter)
                query_str += ';\n'.join(node_query_union)
                query_str += ';\n);\nout tags;'
        else:
            # default: buildings only
            query_str = """
            way[building]["building"!~"no"]({});
            out tags;
            """.format(geo_filter)
        osm_result = api.query(query_str, maxsize=134217728, expire=864000)
        for i, node in enumerate(osm_result.nodes):
            elm = {'id': 'node/{}'.format(node.id)} # node_distance(node, poi)}
            key = _elm_key({'tags': node.tags}, MATCH_TAGS)
            if key is None:
                key = 'node'
            if 'name' in node.tags:
                elm['name'] = node.tags['name']
            elm['type'] = 'node:' + key
            if elm not in elms:
                elms.append(elm)
            if limit and i == limit - 1:
                break
        for i, way in enumerate(osm_result.ways):
            elm = {'id': 'way/{}'.format(way.id)} # way_distance(osm_result, way, poi)}
            key = _elm_key({'tags': way.tags}, MATCH_TAGS)
            if key is None:
                key = 'way'
            if 'name' in way.tags:
                elm['name'] = way.tags['name']
            elm['type'] = 'way:' + key
            if elm not in elms:
                elms.append(elm)
            if limit and i == limit - 1:
                break
    if 'area' in elm_filters:
        # also report the administrative area(s) containing the center point
        osm_result = api.query("""
        is_in({},{}) -> .a;
        area.a[admin_level];
        out;""".format(center.lat, center.lon))
        max_admin_level = 0
        areas_found = []
        for res_area in osm_result.areas:
            if area and restrict and int(res_area.id) != int(area):
                continue
            admin_level = int(res_area.tags['admin_level'])
            if admin_level > max_admin_level:
                max_admin_level = admin_level
            areas_found.append({'id': res_area.id, 'l': admin_level})
        for d in filter(lambda d: d['l'] == max_admin_level, areas_found):
            elm = {'id': 'area/{}'.format(d['id']), 'type': area_type(d['l'])}
            if elm not in elms:
                elms.append(elm)
    result = {
        'center': {
            'lat': float(center.lat),
            'lng': float(center.lon)
        },
        'results': elms
    }
    response = jsonify(result)
    response.headers['Cache-Control'] = 'max-age={}'.format(MAX_AGE)
    response.headers['Last-Modified'] = format_date_time(time.mktime(datetime.now().timetuple()))
    return response
# Dev entry point: threaded Flask server on all interfaces, port 5006.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5006, use_reloader=False, debug=False, threaded=True)
|
|
## @file
# This file is used to parse and evaluate expression in directive or PCD value.
#
# Copyright (c) 2011 - 2017, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
## Import Modules
#
from Common.GlobalData import *
from CommonDataClass.Exceptions import BadExpression
from CommonDataClass.Exceptions import WrnExpression
from Misc import GuidStringToGuidStructureString
# Message templates for expression-evaluation errors and warnings.
ERR_STRING_EXPR = 'This operator cannot be used in string expression: [%s].'
# NOTE: 'SNYTAX' is a historical typo; the name is kept so importers keep working.
ERR_SNYTAX = 'Syntax error, the rest of expression cannot be evaluated: [%s].'
ERR_MATCH = 'No matching right parenthesis.'
ERR_STRING_TOKEN = 'Bad string token: [%s].'
ERR_MACRO_TOKEN = 'Bad macro token: [%s].'
ERR_EMPTY_TOKEN = 'Empty token is not allowed.'
ERR_PCD_RESOLVE = 'PCD token cannot be resolved: [%s].'
ERR_VALID_TOKEN = 'No more valid token found from rest of string: [%s].'
ERR_EXPR_TYPE = 'Different types found in expression.'
ERR_OPERATOR_UNSUPPORT = 'Unsupported operator: [%s]'
ERR_REL_NOT_IN = 'Expect "IN" after "not" operator.'
WRN_BOOL_EXPR = 'Operand of boolean type cannot be used in arithmetic expression.'
WRN_EQCMP_STR_OTHERS = '== Comparison between Operand of string type and Boolean/Number Type always return False.'
WRN_NECMP_STR_OTHERS = '!= Comparison between Operand of string type and Boolean/Number Type always return True.'
ERR_RELCMP_STR_OTHERS = 'Operator taking Operand of string type and Boolean/Number Type is not allowed: [%s].'
ERR_STRING_CMP = 'Unicode string and general string cannot be compared: [%s %s %s]'
ERR_ARRAY_TOKEN = 'Bad C array or C format GUID token: [%s].'
ERR_ARRAY_ELE = 'This must be HEX value for NList or Array: [%s].'
ERR_EMPTY_EXPR = 'Empty expression is not allowed.'
ERR_IN_OPERAND = 'Macro after IN operator can only be: $(FAMILY), $(ARCH), $(TOOL_CHAIN_TAG) and $(TARGET).'
## SplitString
#  Split a string into plain segments and double-quoted segments.
#  For example: abc"de\"f"ghi"jkl"mn will be: ['abc', '"de\"f"', 'ghi', '"jkl"', 'mn']
#
def SplitString(String):
    # Escaped sequences are masked in a scratch copy so an embedded \" does
    # not toggle the quote state ('\\' -> '//', '\"' -> "\'"); characters are
    # always taken from the original String.
    Masked = String.replace('\\\\', '//').replace('\\\"', '\\\'')
    Result = []
    Current = ''
    Inside = False
    for Index, Char in enumerate(Masked):
        if Char == '"':
            Inside = not Inside
            if not Inside:
                # closing quote: emit the quoted item including both quotes
                Current += String[Index]
                Result.append(Current)
                Current = ''
                continue
            # opening quote: flush any pending unquoted item first
            if Current:
                Result.append(Current)
                Current = ''
        Current += String[Index]
    if Inside:
        # unbalanced quote at end of input
        raise BadExpression(ERR_STRING_TOKEN % Current)
    if Current:
        Result.append(Current)
    return Result
## ReplaceExprMacro
#
def ReplaceExprMacro(String, Macros, ExceptionList = None):
    """Expand $(MACRO) references in *String* using the *Macros* dict.

    Quoted segments get the raw macro value.  In unquoted segments:
    undefined macros become '0', empty values become '""', values of
    macros in *ExceptionList* are wrapped in double quotes, and a macro
    directly after the IN operator must be in *ExceptionList*.
    """
    StrList = SplitString(String)
    for i, String in enumerate(StrList):
        InQuote = False
        if String.startswith('"'):
            InQuote = True
        MacroStartPos = String.find('$(')
        if MacroStartPos < 0:
            # no macro here; still record any platform PCDs used conditionally
            for Pcd in gPlatformPcds.keys():
                if Pcd in String:
                    if Pcd not in gConditionalPcds:
                        gConditionalPcds.append(Pcd)
            continue
        RetStr = ''
        while MacroStartPos >= 0:
            RetStr = String[0:MacroStartPos]
            MacroEndPos = String.find(')', MacroStartPos)
            if MacroEndPos < 0:
                raise BadExpression(ERR_MACRO_TOKEN % String[MacroStartPos:])
            Macro = String[MacroStartPos+2:MacroEndPos]
            if Macro not in Macros:
                # From C reference manual:
                # If an undefined macro name appears in the constant-expression of
                # !if or !elif, it is replaced by the integer constant 0.
                RetStr += '0'
            elif not InQuote:
                Tklst = RetStr.split()
                if Tklst and Tklst[-1] in ['IN', 'in'] and ExceptionList and Macro not in ExceptionList:
                    raise BadExpression(ERR_IN_OPERAND)
                # Make sure the macro in exception list is encapsulated by double quote
                # For example: DEFINE ARCH = IA32 X64
                # $(ARCH) is replaced with "IA32 X64"
                if ExceptionList and Macro in ExceptionList:
                    RetStr += '"' + Macros[Macro] + '"'
                elif Macros[Macro].strip():
                    RetStr += Macros[Macro]
                else:
                    RetStr += '""'
            else:
                RetStr += Macros[Macro]
            RetStr += String[MacroEndPos+1:]
            String = RetStr
            MacroStartPos = String.find('$(')
        StrList[i] = RetStr
    return ''.join(StrList)
# Macros permitted on the right-hand side of the IN operator.
SupportedInMacroList = ['TARGET', 'TOOL_CHAIN_TAG', 'ARCH', 'FAMILY']
class ValueExpression(object):
    """Recursive-descent parser/evaluator for build-description value
    expressions (Python 2).

    An instance is constructed with the expression string and a symbol
    table (PCD/macro mapping); calling the instance evaluates it.  Script
    operators (AND, EQ, IN, ...) are mapped onto Python operators and the
    final per-operator evaluation is delegated to Python's eval() inside
    Eval().  Errors raise BadExpression; recoverable oddities raise
    WrnExpression carrying a .result the caller may keep.
    """
    # Logical operator mapping: script spellings -> Python operator text.
    LogicalOperators = {
        '&&' : 'and', '||' : 'or',
        '!' : 'not', 'AND': 'and',
        'OR' : 'or' , 'NOT': 'not',
        'XOR': '^' , 'xor': '^',
        'EQ' : '==' , 'NE' : '!=',
        'GT' : '>' , 'LT' : '<',
        'GE' : '>=' , 'LE' : '<=',
        'IN' : 'in'
    }
    # Single/double-character operators that are not spelled with letters.
    NonLetterOpLst = ['+', '-', '*', '/', '%', '&', '|', '^', '~', '<<', '>>', '!', '=', '>', '<']
    # Token classification patterns.
    PcdPattern = re.compile(r'[_a-zA-Z][0-9A-Za-z_]*\.[_a-zA-Z][0-9A-Za-z_]*$')
    HexPattern = re.compile(r'0[xX][0-9a-fA-F]+$')
    RegGuidPattern = re.compile(r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}')
    SymbolPattern = re.compile("("
                               "\$\([A-Z][A-Z0-9_]*\)|\$\(\w+\.\w+\)|\w+\.\w+|"
                               "&&|\|\||!(?!=)|"
                               "(?<=\W)AND(?=\W)|(?<=\W)OR(?=\W)|(?<=\W)NOT(?=\W)|(?<=\W)XOR(?=\W)|"
                               "(?<=\W)EQ(?=\W)|(?<=\W)NE(?=\W)|(?<=\W)GT(?=\W)|(?<=\W)LT(?=\W)|(?<=\W)GE(?=\W)|(?<=\W)LE(?=\W)"
                               ")")

    @staticmethod
    def Eval(Operator, Oprand1, Oprand2 = None):
        """Apply one unary or binary operator to already-parsed operands.

        Raises BadExpression for type mismatches; raises (not returns)
        WrnExpression for cases that still produce a result but deserve a
        warning — the caller catches it and uses WrnExpression.result.
        Returns the evaluated value otherwise.
        """
        WrnExp = None
        # Arithmetic/bitwise operators are not defined on strings.
        if Operator not in ["==", "!=", ">=", "<=", ">", "<", "in", "not in"] and \
            (type(Oprand1) == type('') or type(Oprand2) == type('')):
            raise BadExpression(ERR_STRING_EXPR % Operator)
        # Collapse int/long into one category; strings and booleans are
        # distinct categories for the mixed-type checks below.
        TypeDict = {
            type(0) : 0,
            type(0L) : 0,
            type('') : 1,
            type(True) : 2
        }
        EvalStr = ''
        if Operator in ["!", "NOT", "not"]:
            if type(Oprand1) == type(''):
                raise BadExpression(ERR_STRING_EXPR % Operator)
            EvalStr = 'not Oprand1'
        elif Operator in ["~"]:
            if type(Oprand1) == type(''):
                raise BadExpression(ERR_STRING_EXPR % Operator)
            EvalStr = '~ Oprand1'
        else:
            if Operator in ["+", "-"] and (type(True) in [type(Oprand1), type(Oprand2)]):
                # Boolean in '+'/'-' will be evaluated but raise warning
                WrnExp = WrnExpression(WRN_BOOL_EXPR)
            elif type('') in [type(Oprand1), type(Oprand2)] and type(Oprand1)!= type(Oprand2):
                # == between string and number/boolean will always return False, != return True
                if Operator == "==":
                    WrnExp = WrnExpression(WRN_EQCMP_STR_OTHERS)
                    WrnExp.result = False
                    raise WrnExp
                elif Operator == "!=":
                    WrnExp = WrnExpression(WRN_NECMP_STR_OTHERS)
                    WrnExp.result = True
                    raise WrnExp
                else:
                    raise BadExpression(ERR_RELCMP_STR_OTHERS % Operator)
            elif TypeDict[type(Oprand1)] != TypeDict[type(Oprand2)]:
                if Operator in ["==", "!=", ">=", "<=", ">", "<"] and set((TypeDict[type(Oprand1)], TypeDict[type(Oprand2)])) == set((TypeDict[type(True)], TypeDict[type(0)])):
                    # comparison between number and boolean is allowed
                    pass
                elif Operator in ['&', '|', '^', "and", "or"] and set((TypeDict[type(Oprand1)], TypeDict[type(Oprand2)])) == set((TypeDict[type(True)], TypeDict[type(0)])):
                    # bitwise and logical operation between number and boolean is allowed
                    pass
                else:
                    raise BadExpression(ERR_EXPR_TYPE)
            if type(Oprand1) == type('') and type(Oprand2) == type(''):
                # Comparing a unicode (L"...") literal with a plain one is invalid.
                if (Oprand1.startswith('L"') and not Oprand2.startswith('L"')) or \
                    (not Oprand1.startswith('L"') and Oprand2.startswith('L"')):
                    raise BadExpression(ERR_STRING_CMP % (Oprand1, Operator, Oprand2))
            if 'in' in Operator and type(Oprand2) == type(''):
                Oprand2 = Oprand2.split()
            EvalStr = 'Oprand1 ' + Operator + ' Oprand2'

        # Local symbols used by built in eval function
        Dict = {
            'Oprand1' : Oprand1,
            'Oprand2' : Oprand2
        }
        try:
            Val = eval(EvalStr, {}, Dict)
        except Exception, Excpt:
            raise BadExpression(str(Excpt))

        if Operator in ['and', 'or']:
            # Normalize Python's short-circuit result to a strict boolean.
            if Val:
                Val = True
            else:
                Val = False

        if WrnExp:
            WrnExp.result = Val
            raise WrnExp
        return Val

    def __init__(self, Expression, SymbolTable={}):
        """Prepare Expression for evaluation.

        Non-string expressions are passed through untouched (_NoProcess).
        NOTE(review): SymbolTable is a mutable default argument and is
        mutated below via self._Symb.update(self.LogicalOperators), so
        callers sharing the default dict (or a common table) will see the
        operator names leak into it — confirm before relying on that.
        """
        self._NoProcess = False
        if type(Expression) != type(''):
            self._Expr = Expression
            self._NoProcess = True
            return

        # Expand $(MACRO) references before parsing.
        self._Expr = ReplaceExprMacro(Expression.strip(),
                                  SymbolTable,
                                  SupportedInMacroList)

        if not self._Expr.strip():
            raise BadExpression(ERR_EMPTY_EXPR)

        #
        # The symbol table including PCD and macro mapping
        #
        self._Symb = SymbolTable
        self._Symb.update(self.LogicalOperators)
        self._Idx = 0
        self._Len = len(self._Expr)
        self._Token = ''
        self._WarnExcept = None

        # Literal token without any conversion
        self._LiteralToken = ''

    # Public entry for this class
    #   @param RealValue: False: only evaluate if the expression is true or false, used for conditional expression
    #                     True : return the evaluated str(value), used for PCD value
    #
    #   @return: True or False if RealValue is False
    #            Evaluated value of string format if RealValue is True
    #
    def __call__(self, RealValue=False, Depth=0):
        """Evaluate the expression; see the comment block above for the
        RealValue/return contract.  Depth tracks nested evaluations
        (PCD/GUID sub-expressions)."""
        if self._NoProcess:
            return self._Expr

        self._Depth = Depth

        self._Expr = self._Expr.strip()
        if RealValue and Depth == 0:
            # Fast path: a bare number or a complete {...} array needs no parse.
            self._Token = self._Expr
            if self.__IsNumberToken():
                return self._Expr

            try:
                Token = self._GetToken()
                if type(Token) == type('') and Token.startswith('{') and Token.endswith('}') and self._Idx >= self._Len:
                    return self._Expr
            except BadExpression:
                pass

            self._Idx = 0
            self._Token = ''

        Val = self._OrExpr()
        RealVal = Val
        if type(Val) == type(''):
            if Val == 'L""':
                Val = False
            elif not Val:
                Val = False
                RealVal = '""'
            elif not Val.startswith('L"') and not Val.startswith('{'):
                Val = True
                RealVal = '"' + RealVal + '"'

        # The expression has been parsed, but the end of expression is not reached
        # It means the rest does not comply EBNF of <Expression>
        if self._Idx != self._Len:
            raise BadExpression(ERR_SNYTAX % self._Expr[self._Idx:])

        if RealValue:
            RetVal = str(RealVal)
        elif Val:
            RetVal = True
        else:
            RetVal = False

        if self._WarnExcept:
            # Surface any deferred warning, tagged with the final result.
            self._WarnExcept.result = RetVal
            raise self._WarnExcept
        else:
            return RetVal

    # Template function to parse binary operators which have same precedence
    # Expr [Operator Expr]*
    def _ExprFuncTemplate(self, EvalFunc, OpLst):
        Val = EvalFunc()
        while self._IsOperator(OpLst):
            Op = self._Token
            try:
                Val = self.Eval(Op, Val, EvalFunc())
            except WrnExpression, Warn:
                # Remember the warning but keep parsing with its result.
                self._WarnExcept = Warn
                Val = Warn.result
        return Val

    # A [|| B]*
    def _OrExpr(self):
        return self._ExprFuncTemplate(self._AndExpr, ["OR", "or", "||"])

    # A [&& B]*
    def _AndExpr(self):
        return self._ExprFuncTemplate(self._BitOr, ["AND", "and", "&&"])

    # A [ | B]*
    def _BitOr(self):
        return self._ExprFuncTemplate(self._BitXor, ["|"])

    # A [ ^ B]*
    def _BitXor(self):
        return self._ExprFuncTemplate(self._BitAnd, ["XOR", "xor", "^"])

    # A [ & B]*
    def _BitAnd(self):
        return self._ExprFuncTemplate(self._EqExpr, ["&"])

    # A [ == B]*
    def _EqExpr(self):
        """Equality level; also handles IN / NOT IN as a two-token operator."""
        Val = self._RelExpr()
        while self._IsOperator(["==", "!=", "EQ", "NE", "IN", "in", "!", "NOT", "not"]):
            Op = self._Token
            if Op in ["!", "NOT", "not"]:
                # Only "NOT IN" is legal here; bare NOT is a unary operator.
                if not self._IsOperator(["IN", "in"]):
                    raise BadExpression(ERR_REL_NOT_IN)
                Op += ' ' + self._Token
            try:
                Val = self.Eval(Op, Val, self._RelExpr())
            except WrnExpression, Warn:
                self._WarnExcept = Warn
                Val = Warn.result
        return Val

    # A [ > B]*
    def _RelExpr(self):
        return self._ExprFuncTemplate(self._ShiftExpr, ["<=", ">=", "<", ">", "LE", "GE", "LT", "GT"])

    def _ShiftExpr(self):
        return self._ExprFuncTemplate(self._AddExpr, ["<<", ">>"])

    # A [ + B]*
    def _AddExpr(self):
        return self._ExprFuncTemplate(self._MulExpr, ["+", "-"])

    # A [ * B]*
    def _MulExpr(self):
        return self._ExprFuncTemplate(self._UnaryExpr, ["*", "/", "%"])

    # [!]*A
    def _UnaryExpr(self):
        if self._IsOperator(["!", "NOT", "not"]):
            Val = self._UnaryExpr()
            try:
                return self.Eval('not', Val)
            except WrnExpression, Warn:
                self._WarnExcept = Warn
                return Warn.result
        if self._IsOperator(["~"]):
            Val = self._UnaryExpr()
            try:
                return self.Eval('~', Val)
            except WrnExpression, Warn:
                self._WarnExcept = Warn
                return Warn.result
        return self._IdenExpr()

    # Parse identifier or encapsulated expression
    def _IdenExpr(self):
        Tk = self._GetToken()
        if Tk == '(':
            Val = self._OrExpr()
            try:
                # _GetToken may also raise BadExpression
                if self._GetToken() != ')':
                    raise BadExpression(ERR_MATCH)
            except BadExpression:
                raise BadExpression(ERR_MATCH)
            return Val
        return Tk

    # Skip whitespace or tab
    def __SkipWS(self):
        for Char in self._Expr[self._Idx:]:
            if Char not in ' \t':
                break
            self._Idx += 1

    # Try to convert string to number
    def __IsNumberToken(self):
        """Convert self._Token to int in place; True on success."""
        Radix = 10
        if self._Token.lower()[0:2] == '0x' and len(self._Token) > 2:
            Radix = 16
        try:
            self._Token = int(self._Token, Radix)
            return True
        except ValueError:
            return False
        except TypeError:
            return False

    # Parse array: {...}
    def __GetArray(self):
        """Consume a brace-delimited literal; accepts a byte array (each
        element at most 4 chars, e.g. 0xNN) or a GUID-shaped initializer."""
        Token = '{'
        self._Idx += 1
        self.__GetNList(True)
        Token += self._LiteralToken
        if self._Idx >= self._Len or self._Expr[self._Idx] != '}':
            raise BadExpression(ERR_ARRAY_TOKEN % Token)
        Token += '}'

        # All whitespace and tabs in array are already stripped.
        IsArray = IsGuid = False
        if len(Token.split(',')) == 11 and len(Token.split(',{')) == 2 \
            and len(Token.split('},')) == 1:
            # 11 comma-separated fields with one nested '{' — GUID shape.
            HexLen = [11,6,6,5,4,4,4,4,4,4,6]
            HexList= Token.split(',')
            if HexList[3].startswith('{') and \
                not [Index for Index, Hex in enumerate(HexList) if len(Hex) > HexLen[Index]]:
                IsGuid = True
        if Token.lstrip('{').rstrip('}').find('{') == -1:
            if not [Hex for Hex in Token.lstrip('{').rstrip('}').split(',') if len(Hex) > 4]:
                IsArray = True
        if not IsArray and not IsGuid:
            raise BadExpression(ERR_ARRAY_TOKEN % Token)
        self._Idx += 1
        self._Token = self._LiteralToken = Token
        return self._Token

    # Parse string, the format must be: "..."
    def __GetString(self):
        """Consume a double-quoted string; returns it without the quotes."""
        Idx = self._Idx

        # Skip left quote
        self._Idx += 1

        # Replace escape \\\", \"
        # (escaped backslashes/quotes are masked so the closing-quote scan
        # below cannot terminate on them)
        Expr = self._Expr[self._Idx:].replace('\\\\', '//').replace('\\\"', '\\\'')
        for Ch in Expr:
            self._Idx += 1
            if Ch == '"':
                break
        self._Token = self._LiteralToken = self._Expr[Idx:self._Idx]
        if not self._Token.endswith('"'):
            raise BadExpression(ERR_STRING_TOKEN % self._Token)
        self._Token = self._Token[1:-1]
        return self._Token

    # Get token that is comprised by alphanumeric, underscore or dot(used by PCD)
    # @param IsAlphaOp: Indicate if parsing general token or script operator(EQ, NE...)
    def __GetIdToken(self, IsAlphaOp = False):
        IdToken = ''
        for Ch in self._Expr[self._Idx:]:
            if not self.__IsIdChar(Ch):
                break
            self._Idx += 1
            IdToken += Ch

        self._Token = self._LiteralToken = IdToken
        if not IsAlphaOp:
            self.__ResolveToken()
        return self._Token

    # Try to resolve token
    def __ResolveToken(self):
        """Resolve an identifier: PCD lookup (recursively evaluated),
        TRUE/FALSE literal, or numeric conversion."""
        if not self._Token:
            raise BadExpression(ERR_EMPTY_TOKEN)

        # PCD token
        if self.PcdPattern.match(self._Token):
            if self._Token not in self._Symb:
                Ex = BadExpression(ERR_PCD_RESOLVE % self._Token)
                Ex.Pcd = self._Token
                raise Ex
            # Evaluate the PCD's own value expression one level deeper.
            self._Token = ValueExpression(self._Symb[self._Token], self._Symb)(True, self._Depth+1)
            if type(self._Token) != type(''):
                self._LiteralToken = hex(self._Token)
                return

        if self._Token.startswith('"'):
            self._Token = self._Token[1:-1]
        elif self._Token in ["FALSE", "false", "False"]:
            self._Token = False
        elif self._Token in ["TRUE", "true", "True"]:
            self._Token = True
        else:
            self.__IsNumberToken()

    def __GetNList(self, InArray=False):
        """Consume a comma-separated list of hex literals (array elements);
        outside an array a single non-hex token is returned as-is."""
        self._GetSingleToken()
        if not self.__IsHexLiteral():
            if InArray:
                raise BadExpression(ERR_ARRAY_ELE % self._Token)
            return self._Token

        self.__SkipWS()
        Expr = self._Expr[self._Idx:]
        if not Expr.startswith(','):
            return self._Token

        NList = self._LiteralToken
        while Expr.startswith(','):
            NList += ','
            self._Idx += 1
            self.__SkipWS()
            self._GetSingleToken()
            if not self.__IsHexLiteral():
                raise BadExpression(ERR_ARRAY_ELE % self._Token)
            NList += self._LiteralToken
            self.__SkipWS()
            Expr = self._Expr[self._Idx:]
        self._Token = self._LiteralToken = NList
        return self._Token

    def __IsHexLiteral(self):
        """True for a {..} blob or a 0x literal; normalizes the hex form
        (strips leading zeros, lower-cases digits) in _LiteralToken."""
        if self._LiteralToken.startswith('{') and \
            self._LiteralToken.endswith('}'):
            return True

        if self.HexPattern.match(self._LiteralToken):
            Token = self._LiteralToken[2:]
            Token = Token.lstrip('0')
            if not Token:
                self._LiteralToken = '0x0'
            else:
                self._LiteralToken = '0x' + Token.lower()
            return True
        return False

    def _GetToken(self):
        return self.__GetNList()

    @staticmethod
    def __IsIdChar(Ch):
        return Ch in '._:' or Ch.isalnum()

    # Parse operand
    def _GetSingleToken(self):
        """Consume one operand: L"..."/"..." string, GUID (evaluated to its
        C-structure form), identifier, {..} array, or a parenthesis."""
        self.__SkipWS()
        Expr = self._Expr[self._Idx:]
        if Expr.startswith('L"'):
            # Skip L
            self._Idx += 1
            UStr = self.__GetString()
            self._Token = 'L"' + UStr + '"'
            return self._Token

        self._Token = ''
        if Expr:
            Ch = Expr[0]
            Match = self.RegGuidPattern.match(Expr)
            # A registry-format GUID must not be immediately followed by an
            # identifier character, or it is part of a longer token.
            if Match and not Expr[Match.end():Match.end()+1].isalnum() \
                and Expr[Match.end():Match.end()+1] != '_':
                self._Idx += Match.end()
                self._Token = ValueExpression(GuidStringToGuidStructureString(Expr[0:Match.end()]))(True, self._Depth+1)
                return self._Token
            elif self.__IsIdChar(Ch):
                return self.__GetIdToken()
            elif Ch == '"':
                return self.__GetString()
            elif Ch == '{':
                return self.__GetArray()
            elif Ch == '(' or Ch == ')':
                self._Idx += 1
                self._Token = Ch
                return self._Token

        raise BadExpression(ERR_VALID_TOKEN % Expr)

    # Parse operator
    def _GetOperator(self):
        """Consume one operator token (alphabetic or symbolic); returns ''
        at end of expression or when the next char starts an operand."""
        self.__SkipWS()
        LegalOpLst = ['&&', '||', '!=', '==', '>=', '<='] + self.NonLetterOpLst

        self._Token = ''
        Expr = self._Expr[self._Idx:]

        # Reach end of expression
        if not Expr:
            return ''

        # Script operator: LT, GT, LE, GE, EQ, NE, and, or, xor, not
        if Expr[0].isalpha():
            return self.__GetIdToken(True)

        # Start to get regular operator: +, -, <, > ...
        if Expr[0] not in self.NonLetterOpLst:
            return ''

        OpToken = ''
        for Ch in Expr:
            if Ch in self.NonLetterOpLst:
                # '!' can only start an operator, not extend one ("a>!b").
                if '!' == Ch and OpToken:
                    break
                self._Idx += 1
                OpToken += Ch
            else:
                break

        if OpToken not in LegalOpLst:
            raise BadExpression(ERR_OPERATOR_UNSUPPORT % OpToken)
        self._Token = OpToken
        return OpToken

    # Check if current token matches the operators given from OpList
    def _IsOperator(self, OpList):
        """Try to consume an operator from OpList; on mismatch the parse
        position is restored and False is returned."""
        Idx = self._Idx
        self._GetOperator()
        if self._Token in OpList:
            if self._Token in self.LogicalOperators:
                self._Token = self.LogicalOperators[self._Token]
            return True
        self._Idx = Idx
        return False
if __name__ == '__main__':
pass
while True:
input = raw_input('Input expr: ')
if input in 'qQ':
break
try:
print ValueExpression(input)(True)
print ValueExpression(input)(False)
except WrnExpression, Ex:
print Ex.result
print str(Ex)
except Exception, Ex:
print str(Ex)
|
|
# all the imports
from flask import Flask, session, g, redirect, url_for, abort, \
render_template, flash
import os
from flask import jsonify, request
from faker import Factory
from twilio.access_token import AccessToken, IpMessagingGrant
from flask_mail import Message, Mail
from twilio.rest import TwilioRestClient
import sqlite3
app = Flask(__name__)
# create our little application :)
app.config.from_object(__name__)
app.config.update(
    DEBUG=True,
)
# Load default config and override config from an environment variable
# NOTE(review): secrets (SECRET_KEY, MAIL_PASSWORD, admin credentials) are
# hard-coded below; in production they should come only from the
# APP_SETTINGS override loaded further down.
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, 'app.db'),
    SECRET_KEY='development key',
    USERNAME='admin',
    PASSWORD='default',
    MAIL_SERVER='smtp.gmail.com',
    MAIL_PORT=587,
    MAIL_USE_TLS=True,
    MAIL_USE_SSL=False,
    MAIL_USERNAME='demomododo@gmail.com',
    MAIL_PASSWORD='hackathons',
))
app.config.from_envvar('APP_SETTINGS', silent=True)
# Mail client and fake-data factory used by the routes below.
mail = Mail(app)
fake = Factory.create()
#db functions
def connect_db():
    """Open a new SQLite connection to the configured database file.

    Rows are returned as sqlite3.Row so columns are accessible by name.
    """
    connection = sqlite3.connect(app.config['DATABASE'])
    connection.row_factory = sqlite3.Row
    return connection
def get_db():
    """Return the request-scoped SQLite connection, creating it on first use."""
    db = getattr(g, 'sqlite_db', None)
    if db is None:
        db = g.sqlite_db = connect_db()
    return db
@app.teardown_appcontext
def close_db(error):
    """Close the request-scoped database connection, if one was opened."""
    db = getattr(g, 'sqlite_db', None)
    if db is not None:
        db.close()
def init_db():
    """(Re)create the database schema by executing schema.sql."""
    db = get_db()
    with app.open_resource('schema.sql', mode='r') as schema_file:
        db.cursor().executescript(schema_file.read())
    db.commit()
# registers new command to flask command line tool called 'initdb' which runs init_db()
# (run with: flask initdb)
@app.cli.command('initdb')
def initdb_command():
    """Initializes the database."""
    init_db()
    # NOTE(review): Python 2 print statement — this route file mixes py2 and
    # py3 print styles; confirm the target interpreter.
    print 'Initialized the database.'
# twilio stuff
@app.route('/')
def index():
    """Serve the static landing page."""
    landing_page = 'index.html'
    return app.send_static_file(landing_page)
@app.route('/text')
def send_text():
    """Send a canned SMS via Twilio using the logged-in user's credentials.

    Always redirects to the login page, whether or not a message was sent.
    NOTE(review): destination/origin numbers and the body are hard-coded,
    and the debug prints below leak the account SID and auth token to
    stdout — remove before production.
    """
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    else:
        # Twilio credentials stored in the session at login time.
        sid = session.get('sid')
        tkn = session.get('token')
        print(sid)
        print(tkn)
        print("++++++++++++++++++++++++++++++")
        client = TwilioRestClient(account=sid, token=tkn)
        message = client.messages.create(to="+16073398907", from_="+16073912565", body="from a monkey with a typewriter")
        return redirect(url_for('login'))
@app.route('/token')
def token():
    """Mint a Twilio IP Messaging access token for the logged-in user.

    Returns JSON {identity, token}; redirects to login when unauthenticated.
    Requires TWILIO_API_KEY / TWILIO_API_SECRET / TWILIO_IPM_SERVICE_SID in
    the environment.
    """
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    else:
        # get credentials for environment variables
        account_sid = session.get('sid')
        print(session.get('sid'))
        api_key = os.environ['TWILIO_API_KEY']
        api_secret = os.environ['TWILIO_API_SECRET']
        service_sid = os.environ['TWILIO_IPM_SERVICE_SID']
        # create a randomly generated username for the client
        # NOTE(review): db handle is fetched but never used in this route.
        db = get_db()
        username = session.get('user')
        identity = username
        # Create a unique endpoint ID for the
        # (identity, device) pair so each device gets its own endpoint.
        device_id = request.args.get('device')
        endpoint = "TwilioChatDemo:{0}:{1}".format(identity, device_id)
        # Create access token with credentials
        token = AccessToken(account_sid, api_key, api_secret, identity)
        # Create an IP Messaging grant and add to token
        ipm_grant = IpMessagingGrant(endpoint_id=endpoint, service_sid=service_sid)
        token.add_grant(ipm_grant)
        # Return token info as JSON
        return jsonify(identity=identity, token=token.to_jwt())
# messaging
@app.route('/mail')
def send_mail():
    """Send a test email to the demo address and bounce back to login."""
    message = Message('yo', sender="demomododo@gmail.com")
    message.add_recipient("demomododo@gmail.com")
    mail.send(message)
    return redirect(url_for('login'))
# display table
@app.route('/view')
def show_entries():
    """Render all crime reports with their victim and suspect records.

    Requires a logged-in session; otherwise re-renders the login page.
    Fix: the per-person lookups previously built SQL by string
    concatenation; they now use parameterized queries.
    """
    if not session.get('logged_in'):
        return render_template('login.html', error='Please login and try again')
    db = get_db()
    cur = db.execute('select location,description,time,date,venue_name,venue_type from reports order by id desc')
    entries = cur.fetchall()
    victim_lst = []
    suspect_lst = []
    cur = db.execute('select victims,suspects from reports order by id desc')
    persons = cur.fetchall()
    for case in persons:
        # Each column holds a whitespace-separated list of person ids
        # (with a trailing separator that [:-1] strips).
        case_victims = []
        for victim in str.split(str(case[0][:-1])):
            victim_query = db.execute('select * from persons where id=?', (victim,))
            case_victims.append(victim_query.fetchall())
        victim_lst.append(case_victims)
        case_suspects = []
        for suspect in str.split(str(case[1][:-1])):
            suspect_query = db.execute('select * from persons where id=?', (suspect,))
            case_suspects.append(suspect_query.fetchall())
        suspect_lst.append(case_suspects)
    return render_template('view.html', entries=entries, victims=victim_lst, suspects=suspect_lst)
# shows
@app.route('/add', methods=['POST'])
def add_entry():
    """Insert a new entry from the posted form; requires authentication."""
    if not session.get('logged_in'):
        abort(401)
    title = request.form['title']
    text = request.form['text']
    db = get_db()
    db.execute('insert into entries (title, text) values (?, ?)', (title, text))
    db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
#please don't hack :]
@app.route('/register', methods=['GET', 'POST'])
def add_user():
    """Register a new user account.

    Validates that the tax id is unused and that the Twilio credentials look
    plausible, then stores the user row. On failure redirects back to the
    signup page with an error.
    Fixes: the uniqueness check previously interpolated the submitted tax id
    straight into the SQL (injection); and the row test used the *bitwise*
    '|' operator where logical 'or' was intended.
    NOTE(review): the password is stored in plaintext — it should be hashed
    (e.g. werkzeug.security.generate_password_hash).
    """
    error = None
    if request.method == 'POST':
        db = get_db()
        tax_id = request.form['tax_id']
        # Parameterized query — never build SQL from user input.
        _tax_id = db.execute('SELECT tax_id FROM users WHERE tax_id = ?', (tax_id,))
        row = _tax_id.fetchall()
        if row is None or len(row) == 0:
            if len(request.form['account_sid']) < 34:
                error = 'invalid account SID'
            elif len(request.form['auth_token']) < 32:
                error = 'invalid authentication token'
            else:
                db.execute('insert into users (tax_id, precinct, sector, password, first_name, last_name,' +
                           ' account_sid, auth_token) values (?, ?, ?, ?, ?, ?, ?, ?)',
                           [request.form['tax_id'], request.form['precinct'], request.form['sector'],
                            request.form['password'], request.form['first_name'], request.form['last_name'],
                            request.form['account_sid'], request.form['auth_token']])
                db.commit()
                flash('Successfully added user')
                return redirect(url_for('login'))
        else:
            flash('Username taken')
            error = 'invalid username'
    else:
        error = 'invalid data'
    return redirect(url_for('signup', error=error))
@app.route('/signup')
def signup():
    """Render the registration form."""
    template_name = 'register.html'
    return render_template(template_name)
@app.route('/login' , methods=['GET', 'POST'])
def login():
    """Authenticate a user by tax id and password.

    On success stores the user's Twilio credentials and name in the session.
    Fixes: the user lookup previously interpolated the submitted tax id into
    the SQL (injection); the row test used *bitwise* '&' where logical 'and'
    was intended; and the debug prints echoing the submitted tax id were
    removed.
    NOTE(review): passwords are compared in plaintext — they should be
    hashed (e.g. werkzeug.security.check_password_hash).
    """
    error = None
    if request.method == 'POST':
        db = get_db()
        tax_id = request.form['tax_id']
        # Parameterized query — never build SQL from user input.
        user = db.execute(
            'SELECT tax_id, password, account_sid, auth_token, last_name FROM users WHERE tax_id = ?',
            (tax_id,))
        row = user.fetchall()
        if row is not None and len(row) != 0:
            row = row[0]
            tax_id = row[0]
            pword = row[1]
            sid = row[2]
            token = row[3]
            last = row[4]
            if int(request.form['tax_id']) != int(tax_id):
                error = 'Invalid tax_id'
            elif request.form['password'] != pword:
                error = 'Invalid password'
            else:
                session['logged_in'] = True
                session['user'] = last
                session['sid'] = sid
                session['token'] = token
                flash('You were logged in')
                # return redirect(url_for('show_entries'))
        else:
            error = 'Invalid username'
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Clear the login-related session keys and return to the login page."""
    for key in ('logged_in', 'user', 'sid', 'token'):
        session.pop(key, None)
    flash('You were logged out')
    return redirect(url_for('login'))
if __name__ == '__main__':
    # Development server only — debug mode must not be enabled in production.
    app.run(debug=True)
|
|
"""Allow users to set and activate scenes."""
from __future__ import annotations
import logging
from typing import Any, NamedTuple
import voluptuous as vol
from homeassistant import config as conf_util
from homeassistant.components.light import ATTR_TRANSITION
from homeassistant.components.scene import DOMAIN as SCENE_DOMAIN, STATES, Scene
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
CONF_ENTITIES,
CONF_ICON,
CONF_ID,
CONF_NAME,
CONF_PLATFORM,
SERVICE_RELOAD,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, HomeAssistant, State, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import (
config_per_platform,
config_validation as cv,
entity_platform,
)
from homeassistant.helpers.state import async_reproduce_state
from homeassistant.loader import async_get_integration
def _convert_states(states):
    """Convert a mapping of entity_id -> state definition into State objects.

    A definition may be a plain state value or a dict whose remaining keys
    become the state's attributes.
    """
    converted = {}
    for entity_id, info in states.items():
        entity_id = cv.entity_id(entity_id)

        if isinstance(info, dict):
            attributes = info.copy()
            state = attributes.pop(ATTR_STATE, None)
        else:
            state = info
            attributes = {}

        # YAML translates 'on' to a boolean
        # http://yaml.org/type/bool.html
        if isinstance(state, bool):
            state = STATE_ON if state else STATE_OFF
        elif not isinstance(state, str):
            raise vol.Invalid(f"State for {entity_id} should be a string")

        converted[entity_id] = State(entity_id, state, attributes)

    return converted
def _ensure_no_intersection(value):
    """Validate that entities and snapshot_entities do not overlap."""
    if CONF_SNAPSHOT in value and CONF_ENTITIES in value:
        overlapping = any(
            entity_id in value[CONF_SNAPSHOT] for entity_id in value[CONF_ENTITIES]
        )
        if overlapping:
            raise vol.Invalid("entities and snapshot_entities must not overlap")
    return value
# Keys accepted by the scene.create service payload.
CONF_SCENE_ID = "scene_id"
CONF_SNAPSHOT = "snapshot_entities"
# hass.data key under which the entity platform is stored for later reuse.
DATA_PLATFORM = "homeassistant_scene"
EVENT_SCENE_RELOADED = "scene_reloaded"
# Coerces a mapping of entity_id -> state definition into State objects.
STATES_SCHEMA = vol.All(dict, _convert_states)
# YAML configuration schema: a list of named scenes.
PLATFORM_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_PLATFORM): HA_DOMAIN,
        vol.Required(STATES): vol.All(
            cv.ensure_list,
            [
                vol.Schema(
                    {
                        vol.Optional(CONF_ID): cv.string,
                        vol.Required(CONF_NAME): cv.string,
                        vol.Optional(CONF_ICON): cv.icon,
                        vol.Required(CONF_ENTITIES): STATES_SCHEMA,
                    }
                )
            ],
        ),
    },
    extra=vol.ALLOW_EXTRA,
)
# scene.create service schema: explicit entity states and/or entities to
# snapshot; the two lists must not overlap.
CREATE_SCENE_SCHEMA = vol.All(
    cv.has_at_least_one_key(CONF_ENTITIES, CONF_SNAPSHOT),
    _ensure_no_intersection,
    vol.Schema(
        {
            vol.Required(CONF_SCENE_ID): cv.slug,
            vol.Optional(CONF_ENTITIES, default={}): STATES_SCHEMA,
            vol.Optional(CONF_SNAPSHOT, default=[]): cv.entity_ids,
        }
    ),
)
SERVICE_APPLY = "apply"
SERVICE_CREATE = "create"
_LOGGER = logging.getLogger(__name__)
class SceneConfig(NamedTuple):
    """Object for storing scene config."""

    # Unique id; None for scenes created dynamically via the create service.
    id: str
    # Display name; also used to derive the entity_id for created scenes.
    name: str
    # Optional icon; None when not configured.
    icon: str
    # Mapping of entity_id -> State the scene should reproduce.
    states: dict
@callback
def scenes_with_entity(hass: HomeAssistant, entity_id: str) -> list[str]:
    """Return all scenes that reference the entity."""
    platform = hass.data.get(DATA_PLATFORM)
    if platform is None:
        return []
    matching = []
    for scene_entity in platform.entities.values():
        if entity_id in scene_entity.scene_config.states:
            matching.append(scene_entity.entity_id)
    return matching
@callback
def entities_in_scene(hass: HomeAssistant, entity_id: str) -> list[str]:
    """Return all entities in a scene."""
    platform = hass.data.get(DATA_PLATFORM)
    if platform is None:
        return []
    scene_entity = platform.entities.get(entity_id)
    if scene_entity is None:
        return []
    return list(scene_entity.scene_config.states)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up Home Assistant scene entries.

    Adds the configured scenes, then (once per HA run) registers the
    reload, apply, and create services.
    """
    _process_scenes_config(hass, async_add_entities, config)

    # This platform can be loaded multiple times. Only first time register the service.
    if hass.services.has_service(SCENE_DOMAIN, SERVICE_RELOAD):
        return

    # Store platform for later.
    platform = hass.data[DATA_PLATFORM] = entity_platform.async_get_current_platform()

    async def reload_config(call):
        """Reload the scene config from YAML and recreate all scene entities."""
        try:
            conf = await conf_util.async_hass_config_yaml(hass)
        except HomeAssistantError as err:
            _LOGGER.error(err)
            return

        integration = await async_get_integration(hass, SCENE_DOMAIN)

        conf = await conf_util.async_process_component_config(hass, conf, integration)

        if not (conf and platform):
            return

        # Drop the existing scene entities before re-adding from config.
        await platform.async_reset()

        # Extract only the config for the Home Assistant platform, ignore the rest.
        for p_type, p_config in config_per_platform(conf, SCENE_DOMAIN):
            if p_type != HA_DOMAIN:
                continue
            _process_scenes_config(hass, async_add_entities, p_config)

        hass.bus.async_fire(EVENT_SCENE_RELOADED, context=call.context)

    hass.helpers.service.async_register_admin_service(
        SCENE_DOMAIN, SERVICE_RELOAD, reload_config
    )

    async def apply_service(call):
        """Apply a scene: reproduce the given entity states directly."""
        reproduce_options = {}

        if ATTR_TRANSITION in call.data:
            reproduce_options[ATTR_TRANSITION] = call.data.get(ATTR_TRANSITION)

        await async_reproduce_state(
            hass,
            call.data[CONF_ENTITIES].values(),
            context=call.context,
            reproduce_options=reproduce_options,
        )

    hass.services.async_register(
        SCENE_DOMAIN,
        SERVICE_APPLY,
        apply_service,
        vol.Schema(
            {
                vol.Optional(ATTR_TRANSITION): vol.All(
                    vol.Coerce(float), vol.Clamp(min=0, max=6553)
                ),
                vol.Required(CONF_ENTITIES): STATES_SCHEMA,
            }
        ),
    )

    async def create_service(call):
        """Create a scene entity from explicit states and/or a snapshot of
        the current states of the listed entities."""
        snapshot = call.data[CONF_SNAPSHOT]
        entities = call.data[CONF_ENTITIES]

        for entity_id in snapshot:
            if (state := hass.states.get(entity_id)) is None:
                _LOGGER.warning(
                    "Entity %s does not exist and therefore cannot be snapshotted",
                    entity_id,
                )
                continue
            entities[entity_id] = State(entity_id, state.state, state.attributes)

        if not entities:
            _LOGGER.warning("Empty scenes are not allowed")
            return

        # Dynamically created scenes have no id or icon.
        scene_config = SceneConfig(None, call.data[CONF_SCENE_ID], None, entities)
        entity_id = f"{SCENE_DOMAIN}.{scene_config.name}"
        if (old := platform.entities.get(entity_id)) is not None:
            # Only service-created scenes may be replaced; YAML scenes win.
            if not old.from_service:
                _LOGGER.warning("The scene %s already exists", entity_id)
                return
            await platform.async_remove_entity(entity_id)
        async_add_entities([HomeAssistantScene(hass, scene_config, from_service=True)])

    hass.services.async_register(
        SCENE_DOMAIN, SERVICE_CREATE, create_service, CREATE_SCENE_SCHEMA
    )
def _process_scenes_config(hass, async_add_entities, config):
    """Build HomeAssistantScene entities from config and add them."""
    scene_definitions = config[STATES]

    # Nothing to do for an empty scene list.
    if not scene_definitions:
        return

    scene_entities = []
    for definition in scene_definitions:
        scene_config = SceneConfig(
            definition.get(CONF_ID),
            definition[CONF_NAME],
            definition.get(CONF_ICON),
            definition[CONF_ENTITIES],
        )
        scene_entities.append(HomeAssistantScene(hass, scene_config))

    async_add_entities(scene_entities)
class HomeAssistantScene(Scene):
    """A scene is a group of entities and the states we want them to be."""

    def __init__(self, hass, scene_config, from_service=False):
        """Initialize the scene from its SceneConfig."""
        self.hass = hass
        self.scene_config = scene_config
        # True when the scene was created via the scene.create service.
        self.from_service = from_service

    @property
    def name(self):
        """Return the name of the scene."""
        return self.scene_config.name

    @property
    def icon(self):
        """Return the icon of the scene."""
        return self.scene_config.icon

    @property
    def unique_id(self):
        """Return unique ID."""
        return self.scene_config.id

    @property
    def extra_state_attributes(self):
        """Return the scene state attributes."""
        attributes = {ATTR_ENTITY_ID: list(self.scene_config.states)}
        unique_id = self.unique_id
        if unique_id is not None:
            attributes[CONF_ID] = unique_id
        return attributes

    async def async_activate(self, **kwargs: Any) -> None:
        """Activate scene. Try to get entities into requested state."""
        target_states = self.scene_config.states.values()
        await async_reproduce_state(
            self.hass,
            target_states,
            context=self._context,
            reproduce_options=kwargs,
        )
|
|
import datetime
from flask import (
abort,
Blueprint,
jsonify,
make_response,
redirect,
render_template,
request,
session,
url_for,
)
from pyre_extensions import none_throws
from werkzeug.wrappers import Response
from backend.common.auth import (
create_session_cookie,
current_user,
revoke_session_cookie,
)
from backend.common.consts.auth_type import (
WRITE_TYPE_NAMES as AUTH_TYPE_WRITE_TYPE_NAMES,
)
from backend.common.consts.model_type import ModelType
from backend.common.environment import Environment
from backend.common.helpers.event_helper import EventHelper
from backend.common.helpers.match_helper import MatchHelper
from backend.common.helpers.season_helper import SeasonHelper
from backend.common.sitevars.notifications_enable import NotificationsEnable
from backend.web.decorators import enforce_login, require_login, require_login_only
from backend.web.redirect import is_safe_url, safe_next_redirect
# All account-management routes below are served under the /account prefix.
blueprint = Blueprint("account", __name__, url_prefix="/account")
@blueprint.route("")
@require_login
def overview() -> str:
    """Render the account overview page with any pending status messages."""
    context = {
        # One-shot status set by other account routes (popped so it shows once).
        "status": session.pop("account_status", None),
        "webhook_verification_success": request.args.get(
            "webhook_verification_success"
        ),
        "ping_sent": request.args.get("ping_sent"),
        "ping_enabled": NotificationsEnable.notifications_enabled(),
        "auth_write_type_names": AUTH_TYPE_WRITE_TYPE_NAMES,
    }
    return render_template("account_overview.html", **context)
@blueprint.route("/register", methods=["GET", "POST"])
@require_login_only
def register() -> Response:
    """Register the logged-in user by setting their display name.

    GET renders the form (carrying along a safe `next` destination);
    POST validates the submitted account id against the session user and
    registers the display name.
    Fix: the GET branch's local variable `next` shadowed the builtin
    `next()`; renamed to `next_url`.
    """
    response = safe_next_redirect(url_for("account.overview"))
    user = none_throws(current_user())

    # Redirects if already registered
    if user.is_registered:
        return response

    if request.method == "POST":
        error_response = redirect("/")

        # The posted account id must match the authenticated user's uid.
        account_id = request.form.get("account_id")
        if not account_id or not account_id == user.uid:
            return error_response

        display_name = request.form.get("display_name")
        if not display_name:
            return error_response

        user.register(display_name)
        return response
    else:
        next_url = request.args.get("next")
        # Make sure `next` is safe - otherwise drop it
        next_url = next_url if is_safe_url(next_url) else None
        return make_response(render_template("account_register.html", next=next_url))
@blueprint.route("/edit", methods=["GET", "POST"])
@require_login
def edit() -> Response:
    """Show and process the account display-name edit form.

    Failures redirect back to this form with a one-shot edit status;
    success redirects to the overview page.
    """
    if request.method == "POST":
        failure_response = redirect(url_for("account.edit"))
        user = none_throws(current_user())

        account_id = request.form.get("account_id")
        if not account_id or account_id != user.uid:
            session["account_edit_status"] = "account_edit_failure"
            return failure_response

        display_name = request.form.get("display_name")
        if not display_name:
            session["account_edit_status"] = "account_edit_failure_name"
            return failure_response

        user.update_display_name(display_name)
        _set_account_status("account_edit_success")
        return redirect(url_for("account.overview"))

    edit_status = session.pop("account_edit_status", None)
    return make_response(render_template("account_edit.html", status=edit_status))
@blueprint.route("/login", methods=["GET", "POST"])
def login() -> Response:
    """Log a user in via a posted id token, or render the login page.

    Fix: the GET branch previously rendered the template with `next=next`,
    which passed the *builtin* `next` function — the `next` query parameter
    was never read. It is now read from the request and dropped unless safe,
    mirroring `register`.
    """
    if request.method == "POST":
        id_token = request.form.get("id_token")
        if not id_token:
            abort(400)

        expires_in = datetime.timedelta(days=5)
        response = jsonify({"status": "success"})
        create_session_cookie(id_token, expires_in)
        return response
    else:
        if current_user():
            return redirect(url_for("account.overview"))

        next_url = request.args.get("next")
        # Make sure `next` is safe - otherwise drop it
        next_url = next_url if is_safe_url(next_url) else None

        auth_emulator_host = Environment.auth_emulator_host()
        return make_response(
            render_template(
                "account_login_required.html",
                next=next_url,
                auth_emulator_host=auth_emulator_host,
            )
        )
@blueprint.route("/logout")
@require_login_only
def logout() -> Response:
    """Revoke the session cookie and redirect to a safe destination."""
    revoke_session_cookie()
    fallback_destination = "/"
    return safe_next_redirect(fallback_destination)
@blueprint.route("/api/read_key_add", methods=["POST"])
@enforce_login
def read_key_add() -> Response:
    """Create an API read key for the current user from the posted description."""
    response = redirect(url_for("account.overview"))

    description = request.form.get("description")
    if not description:
        _set_account_status("read_key_add_no_description")
        return response

    user = none_throws(current_user())
    try:
        user.add_api_read_key(description)
    except Exception:
        _set_account_status("read_key_add_failure")
    else:
        _set_account_status("read_key_add_success")
    return response
@blueprint.route("/api/read_key_delete", methods=["POST"])
@enforce_login
def read_key_delete() -> Response:
    """Delete one of the current user's API read keys by its key id."""
    response = redirect(url_for("account.overview"))

    key_id = request.form.get("key_id")
    if not key_id:
        _set_account_status("read_key_delete_failure")
        return response

    user = none_throws(current_user())
    api_key = user.api_read_key(key_id)
    if not api_key:
        _set_account_status("read_key_delete_failure")
        return response

    user.delete_api_key(api_key)
    _set_account_status("read_key_delete_success")
    return response
def _set_account_status(status: str) -> None:
    """Stash a one-shot status code for the overview page to display."""
    session["account_status"] = status
@blueprint.route("/mytba")
@require_login
def mytba() -> str:
    """Render the myTBA page: the user's events, teams, and matches, each
    paired with its favorite and subscription objects (either may be None)."""
    user = none_throws(current_user())
    mytba = user.myTBA
    mytba_events = EventHelper.sorted_events(mytba.events)
    mytba_teams = sorted(mytba.teams, key=lambda team: team.team_number)
    # Build (event, naturally-sorted matches) pairs, with events ordered
    # via EventHelper.sorted_events.
    mytba_event_matches = mytba.event_matches
    mytba_event_matches_events = EventHelper.sorted_events(
        [event_key.get() for event_key in mytba_event_matches.keys()]
    )
    event_matches = [
        (event, MatchHelper.natural_sorted_matches(mytba_event_matches[event.key]))
        for event in mytba_event_matches_events
    ]
    template_values = {
        # (event, favorite, subscription) triples
        "event_fav_sub": [
            (
                event,
                mytba.favorite(ModelType.EVENT, none_throws(event.key.string_id())),
                mytba.subscription(ModelType.EVENT, none_throws(event.key.string_id())),
            )
            for event in mytba_events
        ],
        # (team, favorite, subscription) triples
        "team_fav_sub": [
            (
                team,
                mytba.favorite(ModelType.TEAM, none_throws(team.key.string_id())),
                mytba.subscription(ModelType.TEAM, none_throws(team.key.string_id())),
            )
            for team in mytba_teams
        ],
        # (event, [(match, favorite, subscription), ...]) pairs
        "event_match_fav_sub": [
            (
                event,
                [
                    (
                        match,
                        mytba.favorite(
                            ModelType.MATCH, none_throws(match.key.string_id())
                        ),
                        mytba.subscription(
                            ModelType.MATCH, none_throws(match.key.string_id())
                        ),
                    )
                    for match in matches
                ],
            )
            for (event, matches) in event_matches
        ],
        # "status": request.get('status'),
        "year": SeasonHelper.effective_season_year(),
    }
    return render_template("mytba.html", **template_values)
# class myTBAAddHotMatchesController(LoggedInHandler):
# def get(self, event_key=None):
# self._require_registration()
#
# if event_key is None:
# events = EventHelper.getEventsWithinADay()
# EventHelper.sorted_events(events)
# self.template_values['events'] = events
# self.response.out.write(jinja2_engine.render('mytba_add_hot_matches_base.html', self.template_values))
# return
#
# event = Event.get_by_id(event_key)
# if not event:
# self.abort(404)
#
# subscriptions_future = Subscription.query(
# Subscription.model_type==ModelType.MATCH,
# Subscription.notification_types==NotificationType.UPCOMING_MATCH,
# ancestor=self.user_bundle.account.key).fetch_async(projection=[Subscription.model_key])
#
# matches = []
# if event.details and event.details.predictions and event.details.predictions['match_predictions']:
# match_predictions = dict(
# event.details.predictions['match_predictions']['qual'].items() +
# event.details.predictions['match_predictions']['playoff'].items())
# max_hotness = 0
# min_hotness = float('inf')
# for match in event.matches:
# if not match.has_been_played and match.key.id() in match_predictions:
# prediction = match_predictions[match.key.id()]
# red_score = prediction['red']['score']
# blue_score = prediction['blue']['score']
# if red_score > blue_score:
# winner_score = red_score
# loser_score = blue_score
# else:
# winner_score = blue_score
# loser_score = red_score
#
# hotness = winner_score + 2.0*loser_score # Favor close high scoring matches
#
# max_hotness = max(max_hotness, hotness)
# min_hotness = min(min_hotness, hotness)
# match.hotness = hotness
# matches.append(match)
#
# existing_subscriptions = set()
# for sub in subscriptions_future.get_result():
# existing_subscriptions.add(sub.model_key)
#
# hot_matches = []
# for match in matches:
# match.hotness = 100 * (match.hotness - min_hotness) / (max_hotness - min_hotness)
# match.already_subscribed = match.key.id() in existing_subscriptions
# hot_matches.append(match)
# hot_matches = sorted(hot_matches, key=lambda match: -match.hotness)
# matches_dict = {'qm': hot_matches[:25]}
#
# self.template_values['event'] = event
# self.template_values['matches'] = matches_dict
#
# self.response.out.write(jinja2_engine.render('mytba_add_hot_matches.html', self.template_values))
#
# def post(self, event_key):
# self._require_registration()
#
# current_user_id = self.user_bundle.account.key.id()
#
# event = Event.get_by_id(event_key)
# subscribed_matches = set(self.request.get_all('subscribed_matches'))
#
# for match in event.matches:
# if not match.has_been_played:
# match_key = match.key.id()
# if match.key.id() in subscribed_matches:
# sub = Subscription(
# parent=ndb.Key(Account, current_user_id),
# user_id=current_user_id,
# model_type=ModelType.MATCH,
# model_key=match_key,
# notification_types=[NotificationType.UPCOMING_MATCH]
# )
# MyTBAHelper.add_subscription(sub)
# else:
# MyTBAHelper.remove_subscription(current_user_id, match_key, ModelType.MATCH)
#
# self.redirect('/account/mytba?status=match_updated#my-matches'.format(event_key))
#
#
# class MyTBAEventController(LoggedInHandler):
# def get(self, event_key):
# self._require_registration()
#
# # Handle wildcard for all events in a year
# event = None
# is_wildcard = False
# if event_key.endswith('*'):
# try:
# year = int(event_key[:-1])
# except:
# year = None
# if year and year in tba_config.VALID_YEARS:
# event = Event( # fake event for rendering
# name="ALL {} EVENTS".format(year),
# year=year,
# )
# is_wildcard = True
# else:
# event = Event.get_by_id(event_key)
#
# if not event:
# self.abort(404)
#
# user = self.user_bundle.account.key
# favorite = Favorite.query(Favorite.model_key==event_key, Favorite.model_type==ModelType.EVENT, ancestor=user).get()
# subscription = Subscription.query(Favorite.model_key==event_key, Favorite.model_type==ModelType.EVENT, ancestor=user).get()
#
# if not favorite and not subscription: # New entry; default to being a favorite
# is_favorite = True
# else:
# is_favorite = favorite is not None
#
# enabled_notifications = [(en, NotificationType.render_names[en]) for en in NotificationType.enabled_event_notifications]
#
# self.template_values['event'] = event
# self.template_values['is_wildcard'] = is_wildcard
# self.template_values['is_favorite'] = is_favorite
# self.template_values['subscription'] = subscription
# self.template_values['enabled_notifications'] = enabled_notifications
#
# self.response.out.write(jinja2_engine.render('mytba_event.html', self.template_values))
#
# def post(self, event_key):
# self._require_registration()
#
# current_user_id = self.user_bundle.account.key.id()
#
# if self.request.get('favorite'):
# favorite = Favorite(
# parent=ndb.Key(Account, current_user_id),
# user_id=current_user_id,
# model_type=ModelType.EVENT,
# model_key=event_key
# )
# MyTBAHelper.add_favorite(favorite)
# else:
# MyTBAHelper.remove_favorite(current_user_id, event_key, ModelType.EVENT)
#
# subs = self.request.get_all('notification_types')
# if subs:
# subscription = Subscription(
# parent=ndb.Key(Account, current_user_id),
# user_id=current_user_id,
# model_type=ModelType.EVENT,
# model_key=event_key,
# notification_types=[int(s) for s in subs]
# )
# MyTBAHelper.add_subscription(subscription)
# else:
# MyTBAHelper.remove_subscription(current_user_id, event_key, ModelType.EVENT)
#
# self.redirect('/account/mytba?status=event_updated#my-events')
#
#
# class MyTBAMatchController(LoggedInHandler):
# def get(self, match_key):
# self._require_registration()
#
# match = Match.get_by_id(match_key)
#
# if not match:
# self.abort(404)
#
# user = self.user_bundle.account.key
# favorite = Favorite.query(Favorite.model_key==match_key, Favorite.model_type==ModelType.MATCH, ancestor=user).get()
# subscription = Subscription.query(Favorite.model_key==match_key, Favorite.model_type==ModelType.MATCH, ancestor=user).get()
#
# if not favorite and not subscription: # New entry; default to being a favorite
# is_favorite = True
# else:
# is_favorite = favorite is not None
#
# enabled_notifications = [(en, NotificationType.render_names[en]) for en in NotificationType.enabled_match_notifications]
#
# self.template_values['match'] = match
# self.template_values['is_favorite'] = is_favorite
# self.template_values['subscription'] = subscription
# self.template_values['enabled_notifications'] = enabled_notifications
#
# self.response.out.write(jinja2_engine.render('mytba_match.html', self.template_values))
#
# def post(self, match_key):
# self._require_registration()
#
# current_user_id = self.user_bundle.account.key.id()
# match = Match.get_by_id(match_key)
#
# if self.request.get('favorite'):
# favorite = Favorite(
# parent=ndb.Key(Account, current_user_id),
# user_id=current_user_id,
# model_type=ModelType.MATCH,
# model_key=match_key
# )
# MyTBAHelper.add_favorite(favorite)
# else:
# MyTBAHelper.remove_favorite(current_user_id, match_key, ModelType.MATCH)
#
# subs = self.request.get_all('notification_types')
# if subs:
# subscription = Subscription(
# parent=ndb.Key(Account, current_user_id),
# user_id=current_user_id,
# model_type=ModelType.MATCH,
# model_key=match_key,
# notification_types=[int(s) for s in subs]
# )
# MyTBAHelper.add_subscription(subscription)
# else:
# MyTBAHelper.remove_subscription(current_user_id, match_key, ModelType.MATCH)
#
# self.redirect('/account/mytba?status=match_updated#my-matches')
#
#
# class MyTBATeamController(LoggedInHandler):
# def get(self, team_number):
# self._require_registration()
#
# team_key = 'frc{}'.format(team_number)
# team = Team.get_by_id(team_key)
#
# if not team:
# self.abort(404)
#
# user = self.user_bundle.account.key
# favorite = Favorite.query(Favorite.model_key==team_key, Favorite.model_type==ModelType.TEAM, ancestor=user).get()
# subscription = Subscription.query(Favorite.model_key==team_key, Favorite.model_type==ModelType.TEAM, ancestor=user).get()
#
# if not favorite and not subscription: # New entry; default to being a favorite
# is_favorite = True
# else:
# is_favorite = favorite is not None
#
# enabled_notifications = [(en, NotificationType.render_names[en]) for en in NotificationType.enabled_team_notifications]
#
# self.template_values['team'] = team
# self.template_values['is_favorite'] = is_favorite
# self.template_values['subscription'] = subscription
# self.template_values['enabled_notifications'] = enabled_notifications
#
# self.response.out.write(jinja2_engine.render('mytba_team.html', self.template_values))
#
# def post(self, team_number):
# self._require_registration()
#
# current_user_id = self.user_bundle.account.key.id()
# team_key = 'frc{}'.format(team_number)
#
# if self.request.get('favorite'):
# favorite = Favorite(
# parent=ndb.Key(Account, current_user_id),
# user_id=current_user_id,
# model_type=ModelType.TEAM,
# model_key=team_key
# )
# MyTBAHelper.add_favorite(favorite)
# else:
# MyTBAHelper.remove_favorite(current_user_id, team_key, ModelType.TEAM)
#
# subs = self.request.get_all('notification_types')
# if subs:
# subscription = Subscription(
# parent=ndb.Key(Account, current_user_id),
# user_id=current_user_id,
# model_type=ModelType.TEAM,
# model_key=team_key,
# notification_types=[int(s) for s in subs]
# )
# MyTBAHelper.add_subscription(subscription)
# else:
# MyTBAHelper.remove_subscription(current_user_id, team_key, ModelType.TEAM)
#
# self.redirect('/account/mytba?status=team_updated#my-teams')
|
|
import os
import re
import sys
import json
import time
import cutil
import urllib
import logging
import requests
from PIL import Image # pip install pillow
from io import BytesIO
from parsel import Selector
from bs4 import BeautifulSoup
from web_wrapper.selenium_utils import SeleniumHTTPError
logger = logging.getLogger(__name__)
"""
Things to add:
- (selenium) Scroll to load page
- (selenium) check if elem is on view and clickable
- (requests) screenshot
"""
class Web:
    """
    Web related functions

    Need to be on its own that way each profile can have its own instance of it for proxy support

    NOTE(review): this base class relies on subclass/driver-provided members
    (``driver_type``, ``get_headers``, ``get_proxy``, ``_get_site``,
    ``chrome_fullpage_screenshot``, ``quit``) — confirm against the concrete
    driver implementations.
    """

    def __init__(self, headers=None, cookies=None, proxy=None, **driver_args):
        """Create a web client shell.

        :param headers: initial request headers (``None`` -> empty dict)
        :param cookies: initial cookies (``None`` -> empty dict)
        :param proxy: proxy to start with, if any
        :param driver_args: stored for the underlying driver
        """
        self.scraper = None
        self.driver = None
        self.driver_args = driver_args
        self.current_proxy = proxy
        # Number of times to re-try a url
        self._num_retries = 3
        # Bug fix: `headers={}` / `cookies={}` were mutable default arguments
        # shared across every instance; use None sentinels instead.
        self.current_headers = headers if headers is not None else {}
        self.current_cookies = cookies if cookies is not None else {}
        # Set the default response values
        self._reset_response()

    def _reset_response(self):
        """
        Vars to track per request made
        Run before every get_site to clear previous values
        """
        self.status_code = None
        self.url = None
        self.response = None

    def get_image_dimension(self, url):
        """
        Return a tuple that contains (width, height)
        Pass in a url to an image and find out its size without loading the whole file
        If the image wxh could not be found, the tuple will contain `None` values
        """
        w_h = (None, None)
        try:
            if url.startswith('//'):
                url = 'http:' + url
            data = requests.get(url).content
            im = Image.open(BytesIO(data))
            w_h = im.size
        except Exception:
            logger.warning("Error getting image size {}".format(url), exc_info=True)

        return w_h

    def get_soup(self, raw_content, input_type='html'):
        """Parse raw markup with BeautifulSoup; ``input_type`` is 'html' or 'xml'."""
        rdata = None
        if input_type == 'html':
            rdata = BeautifulSoup(raw_content, 'html.parser')  # Other option: html5lib
        elif input_type == 'xml':
            rdata = BeautifulSoup(raw_content, 'lxml')

        return rdata

    def screenshot(self, save_path, element=None, delay=0):
        """
        This can be used no matter what driver that is being used
            * ^ Soon requests support will be added

        Save a screenshot of the current page to `save_path` (a ``.png``
        extension is enforced). If `element` is passed, crop to that element.

        Return the filepath of the image, or None when `save_path` is None
        """
        if save_path is None:
            logger.error("save_path cannot be None")
            return None

        save_location = cutil.norm_path(save_path)
        cutil.create_path(save_location)
        # Bug fix: the format string had no placeholder and always logged the
        # literal text "(unknown)".
        logger.info("Taking screenshot: {filename}".format(filename=save_location))
        # Use .png extension for the user's save file.
        # Bug fix: this used to happen *after* the screenshot was written, so
        # the returned path did not match the file on disk.
        if not save_location.endswith('.png'):
            save_location += '.png'

        if not self.driver_type.startswith('selenium'):
            logger.debug("Create tmp phantomjs web driver for screenshot")
            # Create a tmp phantom driver to take the screenshot for us
            from web_wrapper import DriverSeleniumPhantomJS
            headers = self.get_headers()  # Get headers to pass to the driver
            proxy = self.get_proxy()  # Get the current proxy being used if any
            # TODO: ^ Do the same thing for cookies
            screenshot_web = DriverSeleniumPhantomJS(headers=headers, proxy=proxy)
            screenshot_web.get_site(self.url, page_format='raw')
            screenshot_driver = screenshot_web.driver
        else:
            screenshot_driver = self.driver

        # If a background color does need to be set
        # self.driver.execute_script('document.body.style.background = "{}"'.format('white'))

        # Give the page some extra time to load
        time.sleep(delay)
        if self.driver_type == 'selenium_chrome':
            # Need to do this for chrome to get a fullpage screenshot
            self.chrome_fullpage_screenshot(save_location, delay)
        else:
            screenshot_driver.get_screenshot_as_file(save_location)

        # If an element was passed, just get that element so crop the screenshot
        if element is not None:
            logger.debug("Crop screenshot")
            # Crop the image
            el_location = element.location
            el_size = element.size
            try:
                cutil.crop_image(save_location,
                                 output_file=save_location,
                                 width=int(el_size['width']),
                                 height=int(el_size['height']),
                                 x=int(el_location['x']),
                                 y=int(el_location['y']),
                                 )
            except Exception as e:
                raise e.with_traceback(sys.exc_info()[2])

        if not self.driver_type.startswith('selenium'):
            # Quit the tmp driver created to take the screenshot
            screenshot_web.quit()

        return save_location

    def new_proxy(self):
        """Hook: return a new proxy to use. Subclasses/users may override."""
        raise NotImplementedError

    def new_headers(self):
        """Hook: return a new header dict to use. Subclasses/users may override."""
        raise NotImplementedError

    def _try_new_proxy(self):
        """Swap in a proxy from new_proxy(); a missing hook is only a warning."""
        try:
            new_proxy = self.new_proxy()
            self.set_proxy(new_proxy)
        except NotImplementedError:
            logger.warning("No function new_proxy() found, not changing proxy")
        except Exception:
            logger.exception("Something went wrong when getting a new proxy")

    def _try_new_headers(self):
        """Swap in headers from new_headers(); a missing hook is only a warning."""
        try:
            new_headers = self.new_headers()
            self.set_headers(new_headers)
        except NotImplementedError:
            logger.warning("No function new_headers() found, not changing headers")
        except Exception:
            logger.exception("Something went wrong when getting a new header")

    def new_profile(self):
        """Rotate both proxy and headers (used between retries)."""
        logger.info("Create a new profile to use")
        self._try_new_proxy()
        self._try_new_headers()

    ###########################################################################
    #
    #    Get/load page
    #
    ###########################################################################
    def get_site(self, url, cookies=None, page_format='html', return_on_error=None, retry_enabled=True,
                 num_tries=0, num_apikey_tries=0, headers=None, api=False, track_stat=True, timeout=30,
                 force_requests=False, driver_args=(), driver_kwargs=None, parser='beautifulsoup',
                 custom_source_checks=None):
        """
        Fetch `url` and return it parsed per `page_format`/`parser` (None on failure).

        headers & cookies - Will update to the current headers/cookies and just be for this request
        driver_args & driver_kwargs - Gets passed and expanded out to the driver
        """
        self._reset_response()
        num_tries += 1
        # Save args and kwargs so they can be used for trying the function again
        tmp_args = locals().copy()
        get_site_args = [tmp_args['url']]
        # Remove keys that dont belong to the keywords passed in
        del tmp_args['url']
        del tmp_args['self']
        get_site_kwargs = tmp_args

        # Bug fix: these used to be mutable default arguments ({} / []) shared
        # across every call; normalize the None sentinels here instead.
        headers = {} if headers is None else headers
        cookies = {} if cookies is None else cookies
        return_on_error = [] if return_on_error is None else return_on_error
        custom_source_checks = [] if custom_source_checks is None else custom_source_checks
        # Bug fix: work on a copy so neither the caller's dict nor a shared
        # default is mutated by the key deletions below.
        driver_kwargs = dict(driver_kwargs) if driver_kwargs else {}

        # Check driver_kwargs for anything that we already set
        kwargs_cannot_be = ['headers', 'cookies', 'timeout']
        for key_name in kwargs_cannot_be:
            if driver_kwargs.get(key_name) is not None:
                del driver_kwargs[key_name]
                logger.warning("Cannot pass `{key}` in driver_kwargs to get_site(). `{key}` is already set by default"
                               .format(key=key_name))

        # Check if a url is being passed in
        if url is None:
            logger.error("Url cannot be None")
            return None

        ##
        # url must start with http....
        ##
        prepend = ''
        if url.startswith('//'):
            prepend = 'http:'
        elif not url.startswith('http'):
            prepend = 'http://'
        url = prepend + url

        ##
        # Try and get the page
        ##
        rdata = None
        try:
            source_text = self._get_site(url, headers, cookies, timeout, driver_args, driver_kwargs)
            if custom_source_checks:
                # Check if there are any custom check to run
                for re_text, status_code in custom_source_checks:
                    if re.search(re_text, source_text):
                        if self.response is None:
                            # This is needed when using selenium and we still need to pass in the 'response'
                            self.response = type('', (), {})()
                        self.response.status_code = status_code
                        self.status_code = status_code
                        raise requests.exceptions.HTTPError("Custom matched status code", response=self.response)
            rdata = self.parse_source(source_text, page_format, parser)

        ##
        # Exceptions from Selenium
        ##
        # Nothing yet

        ##
        # Exceptions from Requests
        ##
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e:
            """
            Try again with a new profile (do not get new apikey)
            Wait n seconds before trying again
            """
            e_name = type(e).__name__
            if num_tries < self._num_retries and retry_enabled is True:
                logger.info("{} [get_site]: try #{} on {} Error {}".format(e_name, num_tries, url, e))
                time.sleep(2)
                self.new_profile()
                return self.get_site(*get_site_args, **get_site_kwargs)
            else:
                # Bug fix: added the missing space between "on" and the url.
                logger.error("{} [get_site]: try #{} on {}".format(e_name, num_tries, url))

        except requests.exceptions.TooManyRedirects as e:
            logger.exception("TooManyRedirects [get_site]: {}".format(url))

        ##
        # Exceptions shared by Selenium and Requests
        ##
        except (requests.exceptions.HTTPError, SeleniumHTTPError) as e:
            """
            Check the status code returned to see what should be done
            """
            status_code = str(e.response.status_code)
            # If the client wants to handle the error send it to them
            if int(status_code) in return_on_error:
                raise e.with_traceback(sys.exc_info()[2])

            try_again = self._get_site_status_code(url, status_code, api, num_tries, num_apikey_tries)
            if try_again is True and retry_enabled is True:
                # If True then try request again
                return self.get_site(*get_site_args, **get_site_kwargs)

        # Every other exceptions that were not caught
        except Exception:
            logger.exception("Unknown Exception [get_site]: {url}".format(url=url))

        return rdata

    def _get_site_status_code(self, url, status_code, api, num_tries, num_apikey_tries):
        """
        Check the http status code and num_tries/num_apikey_tries to see if it should try again or not
        Log any data as needed

        Returns True to retry, None otherwise.
        """
        # Make status code an int
        try:
            status_code = int(status_code)
        except ValueError:
            logger.exception("Incorrect status code passed in")
            return None

        # TODO: Try with the same api key 3 times, then try with with a new apikey the same way for 3 times as well
        # try_profile_again = False
        # if api is True and num_apikey_tries < self._num_retries:
        #     # Try with the same apikey/profile again after a short wait
        #     try_profile_again = True

        # Retry for any status code in the 400's or greater
        if status_code >= 400 and num_tries < self._num_retries:
            # Fail after 3 tries
            logger.info("HTTP error, try #{}, Status: {} on url: {}".format(num_tries, status_code, url),
                        extra={'status_code': status_code,
                               'num_tries': num_tries,
                               'url': url})
            time.sleep(.5)
            self.new_profile()
            return True

        else:
            logger.warning("HTTPError [get_site]\n\t# of Tries: {}\n\tCode: {} - {}"
                           .format(num_tries, status_code, url),
                           extra={'status_code': status_code,
                                  'num_tries': num_tries,
                                  'url': url})
        return None

    def parse_source(self, source, page_format, parser):
        """Parse raw page `source` per `page_format` ('html'|'json'|'xml'|'raw') and `parser`."""
        rdata = None
        if page_format == 'html':
            if parser == 'beautifulsoup':
                rdata = self.get_soup(source, input_type='html')
            elif parser == 'parsel':
                rdata = Selector(text=source)
            else:
                logger.error("No parser passed for parsing html")
        elif page_format == 'json':
            if self.driver_type == 'requests':
                rdata = json.loads(source)
            else:
                # Selenium renders json inside a <body> tag
                rdata = json.loads(self.driver.find_element_by_tag_name('body').text)
        elif page_format == 'xml':
            rdata = self.get_soup(source, input_type='xml')
        elif page_format == 'raw':
            # Return unparsed html
            rdata = source

        return rdata

    def download(self, url, save_path, header=None, redownload=False):
        """
        Currently does not use the proxied driver
        TODO: Be able to use cookies just like headers is used here

        :param header: extra headers merged on top of the driver's headers
        :return: the path of the file that was saved (None on failure)
        """
        if save_path is None:
            logger.error("save_path cannot be None")
            return None

        # Bug fix: the caller-supplied `header` used to be overwritten by
        # self.get_headers() and then merged with itself (a no-op). Merge the
        # extra headers on top of the driver's headers instead.
        request_headers = dict(self.get_headers())
        if header:
            request_headers.update(header)

        logger.debug("Download {url} to {save_path}".format(url=url, save_path=save_path))
        save_location = cutil.norm_path(save_path)
        if redownload is False:
            # See if we already have the file
            if os.path.isfile(save_location):
                logger.debug("File {save_location} already exists".format(save_location=save_location))
                return save_location

        # Create the dir path on disk
        cutil.create_path(save_location)
        if url.startswith('//'):
            url = "http:" + url
        try:
            with urllib.request.urlopen(urllib.request.Request(url, headers=request_headers)) as response,\
                    open(save_location, 'wb') as out_file:
                data = response.read()
                out_file.write(data)
        except urllib.error.HTTPError as e:
            save_location = None
            # We do not need to show the user 404 errors
            if e.code != 404:
                logger.exception("Download Http Error {url}".format(url=url))
        except Exception:
            save_location = None
            logger.exception("Download Error: {url}".format(url=url))

        return save_location
|
|
# Weighted Constraint Satisfaction Problems -- MPE inference on MLNs
#
# (C) 2012 by Daniel Nyga (nyga@cs.tum.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from collections import defaultdict
from dnutils import logs
from .infer import Inference
from ..constants import infty, HARD
from ..errors import SatisfiabilityException, MRFValueException
from ..grounding.fastconj import FastConjunctionGrounding
from ..mrfvars import FuzzyVariable
from ..util import (combinations, dict_union, Interval, temporary_evidence)
from ...wcsp import Constraint, WCSP
from ...logic.common import Logic
logger = logs.getlogger(__name__)
class WCSPInference(Inference):
    """MPE inference on an MRF by reduction to a weighted CSP (WCSP)."""

    def __init__(self, mrf, queries, **params):
        Inference.__init__(self, mrf, queries, **params)

    def _run(self):
        """Solve the WCSP and map each query atom to its MPE truth value."""
        mpe_result = {}
        with temporary_evidence(self.mrf):
            self.converter = WCSPConverter(self.mrf, multicore=self.multicore, verbose=self.verbose)
            assignment = self.result_dict(verbose=self.verbose)
            for query in self.queries:
                query = str(query)
                # Fall back to the MRF's current value for atoms the solver
                # did not assign (e.g. evidence-determined variables).
                mpe_result[query] = assignment[query] if query in assignment else self.mrf[query]
        return mpe_result

    def result_dict(self, verbose=False):
        """
        Returns a Database object with the most probable truth assignment.
        """
        wcsp = self.converter.convert()
        solution, _ = wcsp.solve()
        if solution is None:
            raise Exception('MLN is unsatisfiable.')
        atom_values = {}
        for varidx, validx in enumerate(solution):
            value = self.converter.domains[varidx][validx]
            atom_values.update(self.converter.variables[varidx].value2dict(value))
        return {str(self.mrf.gndatom(idx)): val for idx, val in atom_values.items()}
class WCSPConverter(object):
"""
Class for converting an MLN into a WCSP problem for efficient
MPE inference.
"""
def __init__(self, mrf, verbose=False, multicore=False):
self.mrf = mrf
self.constraints = {} # mapping the signature of a constaint to its constraint object
self.verbose = verbose
self._createvars()
self.wcsp = WCSP()
self.wcsp.domsizes = [len(self.domains[i]) for i in self.variables]
self.multicore = multicore
def _createvars(self):
"""
Create the variables, one binary for each ground atom.
Considers also mutually exclusive blocks of ground atoms.
"""
self.variables = {} # maps an index to its MRF variable
self.domains = defaultdict(list) # maps a var index to a list of its MRF variable value tuples
self.atom2var = {} # maps ground atom indices to their variable index
self.val2idx = defaultdict(dict)
varidx = 0
for variable in self.mrf.variables:
if isinstance(variable, FuzzyVariable): # fuzzy variables are not subject to reasoning
continue
if variable.valuecount(self.mrf.evidence_dicti()) == 1: # the var is fully determined by the evidence
for _, value in variable.itervalues(self.mrf.evidence_dicti()):
break
self.mrf.set_evidence(variable.value2dict(value), erase=False)
continue
self.variables[varidx] = variable
for gndatom in variable.gndatoms:
self.atom2var[gndatom.idx] = varidx
for validx, (_, value) in enumerate(variable.itervalues(self.mrf.evidence_dicti())):
self.domains[varidx].append(value)
self.val2idx[varidx][value] = validx
varidx += 1
def convert(self):
"""
Performs a conversion from an MLN into a WCSP.
"""
# mln to be restored after inference
self._weights = list(self.mrf.mln.weights)
mln = self.mrf.mln
logic = mln.logic
# preprocess the formulas
formulas = []
for f in self.mrf.formulas:
if f.weight == 0:
continue
if f.weight < 0:
f = logic.negate(f)
f.weight = -f.weight
formulas.append(f.nnf())
# preprocess the ground formulas
# grounder = DefaultGroundingFactory(self.mrf, formulas=formulas, simplify=True, unsatfailure=True, multicore=self.multicore, verbose=self.verbose)
grounder = FastConjunctionGrounding(self.mrf, simplify=True, unsatfailure=True, formulas=formulas, multicore=self.multicore, verbose=self.verbose, cache=0)
for gf in grounder.itergroundings():
if isinstance(gf, Logic.TrueFalse):
if gf.weight == HARD and gf.truth() == 0:
raise SatisfiabilityException('MLN is unsatisfiable: hard constraint %s violated' % self.mrf.mln.formulas[gf.idx])
else:# formula is rendered true/false by the evidence -> equal in every possible world
continue
self.generate_constraint(gf)
self.mrf.mln.weights = self._weights
return self.wcsp
def generate_constraint(self, wf):
"""
Generates and adds a constraint from a given weighted formula.
"""
varindices = tuple([self.atom2var[x] for x in wf.gndatom_indices()])
seen = set()
varindices_ = []
for v in varindices:
if v in seen: continue
varindices_.append(v)
seen.add(v)
varindices = tuple(varindices_)
# collect the constraint tuples
cost2assignments = self._gather_constraint_tuples(varindices, wf)
if cost2assignments is None:
return
defcost = max(cost2assignments, key=lambda x: infty if cost2assignments[x] == 'else' else len(cost2assignments[x]))
del cost2assignments[defcost] # remove the default cost values
constraint = Constraint(varindices, defcost=defcost)
for cost, tuples in cost2assignments.items():
for t in tuples:
constraint.tuple(t, cost)
self.wcsp.constraint(constraint)
def _gather_constraint_tuples(self, varindices, formula):
"""
Collects and evaluates all tuples that belong to the constraint
given by a formula. In case of disjunctions and conjunctions,
this is fairly efficient since not all combinations
need to be evaluated. Returns a dictionary mapping the constraint
costs to the list of respective variable assignments.
"""
logic = self.mrf.mln.logic
# we can treat conjunctions and disjunctions fairly efficiently
defaultProcedure = False
conj = logic.islitconj(formula)
disj = False
if not conj:
disj = logic.isclause(formula)
if not varindices:
return None
if not conj and not disj:
defaultProcedure = True
if not defaultProcedure:
assignment = [None] * len(varindices)
children = list(formula.literals())
for gndlit in children:
# constants are handled in the maxtruth/mintruth calls below
if isinstance(gndlit, Logic.TrueFalse): continue
# get the value of the gndlit that renders the formula true (conj) or false (disj):
# for a conjunction, the literal must be true,
# for a disjunction, it must be false.
(gndatom, val) = (gndlit.gndatom, not gndlit.negated)
if disj: val = not val
val = 1 if val else 0
variable = self.variables[self.atom2var[gndatom.idx]]
# in case there are multiple values of a variable that may render the formula true
# we cannot apply this efficient implementation and have to fall back to the naive impl.
tmp_evidence = variable.value2dict(variable.evidence_value())
evval = tmp_evidence.get(gndatom.idx)
if evval is not None and evval != val:
# the supposed value of the variable and the evidence value mismatch,
# so the conjunction (disjunction) can never be rendered true (false)
return
tmp_evidence = dict_union(tmp_evidence, {gndatom.idx: val})
if variable.valuecount(tmp_evidence) > 1:
defaultProcedure = True
break
# there is only one value remaining
for _, value in variable.itervalues(tmp_evidence):
varidx = self.atom2var[gndatom.idx]
validx = self.val2idx[varidx][value]
# if there are two different values needed to render the formula true...
if assignment[varindices.index(varidx)] is not None and assignment[varindices.index(varidx)] != value:
if formula.weight == HARD:
if conj: # ...if it's a hard conjunction, the MLN is unsatisfiable -- e.g. foo(x) ^ !foo(x)
raise SatisfiabilityException('Knowledge base is unsatisfiable due to hard constraint violation: %s' % formula)
elif disj: # ...if it's a hard disjunction, it's a tautology -- e.g. foo(x) v !foo(x)
continue
else: # for soft constraints, unsatisfiable formulas and tautologies can be ignored
return None
assignment[varindices.index(varidx)] = validx
if not defaultProcedure:
maxtruth = formula.maxtruth(self.mrf.evidence)
mintruth = formula.mintruth(self.mrf.evidence)
if formula.weight == HARD and (maxtruth in Interval(']0,1[') or mintruth in Interval(']0,1[')):
raise MRFValueException('No fuzzy truth values are allowed in hard constraints.')
if conj:
if formula.weight == HARD:
cost = 0
defcost = self.wcsp.top
else:
cost = formula.weight * (1 - maxtruth)
defcost = formula.weight
else:
if formula.weight == HARD:
cost = self.wcsp.top
defcost = 0
else:
defcost = 0
cost = formula.weight * (1 - mintruth)
if len(assignment) != len(varindices):
raise MRFValueException('Illegal variable assignments. Variables: %s, Assignment: %s' % (varindices, assignment))
return {cost: [tuple(assignment)], defcost: 'else'}
if defaultProcedure:
# fallback: go through all combinations of truth assignments
domains = [self.domains[v] for v in varindices]
cost2assignments = defaultdict(list)
# compute number of worlds to be examined and print a warning
worlds = 1
for d in domains: worlds *= len(d)
if worlds > 1000000:
logger.warning('!!! WARNING: %d POSSIBLE WORLDS ARE GOING TO BE EVALUATED. KEEP IN SIGHT YOUR MEMORY CONSUMPTION !!!' % worlds)
for c in combinations(domains):
world = [0] * len(self.mrf.gndatoms)
assignment = []
for varidx, value in zip(varindices, c):
world = self.variables[varidx].setval(value, world)
assignment.append(self.val2idx[varidx][value])
# the MRF feature imposed by this formula
truth = formula(world)
if truth is None:
print('POSSIBLE WORLD:')
print('===============')
self.mrf.print_world_vars(world)
print('GROUND FORMULA:')
print('===============')
formula.print_structure(world)
raise Exception('Something went wrong: Truth of ground formula cannot be evaluated (see above)')
if truth in Interval(']0,1[') and formula.weight == HARD:
raise MRFValueException('No fuzzy truth values are allowed in hard constraints.')
if formula.weight == HARD:
if truth == 1:
cost = 0
else:
cost = self.wcsp.top
else:
cost = ((1 - truth) * formula.weight)
cost2assignments[cost].append(tuple(assignment))
return cost2assignments
assert False # unreachable
def forbid_gndatom(self, atom, truth=True):
"""
Adds a unary constraint that prohibits the given ground atom
being true.
"""
atomidx = atom if type(atom) is int else (self.mrf.gndatom(atom).idx if type(atom) is str else atom.idx)
varidx = self.atom2var[atomidx]
variable = self.variables[varidx]
evidence = list(self.mrf.evidence)
evidence[atomidx] = {True: 1, False: 0}[truth]
c = Constraint((varidx,))
for _, value in variable.itervalues(evidence):
validx = self.val2idx[varidx][value]
c.tuple((validx,), self.wcsp.top)
self.wcsp.constraint(c)
    def getPseudoDistributionForGndAtom(self, gndAtom):
        """
        Computes a relative "distribution" for all possible variable assignments of
        a mutex constraint. This can be used to determine the confidence in particular
        most probable world by comparing the score with the second-most probable one.
        """
        # NOTE(review): this method appears to be stale legacy code. It uses
        # attribute names (gndAtom2VarIndex, varIdx2GndAtom, mrf.gndAtoms)
        # that no other method in this class uses, and it calls
        # self.forbidGndAtom(atom, wcsp) although the method defined above is
        # forbid_gndatom(atom, truth=True) -- so `wcsp` would be passed as the
        # truth flag. Verify before relying on it.
        if isinstance(gndAtom, str):
            gndAtom = self.mrf.gndAtoms[gndAtom]
        if not isinstance(gndAtom, Logic.GroundAtom):
            raise Exception('Argument must be a ground atom')
        varIdx = self.gndAtom2VarIndex[gndAtom]
        valIndices = list(range(len(self.varIdx2GndAtom[varIdx])))
        # a variable forms a mutex block iff it groups more than one ground atom
        mutex = len(self.varIdx2GndAtom[varIdx]) > 1
        if not mutex:
            raise Exception("Pseudo distribution is provided for mutex constraints only.")
        wcsp = self.convert()
        atoms = []
        cost = []
        try:
            # Repeatedly solve the WCSP, record the best value's cost, then
            # forbid that value -- enumerating the cost of every value of the
            # mutex variable in ascending order of cost.
            while len(valIndices) > 0:
                s, c = wcsp.solve()
                # bare `raise` with no active exception raises RuntimeError,
                # which the bare `except` below swallows -- used as loop exit
                if s is None: raise
                val = s[varIdx]
                atom = self.varIdx2GndAtom[varIdx][val]
                self.forbidGndAtom(atom, wcsp)
                valIndices.remove(val)
                cost.append(c)
                atoms.append(atom)
        # NOTE(review): bare except hides real errors (incl. the suspected
        # bad forbidGndAtom call above) -- should catch a specific exception
        except: pass
        # Convert raw costs to relative scores: lower cost -> higher score.
        c_max = max(cost)
        for i, c in enumerate(cost):
            cost[i] = c_max - c
        c_sum = sum(cost)
        # NOTE(review): raises ZeroDivisionError when all costs are equal
        # (c_sum == 0); confirm whether that case can occur for callers.
        for i, c in enumerate(cost):
            cost[i] = float(c) / c_sum
        return dict([(a,c) for a, c in zip(atoms, cost)])
# for debugging only
if __name__ == '__main__':
    # No standalone behavior; this module is meant to be imported.
    # The commented-out snippet below is a historical manual smoke test.
    pass
    # mln = MLN('/home/nyga/code/prac/models/experimental/deep_sense/priors.mln')
    # db = Database(mln, '/home/nyga/code/prac/models/experimental/deep_sense/db/1.db')
    # mrf = mln.groundMRF(db)
    #
    # conv = WCSPConverter(mrf)
    # wcsp = conv.convert()
    # wcsp.write(sys.stdout)
    # solution = wcsp.solve()
    # for i, s in enumerate(solution):
    #     print conv.varIdx2GndAtom[i][0], s,
    #     for ga in conv.varIdx2GndAtom[i]: print ga,
    #     print
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from django.utils import timezone
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard import usage
INDEX_URL = reverse('horizon:project:overview:index')
class UsageViewTests(test.TestCase):
    """Tests for the project 'overview' usage view.

    All OpenStack API entry points are stubbed with mox: the ``_stub_*``
    helpers record shared expectations, each test records its own
    additional ones, and ``self.mox.ReplayAll()`` switches mox into
    replay mode before the view is requested.
    """
    def _stub_nova_api_calls(self, nova_stu_enabled=True):
        # Stub the nova API and record one SimpleTenantUsage extension
        # check; note that every caller records a second identical
        # expectation after invoking this helper.
        self.mox.StubOutWithMock(api.nova, 'usage_get')
        self.mox.StubOutWithMock(api.nova, 'tenant_absolute_limits')
        self.mox.StubOutWithMock(api.nova, 'extension_supported')
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(nova_stu_enabled)
    def _stub_cinder_api_calls(self):
        # Stub cinder and return the canned absolute volume limits.
        self.mox.StubOutWithMock(api.cinder, 'tenant_absolute_limits')
        api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.cinder_limits['absolute'])
    def _stub_neutron_api_calls(self, neutron_sg_enabled=True):
        # Stub neutron floating IPs and, when enabled, security groups.
        self.mox.StubOutWithMock(api.neutron, 'is_extension_supported')
        self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
        if neutron_sg_enabled:
            self.mox.StubOutWithMock(api.network, 'security_group_list')
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest),
            'security-group').AndReturn(neutron_sg_enabled)
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        if neutron_sg_enabled:
            api.network.security_group_list(IsA(http.HttpRequest)) \
                .AndReturn(self.q_secgroups.list())
    def test_usage(self):
        self._test_usage(nova_stu_enabled=True)
    def test_usage_disabled(self):
        self._test_usage(nova_stu_enabled=False)
    def _test_usage(self, nova_stu_enabled):
        # Usage page with neutron + cinder stubs, SimpleTenantUsage on/off.
        now = timezone.now()
        usage_obj = api.nova.NovaUsage(self.usages.first())
        self._stub_nova_api_calls(nova_stu_enabled)
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(nova_stu_enabled)
        if nova_stu_enabled:
            # The view queries usage from the first of the month until the
            # end of the current day.
            api.nova.usage_get(IsA(http.HttpRequest), self.tenant.id,
                               datetime.datetime(now.year,
                                                 now.month,
                                                 1, 0, 0, 0, 0),
                               datetime.datetime(now.year,
                                                 now.month,
                                                 now.day, 23, 59, 59, 0)) \
                .AndReturn(usage_obj)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        self._stub_neutron_api_calls()
        self._stub_cinder_api_calls()
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:overview:index'))
        usages = res.context['usage']
        self.assertTemplateUsed(res, 'project/overview/usage.html')
        self.assertTrue(isinstance(usages, usage.ProjectUsage))
        self.assertEqual(nova_stu_enabled,
                         res.context['simple_tenant_usage_enabled'])
        if nova_stu_enabled:
            self.assertContains(res, 'form-inline')
        else:
            self.assertNotContains(res, 'form-inline')
        self.assertEqual(usages.limits['maxTotalFloatingIps'], float("inf"))
    def test_usage_nova_network(self):
        self._test_usage_nova_network(nova_stu_enabled=True)
    def test_usage_nova_network_disabled(self):
        self._test_usage_nova_network(nova_stu_enabled=False)
    def _test_usage_nova_network(self, nova_stu_enabled):
        # Same as _test_usage but with the neutron/cinder services reported
        # as disabled, so limits come from nova-network (10, not inf).
        now = timezone.now()
        usage_obj = api.nova.NovaUsage(self.usages.first())
        self.mox.StubOutWithMock(api.base, 'is_service_enabled')
        self._stub_nova_api_calls(nova_stu_enabled)
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(nova_stu_enabled)
        if nova_stu_enabled:
            api.nova.usage_get(IsA(http.HttpRequest), self.tenant.id,
                               datetime.datetime(now.year,
                                                 now.month,
                                                 1, 0, 0, 0, 0),
                               datetime.datetime(now.year,
                                                 now.month,
                                                 now.day, 23, 59, 59, 0)) \
                .AndReturn(usage_obj)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
            .MultipleTimes().AndReturn(False)
        api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
            .MultipleTimes().AndReturn(False)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:overview:index'))
        usages = res.context['usage']
        self.assertTemplateUsed(res, 'project/overview/usage.html')
        self.assertTrue(isinstance(usages, usage.ProjectUsage))
        self.assertEqual(nova_stu_enabled,
                         res.context['simple_tenant_usage_enabled'])
        if nova_stu_enabled:
            self.assertContains(res, 'form-inline')
        else:
            self.assertNotContains(res, 'form-inline')
        self.assertEqual(usages.limits['maxTotalFloatingIps'], 10)
    def test_unauthorized(self):
        # An Unauthorized error from usage_get surfaces as one error
        # message on the rendered page.
        exc = self.exceptions.nova_unauthorized
        now = timezone.now()
        self._stub_nova_api_calls()
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.usage_get(IsA(http.HttpRequest), self.tenant.id,
                           datetime.datetime(now.year,
                                             now.month,
                                             1, 0, 0, 0, 0),
                           datetime.datetime(now.year,
                                             now.month,
                                             now.day, 23, 59, 59, 0)) \
            .AndRaise(exc)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
            .AndReturn(self.limits['absolute'])
        self._stub_neutron_api_calls()
        self._stub_cinder_api_calls()
        self.mox.ReplayAll()
        url = reverse('horizon:project:overview:index')
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/overview/usage.html')
        self.assertMessageCount(res, error=1)
        self.assertContains(res, 'Unauthorized:')
    def test_usage_csv(self):
        self._test_usage_csv(nova_stu_enabled=True)
    def test_usage_csv_disabled(self):
        self._test_usage_csv(nova_stu_enabled=False)
    def _test_usage_csv(self, nova_stu_enabled=True):
        # The ?format=csv variant renders the CSV template.
        now = timezone.now()
        usage_obj = api.nova.NovaUsage(self.usages.first())
        self._stub_nova_api_calls(nova_stu_enabled)
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(nova_stu_enabled)
        start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
        end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)
        if nova_stu_enabled:
            api.nova.usage_get(IsA(http.HttpRequest),
                               self.tenant.id,
                               start, end).AndReturn(usage_obj)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
            .AndReturn(self.limits['absolute'])
        self._stub_neutron_api_calls()
        self._stub_cinder_api_calls()
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:overview:index') +
                              "?format=csv")
        self.assertTemplateUsed(res, 'project/overview/usage.csv')
        self.assertTrue(isinstance(res.context['usage'], usage.ProjectUsage))
    def test_usage_exception_usage(self):
        # A nova error from usage_get yields an empty usage list.
        now = timezone.now()
        start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
        end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)
        self._stub_nova_api_calls()
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.usage_get(IsA(http.HttpRequest),
                           self.tenant.id,
                           start, end).AndRaise(self.exceptions.nova)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
            .AndReturn(self.limits['absolute'])
        self._stub_neutron_api_calls()
        self._stub_cinder_api_calls()
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:overview:index'))
        self.assertTemplateUsed(res, 'project/overview/usage.html')
        self.assertEqual(res.context['usage'].usage_list, [])
    def test_usage_exception_quota(self):
        # A nova error from tenant_absolute_limits yields empty quotas.
        now = timezone.now()
        usage_obj = api.nova.NovaUsage(self.usages.first())
        self._stub_nova_api_calls()
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(True)
        start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
        end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)
        api.nova.usage_get(IsA(http.HttpRequest),
                           self.tenant.id,
                           start, end).AndReturn(usage_obj)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
            .AndRaise(self.exceptions.nova)
        self._stub_neutron_api_calls()
        self._stub_cinder_api_calls()
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:overview:index'))
        self.assertTemplateUsed(res, 'project/overview/usage.html')
        self.assertEqual(res.context['usage'].quotas, {})
    def test_usage_default_tenant(self):
        # Happy path against the default tenant.
        now = timezone.now()
        usage_obj = api.nova.NovaUsage(self.usages.first())
        self._stub_nova_api_calls()
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(True)
        start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
        end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)
        api.nova.usage_get(IsA(http.HttpRequest),
                           self.tenant.id,
                           start, end).AndReturn(usage_obj)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
            .AndReturn(self.limits['absolute'])
        self._stub_neutron_api_calls()
        self._stub_cinder_api_calls()
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:overview:index'))
        self.assertTemplateUsed(res, 'project/overview/usage.html')
        self.assertTrue(isinstance(res.context['usage'], usage.ProjectUsage))
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_usage_with_neutron(self):
        self._test_usage_with_neutron(neutron_sg_enabled=True)
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_usage_with_neutron_nova_security_group(self):
        self._test_usage_with_neutron(neutron_sg_enabled=False)
    def _test_usage_with_neutron_prepare(self):
        # Record the nova/cinder expectations and stub the neutron/network
        # entry points; the neutron expectations themselves are recorded by
        # the caller so each scenario can vary them.
        now = timezone.now()
        usage_obj = api.nova.NovaUsage(self.usages.first())
        self._stub_nova_api_calls()
        self._stub_cinder_api_calls()
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(True)
        self.mox.StubOutWithMock(api.neutron, 'tenant_quota_get')
        self.mox.StubOutWithMock(api.neutron, 'is_extension_supported')
        self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
        self.mox.StubOutWithMock(api.network, 'security_group_list')
        start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
        end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)
        api.nova.usage_get(IsA(http.HttpRequest),
                           self.tenant.id,
                           start, end).AndReturn(usage_obj)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
            .AndReturn(self.limits['absolute'])
    def _test_usage_with_neutron(self, neutron_sg_enabled=True):
        self._test_usage_with_neutron_prepare()
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'quotas').AndReturn(True)
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest),
            'security-group').AndReturn(neutron_sg_enabled)
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        if neutron_sg_enabled:
            api.network.security_group_list(IsA(http.HttpRequest)) \
                .AndReturn(self.q_secgroups.list())
        api.neutron.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
            .AndReturn(self.neutron_quotas.first())
        self.mox.ReplayAll()
        self._test_usage_with_neutron_check(neutron_sg_enabled)
    def _test_usage_with_neutron_check(self, neutron_sg_enabled=True,
                                       max_fip_expected=50,
                                       max_sg_expected=20):
        res = self.client.get(reverse('horizon:project:overview:index'))
        self.assertContains(res, 'Floating IPs')
        self.assertContains(res, 'Security Groups')
        res_limits = res.context['usage'].limits
        # Make sure the floating IPs comes from Neutron (50 vs. 10)
        max_floating_ips = res_limits['maxTotalFloatingIps']
        self.assertEqual(max_floating_ips, max_fip_expected)
        if neutron_sg_enabled:
            # Make sure the security group limit comes from Neutron (20 vs. 10)
            max_security_groups = res_limits['maxSecurityGroups']
            self.assertEqual(max_security_groups, max_sg_expected)
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_usage_with_neutron_quotas_ext_error(self):
        # A neutron error on the 'quotas' extension check falls back to
        # unlimited floating IPs / security groups.
        self._test_usage_with_neutron_prepare()
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'quotas').AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        self._test_usage_with_neutron_check(max_fip_expected=float("inf"),
                                            max_sg_expected=float("inf"))
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_usage_with_neutron_sg_ext_error(self):
        # A neutron error on the 'security-group' extension check also
        # falls back to unlimited.
        self._test_usage_with_neutron_prepare()
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'quotas').AndReturn(True)
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest),
            'security-group').AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        self._test_usage_with_neutron_check(max_fip_expected=float("inf"),
                                            max_sg_expected=float("inf"))
    def test_usage_with_cinder(self):
        self._test_usage_cinder(cinder_enabled=True)
    def test_usage_without_cinder(self):
        self._test_usage_cinder(cinder_enabled=False)
    def _test_usage_cinder(self, cinder_enabled):
        # Volume limits appear only when the 'volume' service is enabled.
        now = timezone.now()
        usage_obj = api.nova.NovaUsage(self.usages.first())
        self.mox.StubOutWithMock(api.base, 'is_service_enabled')
        self._stub_nova_api_calls(True)
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(True)
        start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
        end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)
        api.nova.usage_get(IsA(http.HttpRequest),
                           self.tenant.id,
                           start, end).AndReturn(usage_obj)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        if cinder_enabled:
            self._stub_cinder_api_calls()
        api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
            .MultipleTimes().AndReturn(False)
        api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
            .MultipleTimes().AndReturn(cinder_enabled)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:overview:index'))
        usages = res.context['usage']
        self.assertTemplateUsed(res, 'project/overview/usage.html')
        self.assertTrue(isinstance(usages, usage.ProjectUsage))
        if cinder_enabled:
            self.assertEqual(usages.limits['totalVolumesUsed'], 1)
            self.assertEqual(usages.limits['maxTotalVolumes'], 10)
            self.assertEqual(usages.limits['totalGigabytesUsed'], 5)
            self.assertEqual(usages.limits['maxTotalVolumeGigabytes'], 1000)
        else:
            self.assertNotIn('totalVolumesUsed', usages.limits)
|
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# @author: Li Li (lili@bnl.gov) #
# created on 07/16/2014 #
# #
# Original code: #
# @author: Mirna Lerotic, 2nd Look Consulting #
# http://www.2ndlookconsulting.com/ #
# Copyright (c) 2013, Stefan Vogt, Argonne National Laboratory #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import scipy.signal
import numpy as np
# Default smoothing-window sizes ('con_val_*') and iteration counts
# ('iter_num_*') used by snip_method; the '*_bin' values apply when
# spectral binning is enabled.
_defaults = {'con_val_no_bin': 3,
             'con_val_bin': 5,
             'iter_num_no_bin': 3,
             'iter_num_bin': 5}
def snip_method(spectrum,
                e_off, e_lin, e_quad,
                xmin=0, xmax=4096, epsilon=2.96,
                width=0.5, decrease_factor=np.sqrt(2),
                spectral_binning=None,
                con_val=None,
                iter_num=None,
                width_threshold=0.5):
    """
    Use the SNIP algorithm to obtain the background of a spectrum.

    Parameters
    ----------
    spectrum : array
        intensity spectrum
    e_off : float
        energy calibration, such as e_off + e_lin * energy + e_quad * energy^2
    e_lin : float
        energy calibration, such as e_off + e_lin * energy + e_quad * energy^2
    e_quad : float
        energy calibration, such as e_off + e_lin * energy + e_quad * energy^2
    xmin : float, optional
        smallest index to define the range
    xmax : float, optional
        largest index to define the range
    epsilon : float, optional
        energy to create a hole-electron pair
        (for Ge 2.96, for Si 3.61 at 300 K; needs double checking)
    width : float, optional
        window size factor controlling how much the background is shifted
    decrease_factor : float, optional
        gradual shrink factor of the window per pass, default sqrt(2)
    spectral_binning : float, optional
        bin the data into different size
    con_val : int, optional
        width of the rectangular window convolved with the spectrum.
        Default value is controlled by the keys `con_val_no_bin`
        and `con_val_bin` in the defaults dictionary, depending
        on if spectral_binning is used or not
    iter_num : int, optional
        Number of iterations of the fixed-window snipping stage.
        Default value is controlled by the keys `iter_num_no_bin`
        and `iter_num_bin` in the defaults dictionary, depending
        on if spectral_binning is used or not
    width_threshold : float, optional
        stop when the largest window falls below this value

    Returns
    -------
    background : array
        output results with peak removed

    References
    ----------
    .. [1] C.G. Ryan etc, "SNIP, a statistics-sensitive background
           treatment for the quantitative analysis of PIXE spectra in
           geoscience applications", Nuclear Instruments and Methods in
           Physics Research Section B, vol. 34, 1998.
    """
    # resolve defaults that depend on whether spectral binning is used
    if con_val is None:
        con_val = _defaults['con_val_no_bin' if spectral_binning is None
                            else 'con_val_bin']
    if iter_num is None:
        iter_num = _defaults['iter_num_no_bin' if spectral_binning is None
                             else 'iter_num_bin']
    background = np.array(spectrum)
    n_background = background.size
    # NOTE: the ``np.float``/``np.int`` aliases used previously were removed
    # in NumPy 1.20+/1.24; the builtin types are the supported spelling.
    energy = np.arange(n_background, dtype=float)
    if spectral_binning is not None:
        energy = energy * spectral_binning
    energy = e_off + energy * e_lin + energy**2 * e_quad
    # transfer from std to fwhm
    std_fwhm = 2 * np.sqrt(2 * np.log(2))
    tmp = (e_off / std_fwhm)**2 + energy * epsilon * e_lin
    tmp[tmp < 0] = 0
    fwhm = std_fwhm * np.sqrt(tmp)
    # Smooth the spectrum with a rectangular (boxcar) window of ones.
    # ``scipy.signal.boxcar`` was removed from scipy.signal in SciPy 1.13;
    # an all-ones window is the exact equivalent.
    s = np.ones(con_val)
    # For background removal, we only care about the central parts
    # where there are peaks. On the boundary part, we don't care
    # about the accuracy so much, but mode='same' keeps the length.
    A = s.sum()
    background = scipy.signal.convolve(background, s, mode='same') / A
    window_p = width * fwhm / e_lin
    if spectral_binning is not None and spectral_binning > 0:
        window_p = window_p / 2.
    # double-log transform compresses the dynamic range (standard in SNIP)
    background = np.log(np.log(background + 1) + 1)
    index = np.arange(n_background)
    # clip bounds are loop-invariant, hoist them
    lo_clip = np.max([xmin, 0])
    hi_clip = np.min([xmax, n_background - 1])
    # FIRST SNIPPING: fixed-size window, iter_num passes
    for _ in range(iter_num):
        lo_index = np.clip(index - window_p, lo_clip, hi_clip)
        hi_index = np.clip(index + window_p, lo_clip, hi_clip)
        temp = (background[lo_index.astype(int)] +
                background[hi_index.astype(int)]) / 2.
        bg_index = background > temp
        background[bg_index] = temp[bg_index]
    # SECOND STAGE: keep snipping while shrinking the window until the
    # largest window falls below width_threshold
    current_width = window_p
    max_current_width = np.amax(current_width)
    while max_current_width >= width_threshold:
        lo_index = np.clip(index - current_width, lo_clip, hi_clip)
        hi_index = np.clip(index + current_width, lo_clip, hi_clip)
        temp = (background[lo_index.astype(int)] +
                background[hi_index.astype(int)]) / 2.
        bg_index = background > temp
        background[bg_index] = temp[bg_index]
        # decrease the width and repeat
        current_width = current_width / decrease_factor
        max_current_width = np.amax(current_width)
    # undo the double-log transform; zero out any non-finite artifacts
    background = np.exp(np.exp(background) - 1) - 1
    inf_ind = np.where(~np.isfinite(background))
    background[inf_ind] = 0.0
    return background
|
|
from decimal import Decimal
import pytest
import saleor.payment.gateway as gateway
from saleor.payment import ChargeStatus, TransactionKind
from saleor.payment.interface import GatewayResponse
from saleor.payment.utils import create_payment_information
# Canned gateway responses and amounts shared by the gateway tests below.
# Fix: Decimal values are now constructed from strings rather than float
# literals (Decimal(10.0) happens to be exact, but the float-constructor
# idiom is fragile for non-representable values; Decimal("10") is the
# documented safe form and compares equal to the old value).
RAW_RESPONSE = {"test": "abcdefgheijklmn"}
PROCESS_PAYMENT_RESPONSE = GatewayResponse(
    is_success=True,
    customer_id="test_customer",
    action_required=False,
    kind=TransactionKind.CAPTURE,
    amount=Decimal("10"),
    currency="usd",
    transaction_id="1234",
    error=None,
    raw_response=RAW_RESPONSE,
)
AUTHORIZE_RESPONSE = GatewayResponse(
    is_success=True,
    customer_id="test_customer",
    action_required=False,
    kind=TransactionKind.AUTH,
    amount=Decimal("10"),
    currency="usd",
    transaction_id="1234",
    error=None,
    raw_response=RAW_RESPONSE,
)
VOID_AMOUNT = Decimal("98.40")
VOID_RESPONSE = GatewayResponse(
    is_success=True,
    customer_id="test_customer",
    action_required=False,
    kind=TransactionKind.VOID,
    amount=VOID_AMOUNT,
    currency="usd",
    transaction_id="1234",
    error=None,
    raw_response=RAW_RESPONSE,
)
PARTIAL_REFUND_AMOUNT = Decimal("2")
PARTIAL_REFUND_RESPONSE = GatewayResponse(
    is_success=True,
    customer_id="test_customer",
    action_required=False,
    kind=TransactionKind.REFUND,
    amount=PARTIAL_REFUND_AMOUNT,
    currency="usd",
    transaction_id="1234",
    error=None,
    raw_response=RAW_RESPONSE,
)
FULL_REFUND_AMOUNT = Decimal("98.40")
FULL_REFUND_RESPONSE = GatewayResponse(
    is_success=True,
    customer_id="test_customer",
    action_required=False,
    kind=TransactionKind.REFUND,
    amount=FULL_REFUND_AMOUNT,
    currency="usd",
    transaction_id="1234",
    error=None,
    raw_response=RAW_RESPONSE,
)
CONFIRM_AMOUNT = Decimal("98.40")
CONFIRM_RESPONSE = GatewayResponse(
    is_success=True,
    customer_id="test_customer",
    action_required=False,
    kind=TransactionKind.CONFIRM,
    amount=CONFIRM_AMOUNT,
    currency="usd",
    transaction_id="1234",
    error=None,
    raw_response=RAW_RESPONSE,
)
TOKEN = "token"
USED_GATEWAY = "Dummy"
@pytest.fixture
def mock_payment_interface(mocker, fake_payment_interface):
    # Patch the extensions-manager factory so the gateway module talks to
    # the fake payment interface instead of the real plugin manager.
    patched_factory = mocker.patch(
        "saleor.payment.gateway.get_extensions_manager",
        autospec=True,
        return_value=fake_payment_interface,
    )
    yield fake_payment_interface
    # Teardown: the test must have gone through the patched factory once.
    patched_factory.assert_called_once()
def test_process_payment(mock_payment_interface, payment_txn_preauth):
    # Processing must forward the gateway name plus the payment data to the
    # plugin manager and record a CAPTURE transaction from the response.
    mock_payment_interface.process_payment.return_value = PROCESS_PAYMENT_RESPONSE
    expected_data = create_payment_information(
        payment=payment_txn_preauth, payment_token=TOKEN
    )
    txn = gateway.process_payment(payment=payment_txn_preauth, token=TOKEN)
    mock_payment_interface.process_payment.assert_called_once_with(
        USED_GATEWAY, expected_data
    )
    assert txn.amount == PROCESS_PAYMENT_RESPONSE.amount
    assert txn.kind == TransactionKind.CAPTURE
    assert txn.currency == "usd"
    assert txn.gateway_response == RAW_RESPONSE
def test_store_source_when_processing_payment(
    mock_payment_interface, payment_txn_preauth
):
    # store_source=True must be propagated into the payment data, and the
    # customer id from the gateway response must end up on the transaction.
    mock_payment_interface.process_payment.return_value = PROCESS_PAYMENT_RESPONSE
    expected_data = create_payment_information(
        payment=payment_txn_preauth, payment_token=TOKEN, store_source=True
    )
    txn = gateway.process_payment(
        payment=payment_txn_preauth, token=TOKEN, store_source=True
    )
    mock_payment_interface.process_payment.assert_called_once_with(
        USED_GATEWAY, expected_data
    )
    assert txn.customer_id == PROCESS_PAYMENT_RESPONSE.customer_id
def test_authorize_payment(mock_payment_interface, payment_dummy):
    # Authorization must forward the payment data and record an AUTH
    # transaction mirroring the gateway response.
    mock_payment_interface.authorize_payment.return_value = AUTHORIZE_RESPONSE
    expected_data = create_payment_information(
        payment=payment_dummy, payment_token=TOKEN
    )
    txn = gateway.authorize(payment=payment_dummy, token=TOKEN)
    mock_payment_interface.authorize_payment.assert_called_once_with(
        USED_GATEWAY, expected_data
    )
    assert txn.amount == AUTHORIZE_RESPONSE.amount
    assert txn.kind == TransactionKind.AUTH
    assert txn.currency == "usd"
    assert txn.gateway_response == RAW_RESPONSE
def test_capture_payment(mock_payment_interface, payment_txn_preauth):
    # Capture re-uses the token stored on the existing AUTH transaction.
    preauth_txn = payment_txn_preauth.transactions.get()
    mock_payment_interface.capture_payment.return_value = PROCESS_PAYMENT_RESPONSE
    expected_data = create_payment_information(
        payment=payment_txn_preauth, payment_token=preauth_txn.token
    )
    txn = gateway.capture(payment=payment_txn_preauth)
    mock_payment_interface.capture_payment.assert_called_once_with(
        USED_GATEWAY, expected_data
    )
    assert txn.amount == PROCESS_PAYMENT_RESPONSE.amount
    assert txn.kind == TransactionKind.CAPTURE
    assert txn.currency == "usd"
    assert txn.gateway_response == RAW_RESPONSE
def test_partial_refund_payment(mock_payment_interface, payment_txn_captured):
    # A partial refund records a REFUND transaction and moves the payment
    # into the PARTIALLY_REFUNDED charge status.
    captured_txn = payment_txn_captured.transactions.get()
    mock_payment_interface.refund_payment.return_value = PARTIAL_REFUND_RESPONSE
    expected_data = create_payment_information(
        payment=payment_txn_captured,
        amount=PARTIAL_REFUND_AMOUNT,
        payment_token=captured_txn.token,
    )
    txn = gateway.refund(
        payment=payment_txn_captured, amount=PARTIAL_REFUND_AMOUNT
    )
    mock_payment_interface.refund_payment.assert_called_once_with(
        USED_GATEWAY, expected_data
    )
    payment_txn_captured.refresh_from_db()
    assert payment_txn_captured.charge_status == ChargeStatus.PARTIALLY_REFUNDED
    assert txn.amount == PARTIAL_REFUND_AMOUNT
    assert txn.kind == TransactionKind.REFUND
    assert txn.currency == "usd"
    assert txn.gateway_response == RAW_RESPONSE
def test_full_refund_payment(mock_payment_interface, payment_txn_captured):
    # A full refund (no explicit amount) refunds everything and moves the
    # payment into the FULLY_REFUNDED charge status.
    captured_txn = payment_txn_captured.transactions.get()
    mock_payment_interface.refund_payment.return_value = FULL_REFUND_RESPONSE
    expected_data = create_payment_information(
        payment=payment_txn_captured,
        amount=FULL_REFUND_AMOUNT,
        payment_token=captured_txn.token,
    )
    txn = gateway.refund(payment=payment_txn_captured)
    mock_payment_interface.refund_payment.assert_called_once_with(
        USED_GATEWAY, expected_data
    )
    payment_txn_captured.refresh_from_db()
    assert payment_txn_captured.charge_status == ChargeStatus.FULLY_REFUNDED
    assert txn.amount == FULL_REFUND_AMOUNT
    assert txn.kind == TransactionKind.REFUND
    assert txn.currency == "usd"
    assert txn.gateway_response == RAW_RESPONSE
def test_void_payment(mock_payment_interface, payment_txn_preauth):
    # Voiding deactivates the payment and records a VOID transaction.
    preauth_txn = payment_txn_preauth.transactions.get()
    mock_payment_interface.void_payment.return_value = VOID_RESPONSE
    expected_data = create_payment_information(
        payment=payment_txn_preauth,
        payment_token=preauth_txn.token,
        amount=VOID_AMOUNT,
    )
    txn = gateway.void(payment=payment_txn_preauth)
    mock_payment_interface.void_payment.assert_called_once_with(
        USED_GATEWAY, expected_data
    )
    payment_txn_preauth.refresh_from_db()
    assert not payment_txn_preauth.is_active
    assert txn.amount == VOID_RESPONSE.amount
    assert txn.kind == TransactionKind.VOID
    assert txn.currency == "usd"
    assert txn.gateway_response == RAW_RESPONSE
def test_confirm_payment(mock_payment_interface, payment_txn_preauth):
    # Confirming forwards the pre-auth token and records a CONFIRM
    # transaction mirroring the gateway response.
    preauth_txn = payment_txn_preauth.transactions.get()
    mock_payment_interface.confirm_payment.return_value = CONFIRM_RESPONSE
    expected_data = create_payment_information(
        payment=payment_txn_preauth,
        payment_token=preauth_txn.token,
        amount=CONFIRM_AMOUNT,
    )
    txn = gateway.confirm(payment=payment_txn_preauth)
    mock_payment_interface.confirm_payment.assert_called_once_with(
        USED_GATEWAY, expected_data
    )
    assert txn.amount == CONFIRM_RESPONSE.amount
    assert txn.kind == TransactionKind.CONFIRM
    assert txn.currency == "usd"
    assert txn.gateway_response == RAW_RESPONSE
def test_list_gateways(mock_payment_interface):
    # list_gateways is a thin pass-through to the plugin manager.
    available = [{"name": "Stripe"}, {"name": "Braintree"}]
    mock_payment_interface.list_payment_gateways.return_value = available
    listed = gateway.list_gateways()
    mock_payment_interface.list_payment_gateways.assert_called_once()
    assert listed == available
|
|
#!/usr/bin/env python3
import ssl
import socket
from time import sleep
MODE_COMMAND = 0
MODE_DATA = 1
DEBUG_HEXDUMP = 0b0001
DEBUG_COMMAND = 0b0010
DEBUG_PROTOCOL = 0b0100
DEBUG_ALL = 0b1111
RECOVER_TIME = 1
_FRAME_COUNT = 15
_HIGHEST_BIT = 0x7F
_FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])
_PAYLOADSIZE = 60
_PAYLOADMAP = {
# See http://tubifex.nl/wordpress/wp-content/uploads/2013/05/VBus-Protokollspezification_en_270111.pdf#53
# Did not implement mask
# Offset, size, factor
'temp1': (0, 2, 0.1),
'temp2': (2, 2, 0.1),
'temp3': (4, 2, 0.1),
'temp4': (6, 2, 0.1),
'temp5': (8, 2, 0.1),
'temprps': (10, 2, 0.1),
'presrps': (12, 2, 0.1),
'tempvfs': (14, 2, 0.1),
'flowvfs': (16, 2, 1),
'flowv40': (18, 2, 1),
'unit': (20, 1, 1),
'pwm1': (22, 1, 1), # Strange padding?
'pwm2': (23, 1, 1),
'pump1': (24, 1, 1),
'pump2': (25, 1, 1),
'pump3': (26, 1, 1),
'pump4': (27, 1, 1),
'opsec1': (28, 4, 1),
'opsec2': (32, 4, 1),
'opsec3': (36, 4, 1),
'opsec4': (40, 4, 1),
'error': (44, 2, 1),
'tatus': (46, 2, 1)
}
class _TERM:
    # ANSI escape sequences for colored/styled terminal output.
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'  # reset all attributes
def _hexdump(src, length=16):
    """Return a hex/ASCII dump of *src*, *length* bytes per row.

    Each row shows the offset, the hex values and a printable rendering
    (non-printable characters replaced by '.' via the module-level _FILTER
    table). Assumes *src* is a text-mode byte string (Python 2 ``str``);
    under Python 3 pass a ``str`` -- TODO confirm callers.
    """
    result = []
    # Bug fix: xrange() is Python-2-only and raised NameError under
    # Python 3, which this module otherwise targets (print() calls).
    for i in range(0, len(src), length):
        s = src[i:i + length]
        hexa = ' '.join(["%02X" % ord(x) for x in s])
        printable = s.translate(_FILTER)
        result.append("%04X %-*s %s\n" % (i, length * 3, hexa, printable))
    return "Len %iB\n%s" % (len(src), ''.join(result))
class VBUSException(Exception):
    """Raised for VBus protocol, stream and authentication errors."""

    def __init__(self, *args):
        # Bug fix: the original called ``super.__init__(*args)`` (missing
        # parentheses after ``super``), which raised
        # ``TypeError: descriptor '__init__' requires a 'super' object``
        # on every instantiation instead of initialising the exception.
        super(VBUSException, self).__init__(*args)
class VBUSResponse(object):
    """
    A response object that is generated by
    the VBUSConnection when in COMMAND mode.
    """
    def __init__(self, line):
        # A response line looks like "+TYPE:message" (success) or
        # "-TYPE:message" (failure); the leading sign flags the outcome.
        assert len(line) > 2
        self.positive = line[0] == "+"
        spl = line[1:].split(":", 1)
        self.type = spl[0]
        # NOTE(review): the [:1] slice keeps only the FIRST character of the
        # message -- possibly a typo for [1:] (strip a leading space) or for
        # plain spl[1]; confirm against the VBus LAN protocol before changing.
        self.message = None if len(spl) == 1 else spl[1][:1]
class VBUSConnection(object):
    """Client for a Resol VBus-over-LAN adapter.

    Speaks the line-based command protocol (MODE_COMMAND) and, after a
    successful DATA request, parses the binary VBus stream (MODE_DATA).
    NOTE(review): the byte handling assumes Python-2 ``str`` semantics in
    several places (``chr(0xAA)`` split, ``ord()`` on slice elements);
    a full Python 3 port of the stream parsing is out of scope here.
    """

    def __init__(self, host, port=7053, password="", debugmode=0b0000):
        """
        :param host: The IP/DNS of the vbus host
        :param port: The port the vbus is listening to
        :param password: The optional password. Use "" or None for no password
        :param debugmode: The debug flags to use
        :type host: str
        :type port: int
        :type password: str
        :type debugmode: int
        """
        password = "" if password in [None, False] else password
        assert isinstance(port, int)
        assert isinstance(host, str)
        assert isinstance(password, str)
        assert isinstance(debugmode, int)
        self.host = host
        self.port = port
        self.password = password or False
        self.debugmode = debugmode
        self._mode = MODE_COMMAND
        self._sock = None
        self._buffer = []

    def connect(self, sslsock=False):
        """
        Connects to the VBUS. It will try to authenticate
        if a password has been set.
        :raise VBUSException:
        :type sslsock: bool
        :param sslsock: Use ssl?
        """
        assert not self._sock
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if sslsock:  # Unlikely that we'll ever connect to the VBUS using an ssl socket but "why not?"
            self._sock = ssl.wrap_socket(self._sock)
        self._sock.connect((self.host, self.port))
        # The adapter greets with "+HELLO..." on connect.
        assert VBUSResponse(self._lrecv()).type == "HELLO"
        if self.password:
            if not self.authenticate():
                raise VBUSException("Could not authenticate")

    def authenticate(self):
        """
        Authenticate with the server using the set password. This
        will return if the authentication attempt was accepted.
        :rtype : bool
        """
        assert self.password
        assert self._mode == MODE_COMMAND
        self._lsend("PASS %s" % self.password)
        resp = VBUSResponse(self._lrecv())
        return resp.positive

    def data(self, payloadmap=_PAYLOADMAP, framecount=_FRAME_COUNT, payloadsize=_PAYLOADSIZE):
        """
        Listen to the server and get some data.
        :param payloadmap: mapping name -> (offset, size, factor)
        :param payloadsize: The size of a payload in bytes
        :param framecount: The amount of desired frames
        :return: The requested data
        :type payloadmap: dict
        :type payloadsize: int
        :type framecount: int
        :rtype : dict
        """
        payloadmap = payloadmap.copy()
        assert isinstance(payloadsize, int)
        assert self._sock
        if self._mode != MODE_DATA:
            self._lsend("DATA")
            resp = VBUSResponse(self._lrecv())
            if not resp.positive:
                # Bug fix: error message used to read "Could create ...".
                raise VBUSException("Could not create a data stream: %s" % resp.message)
            self._mode = MODE_DATA
        while True:
            # Wait till we get the correct protocol. Packets are delimited
            # by the sync byte 0xAA.
            for d in self._brecv().split(chr(0xAA)):
                # Check the protocol version. Bug fix: all these guards used
                # identity tests ("is"/"is not") against computed integers,
                # which is undefined outside CPython's small-int cache.
                if self._getbytes(d, 4, 5) != 0x10:
                    continue
                # Are we getting a payload? (0x100 == 256 is never identical
                # to a freshly computed int, so the original identity test
                # rejected every single packet.)
                if self._getbytes(d, 5, 7) != 0x100:
                    continue
                if self.debugmode & DEBUG_PROTOCOL:
                    print("Source map: 0X%02X" % self._getbytes(d, 2, 4))
                # Is the checksum valid?
                if self._checksum(d[0:8]) != self._getbytes(d, 8, 9):
                    if self.debugmode & DEBUG_PROTOCOL:
                        print("Invalid checksum: got %02X expected %02X" % \
                              (self._checksum(d[0:8]), self._getbytes(d, 8, 9)))
                    continue
                # Check payload length: 6 bytes per frame.
                frames = self._getbytes(d, 7, 8)
                payload = d[9:9 + (6 * frames)]
                if len(payload) != 6 * frames:
                    if self.debugmode & DEBUG_PROTOCOL:
                        print("Unexpected payload length: %i != %i" % \
                              (len(payload), 6 * frames))
                    continue
                r = self._parsepayload(payload, payloadmap, payloadsize, framecount)
                if r:
                    return r
            # The vbus freaks out when you send too many requests
            # This can be solved by just waiting
            sleep(RECOVER_TIME)

    def getmode(self):
        """Return the current mode (MODE_COMMAND or MODE_DATA)."""
        return self._mode

    def _parsepayload(self, payload, payloadmap, payloadsize, framecount):
        """Decode one payload into a dict of named values, or None on error."""
        data = []
        # NOTE(review): "and False" deliberately(?) disables this check --
        # with the default framecount of 15 the payload is 90 bytes, which
        # never equals payloadsize (60), so enabling it would reject
        # everything. Left disabled to preserve behaviour; the print-format
        # bug inside (comma instead of %) is fixed regardless.
        if len(payload) != payloadsize and False:
            if self.debugmode & DEBUG_PROTOCOL:
                print("Payload size mismatch: expected %i got %i" % (payloadsize, len(payload)))
            return None
        # 7-bit transport: any byte with the high bit set is corruption.
        if True in [ord(i) > _HIGHEST_BIT for i in payload]:
            if self.debugmode & DEBUG_PROTOCOL:
                print("Found highest byte discarding payload")
                print(' '.join(
                    "%02X" % ord(i) if ord(i) <= _HIGHEST_BIT else "%s%02X%s" % (_TERM.RED, ord(i), _TERM.END)
                    for i in payload
                ))
            return None
        if self.debugmode & DEBUG_PROTOCOL:
            # Bug fix: "/" yields a float under Python 3; use floor division.
            print("%i frames" % (len(payload) // 6, ))
        if (len(payload) // 6) != framecount:
            if self.debugmode & DEBUG_PROTOCOL:
                print("Invalid frame count")
            return None
        for i in range(len(payload) // 6):
            frame = payload[i * 6:i * 6 + 6]
            if self.debugmode & DEBUG_PROTOCOL:
                print("Frame %i: %s" % (i, ' '.join("%02X" % ord(i) for i in frame)))
            # Check frame checksum (byte 5 of each frame).
            if ord(frame[5]) != self._checksum(frame[:-1]):
                if self.debugmode & DEBUG_PROTOCOL:
                    print("Frame checksum mismatch: ", ord(frame[5]), self._checksum(frame[:-1]))
                return None
            # Byte 4 is a septet: bit j set means data byte j had its high
            # bit stripped for transmission -- re-attach it here.
            septet = ord(frame[4])
            for j in range(4):
                if septet & (1 << j):
                    data.append(chr(ord(frame[j]) | 0x80))
                else:
                    data.append(frame[j])
        vals = {}
        for i, rng in payloadmap.items():
            vals[i] = self._getbytes(data, rng[0], rng[0] + rng[1])
            # Temperatures can be negative (using two's complement)
            if i.startswith('temp'):
                bits = (rng[1]) * 8
                if vals[i] >= 1 << (bits - 1):
                    vals[i] -= 1 << bits
            # Apply factor
            vals[i] *= rng[2]
        if self.debugmode & DEBUG_PROTOCOL:
            print(vals)
        return vals

    @staticmethod
    def _checksum(data):
        """VBus checksum: start at 0x7F, subtract each byte mod 256, mask to 7 bits.

        Rewritten as a loop: the original used the ``reduce`` builtin, which
        does not exist under Python 3 (functools.reduce).
        """
        chk = 0x7F
        for b in data:
            chk = ((chk - ord(b)) % 0x100) & 0x7F
        return chk

    @staticmethod
    def _getbytes(data, begin, end):
        """Little-endian unsigned integer from data[begin:end] (chars)."""
        return sum([ord(b) << (i * 8) for i, b in enumerate(data[begin:end])])

    def _lrecv(self):
        """Receive one CRLF-terminated command line and return it stripped."""
        c, s = '', ''
        while c != b'\n':
            c = self._sock.recv(1)
            # Bug fix: recv() signals EOF with an EMPTY value; the original
            # "c == ''" never matched the bytes object returned under
            # Python 3 and looped forever on a closed socket.
            if not c:
                break
            s += c.decode("utf-8")
        s = s.strip('\r\n')
        if self.debugmode & DEBUG_COMMAND:
            print("< " + s)
        return s

    def _brecv(self, n=1024):
        """Receive up to *n* raw bytes, hexdumping them if requested."""
        d = self._sock.recv(n)
        if self.debugmode & DEBUG_HEXDUMP:
            print(_hexdump(d))
        return d

    def _lsend(self, s):
        """Send one CRLF-terminated command line."""
        if self.debugmode & DEBUG_COMMAND:
            print("> " + s)
        msg = s + "\r\n"
        self._sock.send(msg.encode("utf-8"))

    def _bsend(self, s):
        """Send raw bytes, hexdumping them if requested."""
        if self.debugmode & DEBUG_HEXDUMP:
            print(_hexdump(s))
        self._sock.send(s)
|
|
"""
As we store content databases in separate SQLite files per channel, we need dynamic database connection routing.
This file contains a decorator/context manager, `using_content_database`, that allows a specific content
database to be specified for a block of code, as follows:
with using_content_database("nalanda"):
objects = ContentNode.objects.all()
return objects.count()
Thanks to https://github.com/ambitioninc/django-dynamic-db-router for inspiration behind the approach taken here.
"""
import os
import threading
from functools import wraps
from django.apps import apps
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.utils import ConnectionDoesNotExist
from .errors import ContentModelUsedOutsideDBContext
# Thread-local storage holding the per-thread active content-db alias.
THREAD_LOCAL = threading.local()
# Aliases of content databases onto which the default db has been ATTACHed.
_content_databases_with_attached_default_db = set()
# since Django uses pysqlite2 if available, we need to catch its OperationalError instead
try:
    from pysqlite2.dbapi2 import OperationalError
except ImportError:
    from sqlite3 import OperationalError
def default_database_is_attached():
    """Tell whether the default db has been ATTACHed to the active content db.

    Returns False when no content database is currently activated on this
    thread.
    """
    try:
        return get_active_content_database() in _content_databases_with_attached_default_db
    except ContentModelUsedOutsideDBContext:
        return False
def get_active_content_database(return_none_if_not_set=False):
    """Return the content-db alias activated by `using_content_database`.

    Reads the thread-local set by the context manager. Raises
    ContentModelUsedOutsideDBContext when nothing is active, unless
    *return_none_if_not_set* is True (then returns None).
    """
    alias = getattr(THREAD_LOCAL, 'ACTIVE_CONTENT_DB_ALIAS', None)
    if alias:
        # Touch the connection so the alias is registered and validated
        # before anyone queries through it.
        get_content_database_connection(alias)
        return alias
    if return_none_if_not_set:
        return None
    raise ContentModelUsedOutsideDBContext()
def get_content_database_connection(alias=None):
    """Return the raw DB-API connection for *alias* (default: the active one),
    lazily registering and validating the Django connection entry on first use.

    Raises KeyError if the sqlite file is missing or contains no tables.
    """
    if not alias:
        alias = get_active_content_database()
    # try to connect to the content database, and if connection doesn't exist, create it
    try:
        connections[alias]
    except ConnectionDoesNotExist:
        # an alias may be a bare channel name (resolved inside
        # CONTENT_DATABASE_DIR) or a full path ending in ".sqlite3"
        if alias.endswith(".sqlite3"):
            filename = alias
        else:
            filename = os.path.join(settings.CONTENT_DATABASE_DIR, alias + '.sqlite3')
        if not os.path.isfile(filename):
            raise KeyError("Content DB '%s' doesn't exist!!" % alias)
        connections.databases[alias] = {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': filename,
        }
        # check that the content database is not empty
        if not connections[alias].introspection.table_names():
            raise KeyError("Content DB '%s' is empty!!" % alias)
        # if possible, attach the default database to the content database connection to enable joins
        _attach_default_database(alias)
    return connections[alias].connection
def _attach_default_database(alias):
    """
    Attach the default (primary) database file to the content database connection, if both use sqlite files.
    This allows us to do direct joins between tables across the two databases, for efficiently integrating
    data from the two sources -- e.g. annotating ContentNodes with progress info from ContentSummaryLogs.
    """
    # attaching is only possible when BOTH sides are sqlite files (the original
    # comment here claimed the opposite). Subtle: the ENGINE check passes
    # because the module path 'django.db.backends.sqlite3' itself ends with
    # ".sqlite3" -- it is matching the backend name, not a file name.
    default_db = connections.databases[DEFAULT_DB_ALIAS]
    if default_db["ENGINE"].endswith(".sqlite3") and default_db["NAME"].endswith(".sqlite3"):
        default_db_path = connections.databases[DEFAULT_DB_ALIAS]["NAME"]
        try:
            # ensure we're connected to the content database before attaching the default database
            if not connections[alias].connection:
                connections[alias].connect()
            # attach the default database to the content db connection; this allows tables from both databases
            # to be used together in the same query; see https://www.sqlite.org/lang_attach.html
            connections[alias].connection.execute("ATTACH DATABASE '{}' AS defaultdb;".format(default_db_path))
            # record the fact that the default database has been attached to this content database
            _content_databases_with_attached_default_db.add(alias)
        except OperationalError:
            # this will happen if the database is already attached; we can safely ignore
            pass
def set_active_content_database(alias):
    """Record *alias* as this thread's active content database."""
    THREAD_LOCAL.ACTIVE_CONTENT_DB_ALIAS = alias
class ContentDBRouter(object):
    """A router that decides what content database to read from based on a thread-local variable."""
    def _get_db(self, model, **hints):
        # imported lazily to avoid an import cycle with .models
        from .models import ContentDatabaseModel
        # if the model does not inherit from ContentDatabaseModel, leave it for the default database
        if not issubclass(model, ContentDatabaseModel):
            return None
        # if the model is already associated with a database, use that database
        if hasattr(hints.get("instance", None), "_state"):
            return hints["instance"]._state.db
        # determine the currently active content database, and return the alias
        return get_active_content_database()
    def db_for_read(self, model, **hints):
        # reads and writes route identically
        return self._get_db(model, **hints)
    def db_for_write(self, model, **hints):
        return self._get_db(model, **hints)
    def allow_relation(self, obj1, obj2, **hints):
        # cross-database relations are permitted (content <-> default joins)
        return True
    def allow_migrate(self, db, app_label, model_name=None, **hints):
        from .models import ContentDatabaseModel
        model = apps.get_model(app_label=app_label, model_name=model_name) if model_name else None
        # allow migrations for ContentDatabaseModels on non-default DBs, and for others only on default DB
        if model and issubclass(model, ContentDatabaseModel):
            val = db != DEFAULT_DB_ALIAS
        else:
            val = db == DEFAULT_DB_ALIAS
        return val
class using_content_database(object):
    """Run queries against a specific content database.

    Usable both as a context manager and as a decorator; on exit the
    previously-active alias (possibly None) is restored.

    :type alias: str
    :param alias: The alias for the content database to run queries on.

    Context-manager form:

    .. code-block:: python

        from models import ContentNode
        with using_content_database("nalanda"):
            objects = ContentNode.objects.all()
            return objects.count()

    Decorator form:

    .. code-block:: python

        from models import ContentNode
        @using_content_database('nalanda')
        def delete_all_the_nalanda_content():
            ContentNode.objects.all().delete()
    """

    def __init__(self, alias):
        self.alias = alias

    def __enter__(self):
        # remember whatever was active so nested usage restores correctly
        self.previous_alias = getattr(THREAD_LOCAL, 'ACTIVE_CONTENT_DB_ALIAS', None)
        set_active_content_database(self.alias)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        set_active_content_database(self.previous_alias)

    def __call__(self, querying_func):
        # decorator form: run the wrapped function inside this context manager
        @wraps(querying_func)
        def wrapper(*args, **kwargs):
            with self:
                return querying_func(*args, **kwargs)
        return wrapper
|
|
"""The module provides functions to pack and unpack petlib Bn, EcGroup, and EcPt
structures.
Example:
>>> # Define a custom class, encoder and decoder
>>> class CustomType:
... def __eq__(self, other):
... return isinstance(other, CustomType)
>>>
>>> def enc_custom(obj):
... return b''
>>>
>>> def dec_custom(data):
... return CustomType()
>>>
>>> register_coders(CustomType, 10, enc_custom, dec_custom)
>>>
>>> # Define a structure
>>> G = EcGroup()
>>> custom_obj = CustomType()
>>> test_data = [G, G.generator(), G.order(), custom_obj]
>>>
>>> # Encode and decode custom structure
>>> packed = encode(test_data)
>>> x = decode(packed)
>>> assert x == test_data
"""
# msgpack is required at runtime; the fallback exists only so the
# documentation build (which lacks the dependency) can still import this
# module. Bug fix: the bare "except:" swallowed every exception, not just
# a missing package.
try:
    import msgpack
except ImportError:
    print("Generate Documentation")
from .ec import EcGroup, EcPt
from .bn import Bn
__all__ = ["encode", "decode", "register_coders"]
# Registries: class -> (cls, num, enc, dec) and ExtType code -> same tuple.
_pack_reg = {}
_unpack_reg = {}
def register_coders(cls, num, enc_func, dec_func):
    """ Register a new type for encoding and decoding.

    Take a class type, a number, an encoding and a decoding function.

    :param cls: the Python class to serialize
    :param num: the msgpack ExtType code to use (must be unused)
    :param enc_func: callable(obj) -> bytes
    :param dec_func: callable(bytes) -> obj
    :raises ValueError: if *cls* or *num* is already registered
    """
    if num in _unpack_reg or cls in _pack_reg:
        # ValueError instead of the original generic Exception: it names the
        # failure mode and remains catchable by existing `except Exception`.
        raise ValueError("Class or number already in use.")
    coders = (cls, num, enc_func, dec_func)
    _pack_reg[cls] = coders
    _unpack_reg[num] = coders
def bn_enc(obj):
    """Encode a Bn as a sign byte (b'+'/b'-') followed by its magnitude bytes."""
    negative = obj < 0
    magnitude = (-obj if negative else obj).binary()
    sign = b"-" if negative else b"+"
    return sign + magnitude
def bn_dec(data):
    """Decode a Bn from a sign byte followed by its magnitude bytes."""
    num = Bn.from_binary(data[1:])
    # Accomodate both Python 2 and Python 3
    # (indexing bytes yields an int on py3 but a 1-char str on py2)
    if data[0] == ord("-") or data[0] == "-":
        return -num
    return num
def ecg_enc(obj):
    # Serialize EcGroup objects: the curve NID fully identifies the group
    nid = obj.nid()
    packed_nid = msgpack.packb(nid)
    return packed_nid
def ecg_dec(data):
    # Decode EcGroup by reconstructing it from its NID
    nid = msgpack.unpackb(data)
    return EcGroup(nid)
def ecpt_enc(obj):
    # Serialize EcPt objects: need both the group NID and the point bytes
    nid = obj.group.nid()
    data = obj.export()
    packed_data = msgpack.packb((nid, data))
    return packed_data
def ecpt_dec(data):
    # Decode EcPt within its reconstructed group
    nid, ptdata = msgpack.unpackb(data)
    return EcPt.from_binary(ptdata, EcGroup(nid))
def _init_coders():
    # Reset the registries and (re-)register the built-in petlib coders.
    # Also used by tests to undo custom registrations.
    global _pack_reg, _unpack_reg
    _pack_reg, _unpack_reg = {}, {}
    register_coders(Bn, 0, bn_enc, bn_dec)
    register_coders(EcGroup, 1, ecg_enc, ecg_dec)
    register_coders(EcPt, 2, ecpt_enc, ecpt_dec)
# Register default coders
# petlib's native ec/bn bindings may be unavailable (e.g. during a docs
# build); keep the module importable regardless. Bug fix: narrowed the bare
# "except:" so KeyboardInterrupt/SystemExit are no longer swallowed.
try:
    _init_coders()
except Exception:
    print("Generate documentation")
def default(obj):
    """msgpack `default=` hook: wrap registered petlib types in an ExtType."""
    for registered_cls, coders in _pack_reg.items():
        if isinstance(obj, registered_cls):
            num, enc = coders[1], coders[2]
            return msgpack.ExtType(num, enc(obj))
    raise TypeError("Unknown type: %r" % (type(obj),))
def make_encoder(out_encoder=None):
    """Build a msgpack `default=` callable.

    With no *out_encoder*, returns the built-in `default` hook. Otherwise
    returns a hook that first tries the built-in coders and falls back to
    *out_encoder* for unregistered types.
    """
    if out_encoder is None:
        return default

    def new_encoder(obj):
        try:
            return default(obj)
        except Exception:
            # Bug fix: was "except BaseException", which also swallowed
            # KeyboardInterrupt/SystemExit while falling back.
            return out_encoder(obj)
    return new_encoder
def ext_hook(code, data):
    """msgpack `ext_hook`: decode registered ExtType codes, pass others through."""
    try:
        decoder = _unpack_reg[code][3]
    except KeyError:
        # Other: not one of ours -- hand back the raw ExtType
        return msgpack.ExtType(code, data)
    return decoder(data)
def make_decoder(custom_decoder=None):
    """Build a msgpack `ext_hook` callable.

    With no *custom_decoder*, returns the built-in `ext_hook`. Otherwise
    returns a hook that tries the built-in coders first and defers any
    unrecognised ExtType to *custom_decoder*.
    """
    if custom_decoder is None:
        return ext_hook

    def chained_decoder(code, data):
        decoded = ext_hook(code, data)
        if isinstance(decoded, msgpack.ExtType):
            # built-in coders did not recognise it; defer to the custom one
            return custom_decoder(code, data)
        return decoded
    return chained_decoder
def encode(structure, custom_encoder=None):
    """ Encode a structure containing petlib objects to a binary format. May define a custom encoder for user classes. """
    encoder = make_encoder(custom_encoder)
    packed_data = msgpack.packb(structure, default=encoder, use_bin_type=True)
    return packed_data
def decode(packed_data, custom_decoder=None):
    """ Decode a binary byte sequence into a structure containing petlib objects. May define a custom decoder for custom classes. """
    # NOTE(review): the `encoding` keyword was removed in msgpack >= 1.0
    # (replaced by raw=False); this assumes msgpack < 1.0 -- confirm the
    # pinned dependency version before upgrading.
    decoder = make_decoder(custom_decoder)
    structure = msgpack.unpackb(
        packed_data,
        ext_hook=decoder,
        encoding='utf-8')
    return structure
# --- TESTS ---
def test_basic():
    """Round-trip plain bytes/unicode through raw msgpack (sanity check)."""
    x = [b'spam', u'egg']
    packed = msgpack.packb(x, use_bin_type=True)
    y = msgpack.unpackb(packed, encoding='utf-8')
    assert x == y
def test_bn():
    """Round-trip positive and negative Bn values via default/ext_hook."""
    bn1, bn2 = Bn(1), Bn(2)
    test_data = [bn1, bn2, -bn1, -bn2]
    packed = msgpack.packb(test_data, default=default, use_bin_type=True)
    x = msgpack.unpackb(packed, ext_hook=ext_hook, encoding='utf-8')
    assert x == test_data
def test_ecgroup():
    """Round-trip an EcGroup."""
    G = EcGroup()
    test_data = [G]
    packed = msgpack.packb(test_data, default=default, use_bin_type=True)
    x = msgpack.unpackb(packed, ext_hook=ext_hook, encoding='utf-8')
    assert x == test_data
def test_ecpt():
    """Round-trip an EcPt (the group generator)."""
    G = EcGroup()
    test_data = [G.generator()]
    packed = msgpack.packb(test_data, default=default, use_bin_type=True)
    x = msgpack.unpackb(packed, ext_hook=ext_hook, encoding='utf-8')
    assert x == test_data
def test_mixed():
    """Round-trip a mix of all three built-in petlib types."""
    G = EcGroup()
    test_data = [G, G.generator(), G.order()]
    packed = msgpack.packb(test_data, default=default, use_bin_type=True)
    x = msgpack.unpackb(packed, ext_hook=ext_hook, encoding='utf-8')
    assert x == test_data
def test_enc_dec():
    """Same as test_mixed but through the public encode()/decode() API."""
    G = EcGroup()
    test_data = [G, G.generator(), G.order()]
    packed = encode(test_data)
    x = decode(packed)
    assert x == test_data
def test_enc_dec_dict():
    """Petlib objects work as dict keys and values through encode/decode."""
    G = EcGroup()
    # , G.generator():"1", "2":G.order()}
    test_data = {G.order(): [G, G.generator()]}
    packed = encode(test_data)
    x = decode(packed)
    assert x[G.order()] == test_data[G.order()]
def test_enc_dec_custom():
    """Custom encoder/decoder callbacks handle an unregistered class."""
    # Define a custom class, encoder and decoder
    class CustomClass:
        def __eq__(self, other):
            return isinstance(other, CustomClass)
    def enc_CustomClass(obj):
        if isinstance(obj, CustomClass):
            return msgpack.ExtType(11, b'')
        raise TypeError("Unknown type: %r" % (obj,))
    def dec_CustomClass(code, data):
        if code == 11:
            return CustomClass()
        return msgpack.ExtType(code, data)
    # Define a structure
    G = EcGroup()
    custom_obj = CustomClass()
    test_data = [G, G.generator(), G.order(), custom_obj]
    # Encode and decode custom structure
    packed = encode(test_data, enc_CustomClass)
    x = decode(packed, dec_CustomClass)
    assert x == test_data
def test_streaming():
    """Several concatenated encodings decode via a streaming Unpacker."""
    # Define a custom class, encoder and decoder
    class CustomClass2:
        def __eq__(self, other):
            return isinstance(other, CustomClass2)
    def enc_CustomClass(obj):
        if isinstance(obj, CustomClass2):
            return msgpack.ExtType(12, b'')
        raise TypeError("Unknown type: %r" % (obj,))
    def dec_CustomClass(code, data):
        if code == 12:
            return CustomClass2()
        return msgpack.ExtType(code, data)
    # Define a structure
    G = EcGroup()
    custom_obj = CustomClass2()
    test_data = [G, G.generator(), G.order(), custom_obj]
    packed1 = encode(test_data, enc_CustomClass)
    packed2 = encode(test_data, enc_CustomClass)
    data = packed1 + packed2
    decoder = make_decoder(dec_CustomClass)
    Up = msgpack.Unpacker(ext_hook=decoder)
    Up.feed(data)
    for o in Up:
        assert o == test_data
def test_docstring():
    """Exercise the register_coders() flow shown in the module docstring."""
    # Define a custom class, encoder and decoder
    class CustomType:
        def __eq__(self, other):
            return isinstance(other, CustomType)
    def enc_custom(obj):
        return b''
    def dec_custom(data):
        return CustomType()
    _init_coders()
    register_coders(CustomType, 14, enc_custom, dec_custom)
    assert CustomType in _pack_reg
    # Define a structure
    G = EcGroup()
    custom_obj = CustomType()
    test_data = [G, G.generator(), G.order(), custom_obj]
    # Encode and decode custom structure
    packed = encode(test_data)
    x = decode(packed)
    assert x == test_data
    # reset the registries so the extra coder does not leak into other tests
    _init_coders()
|
|
# -*- coding: utf-8 -*-
"""
formlayout
==========
Module creating Qt form dialogs/layouts to edit various type of parameters
formlayout License Agreement (MIT License)
------------------------------------------
Copyright (c) 2009 Pierre Raybaut
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
# History:
# 1.0.10: added float validator (disable "Ok" and "Apply" button when not valid)
# 1.0.7: added support for "Apply" button
# 1.0.6: code cleaning
__version__ = '1.0.10'
__license__ = __doc__
DEBUG = False
import sys
STDERR = sys.stderr
from matplotlib.backends.qt4_compat import QtGui,QtCore
from matplotlib.colors import rgb2hex
if not hasattr(QtGui,'QFormLayout'):
raise ImportError, "Warning: formlayout requires PyQt4 >v4.3 or PySide"
(QWidget, QLineEdit, QComboBox, QLabel, QSpinBox, QIcon,QStyle,
QDialogButtonBox, QHBoxLayout, QVBoxLayout, QDialog, QColor, QPushButton,
QCheckBox, QColorDialog, QPixmap, QTabWidget, QApplication, QStackedWidget,
QDateEdit, QDateTimeEdit, QFont, QFontComboBox, QFontDatabase, QGridLayout,
QFormLayout, QDoubleValidator) =\
(QtGui.QWidget, QtGui.QLineEdit, QtGui.QComboBox, QtGui.QLabel,
QtGui.QSpinBox, QtGui.QIcon, QtGui.QStyle, QtGui.QDialogButtonBox,
QtGui.QHBoxLayout, QtGui.QVBoxLayout, QtGui.QDialog, QtGui.QColor,
QtGui.QPushButton, QtGui.QCheckBox, QtGui.QColorDialog, QtGui.QPixmap,
QtGui.QTabWidget, QtGui.QApplication, QtGui.QStackedWidget, QtGui.QDateEdit,
QtGui.QDateTimeEdit, QtGui.QFont, QtGui.QFontComboBox, QtGui.QFontDatabase,
QtGui.QGridLayout, QtGui.QFormLayout, QtGui.QDoubleValidator)
(Qt, SIGNAL, SLOT, QObject, QSize,pyqtSignature, pyqtProperty) =\
(QtCore.Qt, QtCore.SIGNAL, QtCore.SLOT, QtCore.QObject, QtCore.QSize,
QtCore.Slot, QtCore.Property)
import datetime
class ColorButton(QPushButton):
    """
    Color choosing push button
    """
    # legacy PyQt4 signal declaration (old-style connect/emit API below)
    __pyqtSignals__ = ("colorChanged(QColor)",)
    def __init__(self, parent=None):
        QPushButton.__init__(self, parent)
        self.setFixedSize(20, 20)
        self.setIconSize(QSize(12, 12))
        self.connect(self, SIGNAL("clicked()"), self.choose_color)
        # current colour; starts as an invalid QColor()
        self._color = QColor()
    def choose_color(self):
        # open the native colour dialog seeded with the current colour
        color = QColorDialog.getColor(self._color,self.parentWidget(),'')
        if color.isValid():
            self.set_color(color)
    def get_color(self):
        return self._color
    @QtCore.Slot("QColor")
    def set_color(self, color):
        # only react to genuine changes to avoid emit/update loops
        if color != self._color:
            self._color = color
            self.emit(SIGNAL("colorChanged(QColor)"), self._color)
            pixmap = QPixmap(self.iconSize())
            pixmap.fill(color)
            self.setIcon(QtGui.QIcon(pixmap))
    # expose as a Qt property so .color assignment works from ColorLayout
    color = QtCore.Property("QColor", get_color, set_color)
def col2hex(color):
    """Convert matplotlib color to hex before passing to Qt"""
    # Bug fix: colorConverter was referenced but never imported at module
    # level (only rgb2hex is), so every call raised NameError. Imported
    # locally to leave the module's import block untouched.
    from matplotlib.colors import colorConverter
    return rgb2hex(colorConverter.to_rgb(color))
def to_qcolor(color):
    """Create a QColor from a matplotlib color"""
    qcolor = QtGui.QColor()
    color = str(color)
    try:
        color = col2hex(color)
    except ValueError:
        #print('WARNING: ignoring invalid color %r' % color)
        return qcolor # return invalid QColor
    qcolor.setNamedColor(color) # set using hex color
    return qcolor # return valid QColor
def text_to_qcolor(text):
    """
    Create a QColor from specified string
    Avoid warning from Qt when an invalid QColor is instantiated
    """
    color = QColor()
    if isinstance(text, QObject):
        # actually a QString, which is not provided by the new PyQt4 API:
        text = str(text)
    # NOTE: `unicode` makes this module Python-2-only
    if not isinstance(text, (unicode, str)):
        return color
    # '#rrggbb' form: validate every character by hand to avoid Qt warnings
    if text.startswith('#') and len(text)==7:
        correct = '#0123456789abcdef'
        for char in text:
            if char.lower() not in correct:
                return color
    # otherwise only accept one of Qt's named colors
    elif text not in list(QColor.colorNames()):
        return color
    color.setNamedColor(text)
    return color
def is_matplotlib_color(value):
    """
    Check if value is a color passed to us from matplotlib.
    It could either be a valid color string or a 3-tuple of floats between 0. and 1.
    """
    if text_to_qcolor(value).isValid():
        return True
    is_float_triple = (isinstance(value, tuple) and len(value) == 3
                       and all(isinstance(component, float) for component in value))
    if not is_float_triple:
        return False
    # an RGB triple is a colour only if every channel lies in [0, 1]
    return all(0. <= component <= 1. for component in value)
class ColorLayout(QHBoxLayout):
    """Color-specialized QLineEdit layout"""
    def __init__(self, color, parent=None):
        QHBoxLayout.__init__(self)
        assert isinstance(color, QColor)
        # text field and button stay in sync both ways via the two slots below
        self.lineedit = QLineEdit(color.name(), parent)
        self.connect(self.lineedit, SIGNAL("textChanged(QString)"),
                     self.update_color)
        self.addWidget(self.lineedit)
        self.colorbtn = ColorButton(parent)
        self.colorbtn.color = color
        self.connect(self.colorbtn, SIGNAL("colorChanged(QColor)"),
                     self.update_text)
        self.addWidget(self.colorbtn)
    def update_color(self, text):
        # typing: push a valid colour name/hex into the button
        color = text_to_qcolor(text)
        if color.isValid():
            self.colorbtn.color = color
    def update_text(self, color):
        # picking via the button: reflect the choice in the text field
        self.lineedit.setText(color.name())
    def text(self):
        return self.lineedit.text()
def font_is_installed(font):
    """Check if font is installed"""
    # returns a (possibly empty) list used as a boolean by callers,
    # not an actual bool; `unicode` makes this Python-2-only
    return [fam for fam in QFontDatabase().families() if unicode(fam)==font]
def tuple_to_qfont(tup):
    """
    Create a QFont from tuple:
        (family [string], size [int], italic [bool], bold [bool])
    Returns None when *tup* does not have exactly that shape.
    """
    if not isinstance(tup, tuple) or len(tup) != 4 \
       or not font_is_installed(tup[0]) \
       or not isinstance(tup[1], int) \
       or not isinstance(tup[2], bool) \
       or not isinstance(tup[3], bool):
        return None
    font = QFont()
    family, size, italic, bold = tup
    font.setFamily(family)
    font.setPointSize(size)
    font.setItalic(italic)
    font.setBold(bold)
    return font
def qfont_to_tuple(font):
    """Inverse of tuple_to_qfont: (family, size, italic, bold)."""
    return (unicode(font.family()), int(font.pointSize()),
            font.italic(), font.bold())
class FontLayout(QGridLayout):
    """Font selection"""
    def __init__(self, value, parent=None):
        QGridLayout.__init__(self)
        # value is a (family, size, italic, bold) tuple
        font = tuple_to_qfont(value)
        assert font is not None
        # Font family
        self.family = QFontComboBox(parent)
        self.family.setCurrentFont(font)
        self.addWidget(self.family, 0, 0, 1, -1)
        # Font size
        self.size = QComboBox(parent)
        self.size.setEditable(True)
        # NOTE: concatenating range() results is Python-2-only syntax
        # (py3 range objects don't support "+")
        sizelist = range(6, 12) + range(12, 30, 2) + [36, 48, 72]
        size = font.pointSize()
        if size not in sizelist:
            sizelist.append(size)
            sizelist.sort()
        self.size.addItems([str(s) for s in sizelist])
        self.size.setCurrentIndex(sizelist.index(size))
        self.addWidget(self.size, 1, 0)
        # Italic or not
        self.italic = QCheckBox(self.tr("Italic"), parent)
        self.italic.setChecked(font.italic())
        self.addWidget(self.italic, 1, 1)
        # Bold or not
        self.bold = QCheckBox(self.tr("Bold"), parent)
        self.bold.setChecked(font.bold())
        self.addWidget(self.bold, 1, 2)
    def get_font(self):
        """Return the selected font as a (family, size, italic, bold) tuple."""
        font = self.family.currentFont()
        font.setItalic(self.italic.isChecked())
        font.setBold(self.bold.isChecked())
        font.setPointSize(int(self.size.currentText()))
        return qfont_to_tuple(font)
def is_edit_valid(edit):
    """True when the line edit's validator accepts its current text."""
    text = edit.text()
    state = edit.validator().validate(text, 0)[0]
    return state == QDoubleValidator.Acceptable
class FormWidget(QWidget):
    """Builds one editor widget per (label, value) pair and reads values back.

    The widget type is inferred from the value's Python type in setup();
    get() must mirror that dispatch exactly to convert field contents back.
    """
    def __init__(self, data, comment="", parent=None):
        QWidget.__init__(self, parent)
        from copy import deepcopy
        # deep-copied so editing never mutates the caller's structure
        self.data = deepcopy(data)
        self.widgets = []
        self.formlayout = QFormLayout(self)
        if comment:
            self.formlayout.addRow(QLabel(comment))
            self.formlayout.addRow(QLabel(" "))
        if DEBUG:
            print("\n"+("*"*80))
            print("DATA:", self.data)
            print("*"*80)
            print("COMMENT:", comment)
            print("*"*80)
    def get_dialog(self):
        """Return FormDialog instance"""
        dialog = self.parent()
        while not isinstance(dialog, QDialog):
            dialog = dialog.parent()
        return dialog
    def setup(self):
        """Create one field widget per data entry (dispatch on value type)."""
        for label, value in self.data:
            if DEBUG:
                print("value:", value)
            if label is None and value is None:
                # Separator: (None, None)
                self.formlayout.addRow(QLabel(" "), QLabel(" "))
                self.widgets.append(None)
                continue
            elif label is None:
                # Comment
                self.formlayout.addRow(QLabel(value))
                self.widgets.append(None)
                continue
            elif tuple_to_qfont(value) is not None:
                field = FontLayout(value, self)
            elif is_matplotlib_color(value):
                field = ColorLayout(QColor(value), self)
            elif isinstance(value, (str, unicode)):
                field = QLineEdit(value, self)
            elif isinstance(value, (list, tuple)):
                # combo box: first element is the initial selection, the rest
                # are the choices (either plain values or (key, value) pairs)
                if isinstance(value, tuple):
                    value = list(value)
                selindex = value.pop(0)
                field = QComboBox(self)
                if isinstance(value[0], (list, tuple)):
                    keys = [ key for key, _val in value ]
                    value = [ val for _key, val in value ]
                else:
                    keys = value
                field.addItems(value)
                if selindex in value:
                    selindex = value.index(selindex)
                elif selindex in keys:
                    selindex = keys.index(selindex)
                elif not isinstance(selindex, int):
                    print("Warning: '%s' index is invalid (label: " \
                          "%s, value: %s)" % (selindex, label, value), file=STDERR)
                    selindex = 0
                field.setCurrentIndex(selindex)
            elif isinstance(value, bool):
                field = QCheckBox(self)
                if value:
                    field.setCheckState(Qt.Checked)
                else :
                    field.setCheckState(Qt.Unchecked)
            elif isinstance(value, float):
                # float fields get a validator and live-update the Ok/Apply
                # buttons through the owning dialog
                field = QLineEdit(repr(value), self)
                field.setValidator(QDoubleValidator(field))
                dialog = self.get_dialog()
                dialog.register_float_field(field)
                self.connect(field, SIGNAL('textChanged(QString)'),
                             lambda text: dialog.update_buttons())
            elif isinstance(value, int):
                field = QSpinBox(self)
                field.setRange(-1e9, 1e9)
                field.setValue(value)
            elif isinstance(value, datetime.datetime):
                field = QDateTimeEdit(self)
                field.setDateTime(value)
            elif isinstance(value, datetime.date):
                field = QDateEdit(self)
                field.setDate(value)
            else:
                field = QLineEdit(repr(value), self)
            self.formlayout.addRow(label, field)
            self.widgets.append(field)
    def get(self):
        """Read every field back, converting to the original value's type."""
        valuelist = []
        for index, (label, value) in enumerate(self.data):
            field = self.widgets[index]
            if label is None:
                # Separator / Comment
                continue
            elif tuple_to_qfont(value) is not None:
                value = field.get_font()
            elif isinstance(value, (str, unicode)) or is_matplotlib_color(value):
                value = unicode(field.text())
            elif isinstance(value, (list, tuple)):
                index = int(field.currentIndex())
                if isinstance(value[0], (list, tuple)):
                    value = value[index][0]
                else:
                    value = value[index]
            elif isinstance(value, bool):
                value = field.checkState() == Qt.Checked
            elif isinstance(value, float):
                value = float(str(field.text()))
            elif isinstance(value, int):
                value = int(field.value())
            elif isinstance(value, datetime.datetime):
                value = field.dateTime().toPyDateTime()
            elif isinstance(value, datetime.date):
                value = field.date().toPyDate()
            else:
                # HACK: arbitrary values are round-tripped through repr/eval;
                # eval() on field text is a code-execution risk if the form
                # ever shows untrusted data
                value = eval(str(field.text()))
            valuelist.append(value)
        return valuelist
class FormComboWidget(QWidget):
    """Several FormWidget pages selected by a combo box (stacked layout)."""
    def __init__(self, datalist, comment="", parent=None):
        QWidget.__init__(self, parent)
        layout = QVBoxLayout()
        self.setLayout(layout)
        self.combobox = QComboBox()
        layout.addWidget(self.combobox)
        self.stackwidget = QStackedWidget(self)
        layout.addWidget(self.stackwidget)
        # keep the visible page in sync with the combo selection
        self.connect(self.combobox, SIGNAL("currentIndexChanged(int)"),
                     self.stackwidget, SLOT("setCurrentIndex(int)"))
        self.widgetlist = []
        for data, title, comment in datalist:
            self.combobox.addItem(title)
            widget = FormWidget(data, comment=comment, parent=self)
            self.stackwidget.addWidget(widget)
            self.widgetlist.append(widget)
    def setup(self):
        for widget in self.widgetlist:
            widget.setup()
    def get(self):
        """Return one value list per page."""
        return [ widget.get() for widget in self.widgetlist]
class FormTabWidget(QWidget):
    """Several form pages shown as tabs; each page may itself be a combo form."""
    def __init__(self, datalist, comment="", parent=None):
        QWidget.__init__(self, parent)
        layout = QVBoxLayout()
        self.tabwidget = QTabWidget()
        layout.addWidget(self.tabwidget)
        self.setLayout(layout)
        self.widgetlist = []
        for data, title, comment in datalist:
            # 3-element entries denote a nested (data, title, comment) list
            if len(data[0])==3:
                widget = FormComboWidget(data, comment=comment, parent=self)
            else:
                widget = FormWidget(data, comment=comment, parent=self)
            index = self.tabwidget.addTab(widget, title)
            self.tabwidget.setTabToolTip(index, comment)
            self.widgetlist.append(widget)
    def setup(self):
        for widget in self.widgetlist:
            widget.setup()
    def get(self):
        """Return one value list per tab."""
        return [ widget.get() for widget in self.widgetlist]
class FormDialog(QDialog):
    """Form Dialog

    Wraps a form widget (plain, tabbed, or combo-paged depending on the shape
    of `data`) together with an Ok/Cancel button box and an optional Apply
    button. The result is retrieved with get() after the dialog closes.
    """
    def __init__(self, data, title="", comment="",
                 icon=None, parent=None, apply=None):
        QDialog.__init__(self, parent)
        # `apply` (note: shadows the builtin) is an optional callback invoked
        # with the current form values when the Apply button is clicked.
        self.apply_callback = apply
        # Form: pick the widget flavor from the shape of `data`
        if isinstance(data[0][0], (list, tuple)):
            # datagroup of datalists/datagroups -> one tab per entry
            self.formwidget = FormTabWidget(data, comment=comment,
                                            parent=self)
        elif len(data[0])==3:
            # datagroup -> one combo-selected page per entry
            self.formwidget = FormComboWidget(data, comment=comment,
                                              parent=self)
        else:
            # plain datalist -> a single form page
            self.formwidget = FormWidget(data, comment=comment,
                                         parent=self)
        layout = QVBoxLayout()
        layout.addWidget(self.formwidget)
        # Float line-edits register themselves here (register_float_field) so
        # their validity can gate the Ok/Apply buttons in update_buttons().
        self.float_fields = []
        self.formwidget.setup()
        # Button box
        self.bbox = bbox = QDialogButtonBox(QDialogButtonBox.Ok
                                            |QDialogButtonBox.Cancel)
        self.connect(self.formwidget, SIGNAL('update_buttons()'),
                     self.update_buttons)
        if self.apply_callback is not None:
            apply_btn = bbox.addButton(QDialogButtonBox.Apply)
            self.connect(apply_btn, SIGNAL("clicked()"), self.apply)
        self.connect(bbox, SIGNAL("accepted()"), SLOT("accept()"))
        self.connect(bbox, SIGNAL("rejected()"), SLOT("reject()"))
        layout.addWidget(bbox)
        self.setLayout(layout)
        self.setWindowTitle(title)
        if not isinstance(icon, QIcon):
            # Fall back to the platform's standard question-mark icon
            icon = QWidget().style().standardIcon(QStyle.SP_MessageBoxQuestion)
        self.setWindowIcon(icon)
    def register_float_field(self, field):
        # Called by the form widget for every float line-edit it creates.
        self.float_fields.append(field)
    def update_buttons(self):
        # Enable Ok/Apply only while every registered float field is valid.
        valid = True
        for field in self.float_fields:
            if not is_edit_valid(field):
                valid = False
        for btn_type in (QDialogButtonBox.Ok, QDialogButtonBox.Apply):
            btn = self.bbox.button(btn_type)
            if btn is not None:
                btn.setEnabled(valid)
    def accept(self):
        # Snapshot the form values before closing; retrieved later via get().
        self.data = self.formwidget.get()
        QDialog.accept(self)
    def reject(self):
        # Canceling leaves no result.
        self.data = None
        QDialog.reject(self)
    def apply(self):
        # Forward the current form values to the user-supplied callback.
        self.apply_callback(self.formwidget.get())
    def get(self):
        """Return form result"""
        return self.data
def fedit(data, title="", comment="", icon=None, parent=None, apply=None):
    """Show a form dialog built from `data` and return its result.

    Returns None if the Cancel button is pressed.

    data: datalist, datagroup
    title: string
    comment: string
    icon: QIcon instance
    parent: parent QWidget
    apply: apply callback (function)

    datalist: list/tuple of (field_name, field_value)
    datagroup: list/tuple of (datalist *or* datagroup, title, comment)

    -> one field for each member of a datalist
    -> one tab for each member of a top-level datagroup
    -> one page (of a multipage widget, each page can be selected with a combo
       box) for each member of a datagroup inside a datagroup

    Supported types for field_value:
    - int, float, str, unicode, bool
    - colors: in Qt-compatible text form, i.e. in hex format or name (red,...)
      (automatically detected from a string)
    - list/tuple:
      * the first element will be the selected index (or value)
      * the other elements can be couples (key, value) or only values
    """
    # Spin up a QApplication when none exists yet (e.g. if the module is used
    # directly from the interpreter); the local keeps it alive through exec_().
    if QApplication.startingUp():
        _app = QApplication([])
    dialog = FormDialog(data, title, comment, icon, parent, apply)
    accepted = dialog.exec_()
    return dialog.get() if accepted else None
if __name__ == "__main__":
    # Self-test / demo: exercises all three data shapes accepted by fedit().
    def create_datalist_example():
        # One (name, value) couple per form field; a None name yields a
        # separator/comment line.
        return [('str', 'this is a string'),
                ('list', [0, '1', '3', '4']),
                ('list2', ['--', ('none', 'None'), ('--', 'Dashed'),
                           ('-.', 'DashDot'), ('-', 'Solid'),
                           ('steps', 'Steps'), (':', 'Dotted')]),
                ('float', 1.2),
                (None, 'Other:'),
                ('int', 12),
                ('font', ('Arial', 10, False, True)),
                ('color', '#123409'),
                ('bool', True),
                ('date', datetime.date(2010, 10, 10)),
                ('datetime', datetime.datetime(2010, 10, 10)),
                ]
    def create_datagroup_example():
        # Three categories sharing the same field list.
        datalist = create_datalist_example()
        return ((datalist, "Category 1", "Category 1 comment"),
                (datalist, "Category 2", "Category 2 comment"),
                (datalist, "Category 3", "Category 3 comment"))
    #--------- datalist example
    datalist = create_datalist_example()
    def apply_test(data):
        print("data:", data)
    print("result:", fedit(datalist, title="Example",
                           comment="This is just an <b>example</b>.",
                           apply=apply_test))
    #--------- datagroup example
    datagroup = create_datagroup_example()
    print("result:", fedit(datagroup, "Global title"))
    #--------- datagroup inside a datagroup example
    datalist = create_datalist_example()
    datagroup = create_datagroup_example()
    print("result:", fedit(((datagroup, "Title 1", "Tab 1 comment"),
                            (datalist, "Title 2", "Tab 2 comment"),
                            (datalist, "Title 3", "Tab 3 comment")),
                           "Global title"))
|
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-source-context documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
# NOTE(review): left empty on purpose; `release`/`version` below will
# therefore be empty strings — presumably filled at build time. Confirm.
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "sphinx.ext.coverage",
    "sphinx.ext.doctest",
    "sphinx.ext.napoleon",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
    "recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-source-context"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns on `language = None`; consider "en".
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
    "_build",
    "**/.nox/**/*",
    "samples/AUTHORING_GUIDE.md",
    "samples/CONTRIBUTING.md",
    "samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "description": "Google Cloud Client Libraries for google-cloud-source-context",
    "github_user": "googleapis",
    "github_repo": "python-source-context",
    "github_banner": True,
    "font_family": "'Roboto', Georgia, sans",
    "head_font_family": "'Roboto', Georgia, serif",
    "code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-source-context-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
    # Temporarily suppress this to avoid "more than one target found for
    # cross-reference" warning, which are intractable for us to avoid while in
    # a mono-repo.
    # See https://github.com/sphinx-doc/sphinx/blob
    # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
    "ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        root_doc,
        "google-cloud-source-context.tex",
        "google-cloud-source-context Documentation",
        author,
        "manual",
    )
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        root_doc,
        "google-cloud-source-context",
        "google-cloud-source-context Documentation",
        [author],
        1,
    )
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        root_doc,
        "google-cloud-source-context",
        "google-cloud-source-context Documentation",
        author,
        "google-cloud-source-context",
        "google-cloud-source-context Library",
        "APIs",
    )
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    "python": ("https://python.readthedocs.org/en/latest/", None),
    "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
    "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
    "grpc": ("https://grpc.github.io/grpc/python/", None),
    "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
    "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
|
|
from __future__ import division
from builtins import zip
from past.utils import old_div
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MaxNLocator
import threeML.plugins.SpectrumLike
import threeML.plugins.PhotometryLike
from threeML.io.plotting.cmap_cycle import cmap_intervals
from threeML.exceptions.custom_exceptions import custom_warnings
from threeML.config.config import threeML_config
from threeML.io.plotting.step_plot import step_plot
from threeML.io.plotting.data_residual_plot import ResidualPlot
# This file contains plots which are plotted in data space after a model has been
# assigned to the plugin.
# Sentinel minimum rate, small enough that rebinning never merges any
# channels — i.e. "no rebinning".
NO_REBIN = 1e-99
def display_spectrum_model_counts(analysis, data=(), **kwargs):
    """
    Display the fitted model count spectrum of one or more Spectrum plugins

    NOTE: all parameters passed as keyword arguments that are not in the list below, will be passed as keyword arguments
    to the plt.subplots() constructor. So for example, you can specify the size of the figure using figsize = (20,10)

    :param analysis: the fitted analysis whose data_list holds the plugins
    :param data: (optional) subset of dataset keys to plot (default: all)
    :param min_rate: (optional) rebin to keep this minimum rate in each channel (if possible). If one number is
    provided, the same minimum rate is used for each dataset, otherwise a list can be provided with the minimum rate
    for each dataset
    :param data_cmap: (str) (optional) the color map used to extract automatically the colors for the data
    :param model_cmap: (str) (optional) the color map used to extract automatically the colors for the models
    :param data_colors: (optional) a tuple or list with the color for each dataset
    :param model_colors: (optional) a tuple or list with the color for each folded model
    :param data_color: (optional) color for all datasets
    :param model_color: (optional) color for all folded models
    :param show_legend: (optional) if True (default), shows a legend
    :param step: (optional) if True (default), show the folded model as steps, if False, the folded model is plotted
    with linear interpolation between each bin
    :param model_subplot: (optional) axe(s) to plot to for overplotting
    :param ratio_residuals: (optional) if True, residuals are plotted as ratios
    :raises RuntimeError: if none of the requested datasets is a SpectrumLike plugin
    :return: figure instance
    """
    # If the user supplies a subset of the data, we will use that
    if not data:
        data_keys = list(analysis.data_list.keys())
    else:
        data_keys = data
    # Now we want to make sure that we only grab OGIP plugins
    new_data_keys = []
    for key in data_keys:
        # Make sure it is a valid key
        if key in list(analysis.data_list.keys()):
            if isinstance(
                analysis.data_list[key], threeML.plugins.SpectrumLike.SpectrumLike
            ):
                new_data_keys.append(key)
            else:
                custom_warnings.warn(
                    "Dataset %s is not of the SpectrumLike kind. Cannot be plotted by "
                    "display_spectrum_model_counts" % key
                )
    if not new_data_keys:
        # BUG FIX: the RuntimeError was previously instantiated but never
        # raised, so execution continued with an empty key list and failed
        # later with a much less informative error.
        raise RuntimeError(
            "There were no valid SpectrumLike data requested for plotting. Please use the detector names in the data list"
        )
    data_keys = new_data_keys
    # default settings
    # Default is to show the model with steps
    step = True
    data_cmap = threeML_config["ogip"]["data plot cmap"]  # plt.cm.rainbow
    model_cmap = threeML_config["ogip"]["model plot cmap"]  # plt.cm.nipy_spectral_r
    # Legend is on by default
    show_legend = True
    show_residuals = True
    # Default colors
    data_colors = cmap_intervals(len(data_keys), data_cmap)
    model_colors = cmap_intervals(len(data_keys), model_cmap)
    # Now override defaults according to the optional keywords, if present.
    # Keywords are pop()ed so that the remainder of kwargs can be forwarded
    # to the ResidualPlot/plt.subplots constructor below.
    if "show_data" in kwargs:
        show_data = bool(kwargs.pop("show_data"))
    else:
        show_data = True
    if "show_legend" in kwargs:
        show_legend = bool(kwargs.pop("show_legend"))
    if "show_residuals" in kwargs:
        show_residuals = bool(kwargs.pop("show_residuals"))
    if "step" in kwargs:
        step = bool(kwargs.pop("step"))
    if "min_rate" in kwargs:
        min_rate = kwargs.pop("min_rate")
        # If min_rate is a floating point, use the same for all datasets, otherwise use the provided ones
        try:
            min_rate = float(min_rate)
            min_rates = [min_rate] * len(data_keys)
        except TypeError:
            min_rates = list(min_rate)
            assert len(min_rates) >= len(data_keys), (
                "If you provide different minimum rates for each data set, you need"
                "to provide an iterable of the same length of the number of datasets"
            )
    else:
        # This is the default (no rebinning)
        min_rates = [NO_REBIN] * len(data_keys)
    if "data_cmap" in kwargs:
        data_cmap = plt.get_cmap(kwargs.pop("data_cmap"))
        data_colors = cmap_intervals(len(data_keys), data_cmap)
    if "model_cmap" in kwargs:
        model_cmap = kwargs.pop("model_cmap")
        model_colors = cmap_intervals(len(data_keys), model_cmap)
    if "data_colors" in kwargs:
        data_colors = kwargs.pop("data_colors")
        assert len(data_colors) >= len(data_keys), (
            "You need to provide at least a number of data colors equal to the "
            "number of datasets"
        )
    elif "data_color" in kwargs:
        data_colors = [kwargs.pop("data_color")] * len(data_keys)
    if "model_colors" in kwargs:
        model_colors = kwargs.pop("model_colors")
        assert len(model_colors) >= len(data_keys), (
            "You need to provide at least a number of model colors equal to the "
            "number of datasets"
        )
    elif "model_color" in kwargs:
        # BUG FIX: this branch previously chained off the "ratio_residuals"
        # check, so a single model_color was silently ignored whenever
        # ratio_residuals was also passed.
        model_colors = [kwargs.pop("model_color")] * len(data_keys)
    # BUG FIX: pop (rather than just read) ratio_residuals so it does not
    # leak into the ResidualPlot(**kwargs) call below.
    ratio_residuals = bool(kwargs.pop("ratio_residuals", False))
    if "model_labels" in kwargs:
        model_labels = kwargs.pop("model_labels")
        assert len(model_labels) == len(
            data_keys
        ), "you must have the same number of model labels as data sets"
    else:
        model_labels = ["%s Model" % analysis.data_list[key]._name for key in data_keys]
    # fig, (ax, ax1) = plt.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [2, 1]}, **kwargs)
    residual_plot = ResidualPlot(show_residuals=show_residuals, **kwargs)
    if show_residuals:
        axes = [residual_plot.data_axis, residual_plot.residual_axis]
    else:
        axes = residual_plot.data_axis
    # go thru the detectors
    for key, data_color, model_color, min_rate, model_label in zip(
        data_keys, data_colors, model_colors, min_rates, model_labels
    ):
        # NOTE: we use the original (unmasked) vectors because we need to rebin ourselves the data later on
        data = analysis.data_list[
            key
        ]  # type: threeML.plugins.SpectrumLike.SpectrumLike
        data.display_model(
            data_color=data_color,
            model_color=model_color,
            min_rate=min_rate,
            step=step,
            show_residuals=show_residuals,
            show_data=show_data,
            show_legend=show_legend,
            ratio_residuals=ratio_residuals,
            model_label=model_label,
            model_subplot=axes,
        )
    return residual_plot.figure
def display_photometry_model_magnitudes(analysis, data=(), **kwargs):
    """
    Display the fitted model magnitudes of one or more Photometry plugins

    NOTE: all parameters passed as keyword arguments that are not in the list below, will be passed as keyword arguments
    to the plt.subplots() constructor. So for example, you can specify the size of the figure using figsize = (20,10)

    :param analysis: the fitted analysis whose data_list holds the plugins
    :param data: (optional) subset of dataset keys to plot (default: all)
    :param data_cmap: (str) (optional) the color map used to extract automatically the colors for the data
    :param model_cmap: (str) (optional) the color map used to extract automatically the colors for the models
    :param data_colors: (optional) a tuple or list with the color for each dataset
    :param model_colors: (optional) a tuple or list with the color for each folded model
    :param show_legend: (optional) if True (default), shows a legend
    :param step: (optional) if True (default), show the folded model as steps, if False, the folded model is plotted
    with linear interpolation between each bin
    :raises RuntimeError: if none of the requested datasets is a PhotometryLike plugin
    :return: figure instance
    """
    # If the user supplies a subset of the data, we will use that
    if not data:
        data_keys = list(analysis.data_list.keys())
    else:
        data_keys = data
    # Now we want to make sure that we only grab Photometry plugins
    new_data_keys = []
    for key in data_keys:
        # Make sure it is a valid key
        if key in list(analysis.data_list.keys()):
            if isinstance(
                analysis.data_list[key], threeML.plugins.PhotometryLike.PhotometryLike
            ):
                new_data_keys.append(key)
            else:
                # NOTE: "Photometery" typo fixed in the warning message
                custom_warnings.warn(
                    "Dataset %s is not of the Photometry kind. Cannot be plotted by "
                    "display_photometry_model_magnitudes" % key
                )
    if not new_data_keys:
        # BUG FIX: the RuntimeError was previously instantiated but never
        # raised, so execution continued with an empty key list.
        raise RuntimeError(
            "There were no valid Photometry data requested for plotting. Please use the detector names in the data list"
        )
    data_keys = new_data_keys
    # Default is to show the model with steps
    step = True
    data_cmap = threeML_config["photo"]["data plot cmap"]  # plt.cm.rainbow
    model_cmap = threeML_config["photo"]["model plot cmap"]  # plt.cm.nipy_spectral_r
    # Legend is on by default
    show_legend = True
    # Default colors
    data_colors = cmap_intervals(len(data_keys), data_cmap)
    model_colors = cmap_intervals(len(data_keys), model_cmap)
    # Now override defaults according to the optional keywords, if present.
    # Keywords are pop()ed so the remaining kwargs can be forwarded to the
    # ResidualPlot/plt.subplots constructor below.
    if "show_legend" in kwargs:
        show_legend = bool(kwargs.pop("show_legend"))
    if "step" in kwargs:
        step = bool(kwargs.pop("step"))
    if "data_cmap" in kwargs:
        data_cmap = plt.get_cmap(kwargs.pop("data_cmap"))
        data_colors = cmap_intervals(len(data_keys), data_cmap)
    if "model_cmap" in kwargs:
        model_cmap = kwargs.pop("model_cmap")
        model_colors = cmap_intervals(len(data_keys), model_cmap)
    if "data_colors" in kwargs:
        data_colors = kwargs.pop("data_colors")
        assert len(data_colors) >= len(data_keys), (
            "You need to provide at least a number of data colors equal to the "
            "number of datasets"
        )
    if "model_colors" in kwargs:
        model_colors = kwargs.pop("model_colors")
        assert len(model_colors) >= len(data_keys), (
            "You need to provide at least a number of model colors equal to the "
            "number of datasets"
        )
    residual_plot = ResidualPlot(**kwargs)
    # go thru the detectors
    for key, data_color, model_color in zip(data_keys, data_colors, model_colors):
        data = analysis.data_list[
            key
        ]  # type: threeML.plugins.PhotometryLike.PhotometryLike
        # get the expected counts
        avg_wave_length = (
            data._filter_set.effective_wavelength.value
        )  # type: np.ndarray
        # need to sort because filters are not always in order
        sort_idx = avg_wave_length.argsort()
        expected_model_magnitudes = data._get_total_expectation()[sort_idx]
        magnitudes = data.magnitudes[sort_idx]
        mag_errors = data.magnitude_errors[sort_idx]
        avg_wave_length = avg_wave_length[sort_idx]
        residuals = old_div((expected_model_magnitudes - magnitudes), mag_errors)
        widths = data._filter_set.wavelength_bounds.widths[sort_idx]
        residual_plot.add_data(
            x=avg_wave_length,
            y=magnitudes,
            xerr=widths,
            yerr=mag_errors,
            residuals=residuals,
            label=data._name,
            color=data_color,
        )
        residual_plot.add_model(
            avg_wave_length,
            expected_model_magnitudes,
            label="%s Model" % data._name,
            color=model_color,
        )
    # Magnitudes grow downward, hence the inverted y axis
    return residual_plot.finalize(
        xlabel="Wavelength\n(%s)" % data._filter_set.waveunits,
        ylabel="Magnitudes",
        xscale="linear",
        yscale="linear",
        invert_y=True,
    )
# def display_histogram_fit(analysis, data=(), **kwargs):
# if not data:
#
# data_keys = analysis.data_list.keys()
#
# else:
#
# data_keys = data
#
# # Now we want to make sure that we only grab OGIP plugins
#
# new_data_keys = []
#
# for key in data_keys:
#
# # Make sure it is a valid key
# if key in analysis.data_list.keys():
#
# if isinstance(analysis.data_list[key], threeML.plugins.HistLike.HistLike):
#
# new_data_keys.append(key)
#
# else:
#
# custom_warnings.warn("Dataset %s is not of the HistLike kind. Cannot be plotted by "
# "display_histogram_fit" % key)
#
# if not new_data_keys:
# RuntimeError(
# 'There were no valid HistLike data requested for plotting. Please use the names in the data list')
#
# data_keys = new_data_keys
#
# # default settings
#
# # Default is to show the model with steps
# step = True
#
# data_cmap = plt.get_cmap(threeML_config['ogip']['data plot cmap']) # plt.cm.rainbow
# model_cmap = plt.get_cmap(threeML_config['ogip']['model plot cmap']) # plt.cm.nipy_spectral_r
#
# # Legend is on by default
# show_legend = True
#
# log_axes = False
#
# # Default colors
#
# data_colors = map(lambda x: data_cmap(x), np.linspace(0.0, 1.0, len(data_keys)))
# model_colors = map(lambda x: model_cmap(x), np.linspace(0.0, 1.0, len(data_keys)))
#
# # Now override defaults according to the optional keywords, if present
#
# if 'show_legend' in kwargs:
# show_legend = bool(kwargs.pop('show_legend'))
#
# if 'step' in kwargs:
# step = bool(kwargs.pop('step'))
#
# if 'log_axes' in kwargs:
# log_axes = True
#
# if 'data_cmap' in kwargs:
# data_cmap = plt.get_cmap(kwargs.pop('data_cmap'))
# data_colors = map(lambda x: data_cmap(x), np.linspace(0.0, 1.0, len(data_keys)))
#
# if 'model_cmap' in kwargs:
# model_cmap = kwargs.pop('model_cmap')
# model_colors = map(lambda x: model_cmap(x), np.linspace(0.0, 1.0, len(data_keys)))
#
# if 'data_colors' in kwargs:
# data_colors = kwargs.pop('data_colors')
#
# assert len(data_colors) >= len(data_keys), "You need to provide at least a number of data colors equal to the " \
# "number of datasets"
#
# if 'model_colors' in kwargs:
# model_colors = kwargs.pop('model_colors')
#
# assert len(model_colors) >= len(
# data_keys), "You need to provide at least a number of model colors equal to the " \
# "number of datasets"
#
# fig, (ax, ax1) = plt.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [2, 1]}, **kwargs)
#
# # go thru the detectors
# for key, data_color, model_color in zip(data_keys, data_colors, model_colors):
#
# data = analysis.data_list[key]
#
# x_min, x_max = data.histogram.absolute_start, data.histogram.absolute_stop
#
# # Observed counts
# observed_counts = data.histogram.contents
#
# if data.is_poisson:
#
# cnt_err = np.sqrt(observed_counts)
#
# elif data.has_errors:
#
# cnt_err = data.histogram.errors
#
# width = data.histogram.widths
#
# expected_model = data.get_model()
#
# mean_x = []
#
# # For each bin find the weighted average of the channel center
#
# delta_x = [[], []]
#
# for bin in data.histogram:
#
# # Find all channels in this rebinned bin
# idx = (data.histogram.mid_points >= bin.start) & (data.histogram.mid_points <= bin.stop)
#
# # Find the rates for these channels
# r = expected_model[idx]
#
# if r.max() == 0:
#
# # All empty, cannot weight
# this_mean = bin.mid_point
#
# else:
#
# # Do the weighted average of the mean energies
# weights = r / np.sum(r)
#
# this_mean = np.average(data.histogram.mid_points[idx], weights=weights)
#
# # Compute "errors" for X (which aren't really errors, just to mark the size of the bin)
#
# delta_x[0].append(this_mean - bin.start)
# delta_x[1].append(bin.stop - this_mean)
# mean_x.append(this_mean)
#
# if data.has_errors:
#
# ax.errorbar(mean_x,
# data.histogram.contents / width,
# yerr=cnt_err / width,
# xerr=delta_x,
# fmt='.',
# markersize=3,
# linestyle='',
# # elinewidth=.5,
# alpha=.9,
# capsize=0,
# label=data._name,
# color=data_color)
#
# else:
#
# ax.errorbar(mean_x,
# data.histogram.contents / width,
# xerr=delta_x,
# fmt='.',
# markersize=3,
# linestyle='',
# # elinewidth=.5,
# alpha=.9,
# capsize=0,
# label=data._name,
# color=data_color)
#
# if step:
#
# step_plot(data.histogram.bin_stack,
# expected_model / width,
# ax, alpha=.8,
# label='%s Model' % data._name, color=model_color)
#
# else:
#
# ax.plot(data.histogram.mid_points, expected_model / width, alpha=.8, label='%s Model' % data._name,
# color=model_color)
#
# if data.is_poisson:
#
# # this is not correct I believe
#
# residuals = data.histogram.contents - expected_model
#
# else:
#
# if data.has_errors:
#
# residuals = (data.histogram.contents - expected_model) / data.histogram.errors
#
# else:
#
# residuals = data.histogram.contents - expected_model
#
# ax1.axhline(0, linestyle='--', color='k')
# ax1.errorbar(mean_x,
# residuals,
# yerr=np.ones_like(residuals),
# capsize=0,
# fmt='.',
# markersize=3,
# color=data_color)
#
# if show_legend:
# ax.legend(fontsize='x-small', loc=0)
#
# ax.set_ylabel("Y")
#
# if log_axes:
# ax.set_xscale('log')
# ax.set_yscale('log', nonposy='clip')
#
# ax1.set_xscale("log")
#
# locator = MaxNLocator(prune='upper', nbins=5)
# ax1.yaxis.set_major_locator(locator)
#
# ax1.set_xlabel("X")
# ax1.set_ylabel("Residuals\n($\sigma$)")
#
# # This takes care of making space for all labels around the figure
#
# fig.tight_layout()
#
# # Now remove the space between the two subplots
# # NOTE: this must be placed *after* tight_layout, otherwise it will be ineffective
#
# fig.subplots_adjust(hspace=0)
#
# return fig
#
#
|
|
"""
@name: Modules/Families/UPB/UPB_Pim.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2011-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Mar 27, 2011
@summary: This module is for communicating with UPB controllers.
/srv/backup/home/briank/svn/smarthouse/trunk/Modules/parts/upb/usbhidserial.cpp
"""
__updated__ = '2020-02-21'
# Import system type stuff
try:
import Queue
except ImportError:
import queue as Queue
# Import PyMh files
from Modules.Families.UPB.UPB_data import UPBData
from Modules.Families.UPB.UPB_constants import pim_commands
from Modules.Core.Utilities.debug_tools import FormatBytes
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.UPB_PIM ')
# UPB Control Word bit masks
# Page 15 of the UPB PIM Description document
LINK_PKT = 0x80
LOW_REQ = 0x02
ACK_REQ = 0x10
ACK_ID = 0x20
ACK_MSG = 0x40
# Timeouts (seconds) for the reactor-driven send/receive polling loops
SEND_TIMEOUT = 0.8
RECEIVE_TIMEOUT = 0.9  # this is for fetching data in the RX buffer
# Command types - first byte of every frame sent to the PIM
CTL_T = 0x14  # transmit a UPB Message
CTL_R = 0x12  # Read PIM Registers
CTL_W = 0x17  # Write PIM Registers
class BuildCommand(object):
    """
    Take a command bytearray and convert it to the ASCII form the PIM expects.

    Write register commands:
        The command for changing register 70's value to 03 is ==> 70 03.
        First we add a checksum (8D in this case) to the bytearray ==> 70 03 8D.
        Next 70 03 8D is converted to 37 30 30 33 38 44 by converting each
        nibble to an ASCII hex character.
        Finally the command becomes 14 37 30 30 33 38 44 0D and is queued for
        sending: 14 '70038D' 0D
    """

    @staticmethod
    def _nibble_to_hex(p_nibble):
        """
        Take the low order nibble and convert it to the ASCII code for its hex digit.
        0x01 ==> 0x31 ('1'); 0x0A ==> 0x41 ('A')
        @return: an int (the ASCII code)
        """
        l_ret = 0x30 + p_nibble
        if l_ret > 0x39:
            # 'A'..'F' start 7 code points after '9'
            l_ret += 0x07
        return l_ret

    @staticmethod
    def _byte_to_2chars(p_byte):
        """
        Take a single byte and return 2 bytes that are the ASCII hex equivalent.
        0x12 ==> 0x3132 ('12')
        @return: a 2 byte array of ints that are ASCII encoded.
        """
        l_ret = bytearray(2)
        # NOTE: must be integer (floor) division - '/' yields a float under
        # Python 3 and would crash _nibble_to_hex.
        l_ret[0] = BuildCommand._nibble_to_hex(p_byte // 16)
        l_ret[1] = BuildCommand._nibble_to_hex(p_byte % 16)
        return l_ret

    @staticmethod
    def _calculate_checksum(p_byteArray):
        """Take a ByteArray of arbitrary length and return the checksum.

        The checksum is the byte that makes the total of all bytes 0 mod 256.
        @param p_byteArray: is the byte array we will checksum.
        @return: the checksum byte (an int in 0..255)
        """
        l_cs = 0
        for l_byte in p_byteArray:
            if not isinstance(l_byte, int):
                # str / bytes-of-length-1 element (Python 2 style iteration)
                l_byte = ord(l_byte)
            l_cs = (l_cs + l_byte) % 256
        # The final '% 256' keeps a zero sum from producing 256, which would
        # not fit in a byte when appended to a bytearray.
        return (256 - l_cs) % 256

    @staticmethod
    def _append_checksum(p_byteArray):
        """Return a copy of the byte array with the checksum appended.

        0x70 0x03 ==> 0x70 0x03 0x8D
        @return: a bytearray with the checksum byte appended
        """
        l_out = bytearray(p_byteArray)
        l_out.append(BuildCommand._calculate_checksum(p_byteArray))
        return l_out

    @staticmethod
    def _assemble_regwrite(p_reg, p_args):
        """Take the register number and the args and make a ByteArray with the checksum appended.

        @param p_reg: is the register number where we will start writing.
        @param p_args: is the one or more values that we will write into the registers.
        @return: the ByteArray body of the register write command
        """
        l_cmd = bytearray(len(p_args) + 1)
        l_cmd[0] = p_reg
        for l_ix in range(len(p_args)):
            l_cmd[1 + l_ix] = p_args[l_ix]
        return BuildCommand._append_checksum(l_cmd)

    @staticmethod
    def _convert_pim(p_array):
        """Take a command ByteArray and convert it for the serial interface of the PIM.

        Each byte of the command is expanded to two ASCII hex characters.
        """
        l_ret = bytearray(0)
        for l_byte in p_array:
            l_str = BuildCommand._byte_to_2chars(l_byte)
            l_ret.append(l_str[0])
            l_ret.append(l_str[1])
        LOG.debug("Convert_pim - {}".format(FormatBytes(l_ret)))
        return l_ret

    @staticmethod
    def XXX_create_packet_header(self, p_network_id, p_address):
        # Dead/experimental code - note the spurious 'self' on a staticmethod.
        l_ph = bytearray(5)
        l_ph[0] = 0
        l_ph[1] = 0
        l_ph[2] = p_network_id
        l_ph[3] = p_address
        l_ph[4] = 0xff
        return l_ph

    @staticmethod
    def _queue_pim_command(p_controller_obj, p_command):
        """Place a fully assembled command on the controller's send queue."""
        LOG.debug("Queue_pim_command {}".format(FormatBytes(p_command)))
        p_controller_obj._Queue.put(p_command)

    @staticmethod
    def write_register_command(p_controller_obj, p_reg, p_args):
        """Take a starting register and one or more values and write them into the controller.

        Uses a CTL_W (0x17) header, ASCII-encoded body, and trailing 0x0D.
        @return: the queued command bytearray
        """
        l_cmd = BuildCommand._assemble_regwrite(p_reg, p_args)
        l_cmd[1:] = BuildCommand._convert_pim(l_cmd)
        l_cmd[0] = CTL_W
        l_cmd.append(0x0d)
        BuildCommand._queue_pim_command(p_controller_obj, l_cmd)
        return l_cmd
class UpbPimUtility(object):

    def _compose_command(self, _p_controller_obj, _p_command, _p_device_id, *p_args):
        """Build the command.

        @param p_controller_obj: is the controller information.
        @param p_command: is the command
        @param p_device_id: Is the UPB address of the target.
        @param p_args: is the data for the command
        """
        # Pack the payload bytes, then append the UPB checksum (mixed in
        # from BuildCommand via the CreateCommands MRO).
        l_body = bytearray(p_args)
        l_body = self._append_checksum(l_body)
        l_msg = "Ctl:{:#02x} ".format(l_body[0])
        LOG.debug('Compose Command - {}'.format(l_msg))
class DecodeResponses(object):
    """Decode response messages arriving from the UPB PIM.

    Raw data accumulates in p_controller_obj._Message; a complete message
    starts with 'P' (0x50) and ends with a carriage return (0x0D).
    """

    def _get_rest(self, p_message):
        """Return the payload of a response - everything after the leading
        'P' and the message-type byte.
        """
        l_rest = p_message[2:]
        return l_rest

    def _extract_one_message(self, p_controller_obj):
        """Valid messages start with a 'P' (0x50) and end with a NewLine (0x0D).
        Remove any leading junk characters.
        Find the next Newline - if none we do not have a complete command, so
        leave everything in the _Message buffer.
        @return: one complete message (without the trailing CR), or '' if none.
        """
        l_start = p_controller_obj._Message.find('P')
        l_end = p_controller_obj._Message.find('\r')
        if l_end < 0:
            return ''  # Not a complete message yet.
        if l_start > 0:
            LOG.warning('Decoding result - discarding leading junk {}'.format(FormatBytes(p_controller_obj._Message[0:l_start])))
            p_controller_obj._Message = p_controller_obj._Message[l_start:]
            l_start = 0
            l_end = p_controller_obj._Message.find('\r')
            if l_end < 0:
                return ''  # Not a complete message yet.
        l_message = p_controller_obj._Message[l_start:l_end]
        p_controller_obj._Message = p_controller_obj._Message[l_end + 1:]
        LOG.debug('Extracted message {}'.format(FormatBytes(l_message)))
        return l_message

    def _dispatch_decode(self, p_message):
        """
        Dispatch to the various message received methods.
        See Page 12 of - UPB Powerline Interface Module (PIM) Description Ver 1.6
        """
        l_hdr = p_message[1]
        if not isinstance(l_hdr, int):
            # _Message may be a str buffer; normalize the type character to
            # its byte value so the comparisons below work for str and bytes.
            l_hdr = ord(l_hdr)
        if l_hdr == 0x41:  # 'A'
            self._decode_A()
        elif l_hdr == 0x42:  # 'B'
            self._decode_B()
        elif l_hdr == 0x45:  # 'E'
            self._decode_E()
        elif l_hdr == 0x4B:  # 'K'
            self._decode_K()
        elif l_hdr == 0x4E:  # 'N'
            self._decode_N()
        elif l_hdr == 0x52:  # 'R'
            # Register reports carry a payload - pass the message through.
            self._decode_R(p_message)
        elif l_hdr == 0x55:  # 'U'
            self._decode_U(p_message)
        else:
            LOG.error("UPB_Pim.decode_response() found unknown code {} {}".format(l_hdr, FormatBytes(p_message)))

    def decode_response(self, p_controller_obj):
        """A response message starts with a 'P' (0x50) and ends with a CR (0x0D).

        Recurses until the buffer contains no further complete message.
        """
        LOG.debug('DecodeResponse A - {}'.format(FormatBytes(p_controller_obj._Message)))
        l_message = self._extract_one_message(p_controller_obj)
        LOG.debug('DecodeResponse B - {}'.format(FormatBytes(l_message)))
        if len(l_message) < 2:
            return
        self._dispatch_decode(l_message)
        self.decode_response(p_controller_obj)

    def _decode_A(self):
        LOG.error("UPB_Pim - Previous command was accepted")

    def _decode_B(self):
        LOG.error("UPB_Pim - Previous command was rejected because device is busy.")

    def _decode_E(self):
        LOG.error("UPB_Pim - Previous command was rejected with a command error.")

    def _decode_K(self):
        LOG.error("UPB_Pim.decode_response() found 'K' (0x4b) - ACK pulse also received.")

    def _decode_N(self):
        LOG.error("UPB_Pim.decode_response() found 'N' (0x4E) - No ACK pulse received from device.")

    def _decode_R(self, p_message=''):
        # Default keeps the old zero-arg call signature working.
        LOG.error("UPB_Pim.decode_response() found 'R' (0x52) - Register report received")
        self._get_rest(p_message)

    def _decode_U(self, p_message=''):
        LOG.error("UPB_Pim.decode_response() found 'U' (0x55) - Message report received.")
        self._get_rest(p_message)
class PimDriverInterface(DecodeResponses):
    """Drive the PIM's serial interface: periodically dequeue and send
    commands and poll the driver for received data.
    """

    def driver_loop_start(self, p_pyhouse_obj, p_controller_obj):
        """Kick off both the send loop and the receive loop for this controller."""
        LOG.info('Start driver loop')
        self.m_pyhouse_obj = p_pyhouse_obj
        LOG.info('Sending first command')
        self.dequeue_and_send(p_controller_obj)
        LOG.info('About to start RX loop')
        self.receive_loop(p_controller_obj)

    def XXXqueue_pim_command(self, p_controller_obj, p_command):
        # Dead/experimental duplicate of BuildCommand._queue_pim_command.
        l_msg = "Queue_pim_command {}".format(FormatBytes(p_command))
        LOG.debug(l_msg)
        p_controller_obj._Queue.put(p_command)

    def dequeue_and_send(self, p_controller_obj):
        """Send one queued command, then reschedule itself every SEND_TIMEOUT seconds."""
        self.m_pyhouse_obj._Twisted.Reactor.callLater(SEND_TIMEOUT, self.dequeue_and_send, p_controller_obj)
        try:
            # Non-blocking get - an empty queue just means nothing to send yet.
            l_command = p_controller_obj._Queue.get(False)
        except Queue.Empty:
            return
        if p_controller_obj.Interface._DriverApi is not None:
            LOG.debug('Sending to controller:{}, Message: {} '.format(p_controller_obj.Name, FormatBytes(l_command)))
            p_controller_obj.Interface._DriverApi.Write(l_command)

    def receive_loop(self, p_controller_obj):
        """Periodically fetch the current RX data from the driver and decode it."""
        self.m_pyhouse_obj._Twisted.Reactor.callLater(RECEIVE_TIMEOUT, self.receive_loop, p_controller_obj)
        if p_controller_obj.Interface._DriverApi is not None:
            l_msg = p_controller_obj.Interface._DriverApi.Read()
            if len(l_msg) == 0:
                return
            LOG.debug('Fetched message {}'.format(FormatBytes(l_msg)))
            p_controller_obj._Message += l_msg
            self.decode_response(p_controller_obj)
        else:
            LOG.info('No driver defined ')
class CreateCommands(UpbPimUtility, PimDriverInterface, BuildCommand):
    """Higher-level command helpers composed from the utility/driver/builder mixins.
    """

    def set_register_value(self, p_controller_obj, p_register, p_values):
        """Set one of the device's registers.
        """
        LOG.debug("Setting register {:#0x} to value {}".format(p_register, p_values))
        BuildCommand.write_register_command(p_controller_obj, p_register, p_values)
        pass

    def set_pim_mode(self):
        """
        Set the PIM operating mode:
        Page 6 of UPB Powerline Interface Module (PIM) Description Version 1.6
        The PIM mode register is 0x70
        Bit 0 (lsb) set to 1 is "No Idles Sent"
        Bit 1 set to 1 puts the PIM into "Message Mode"
        Send a write register 70 to set PIM mode
        Command to be sent is <17> 70 03 8D <0D>
        """
        l_val = bytearray(1)
        l_val[0] = 0x03
        # NOTE(review): 0xFF is passed where set_register_value expects the
        # controller object; write_register_command would then fail queuing
        # on 0xFF._Queue. Confirm how the controller should be supplied here.
        self.set_register_value(0xFF, 0x70, l_val)

    # def null_command(self, p_controller_obj):
    #     self.write_pim_command(p_controller_obj, pim_commands['null'], '0xFF')
    #     pass
class UpbPimApi(CreateCommands):
    """Controller-level startup and response handling for the UPB PIM."""

    @staticmethod
    def _initilaize_pim(p_controller_obj):
        """Initialize a new UPBData object.

        (Method name misspelling is kept - callers reference it as-is.)
        @return: a UPBData populated from the controller object.
        """
        l_pim = UPBData()
        l_pim.InterfaceType = p_controller_obj.InterfaceType
        l_pim.Name = p_controller_obj.Name
        l_pim.UPBAddress = p_controller_obj.UPBAddress
        l_pim.UPBPassword = p_controller_obj.UPBPassword
        l_pim.UPBNetworkID = p_controller_obj.UPBNetworkID
        LOG.info('Initializing UPB PIM named: {}, Type={}'.format(l_pim.Name, l_pim.InterfaceType))
        LOG.debug(PrettyFormatAny.form(l_pim, 'PIM data'))
        return l_pim

    def start_controller(self, p_pyhouse_obj, p_controller_obj):
        """We must now find a driver for the type of PIM we have and initialize that driver.

        @return: True on success, False when the driver fails to load.
        """
        # Bounded queue of commands awaiting transmission to the PIM.
        p_controller_obj._Queue = Queue.Queue(300)
        LOG.info("start:{} - InterfaceType:{}".format(p_controller_obj.Name, p_controller_obj.InterfaceType))
        self.m_pim = UpbPimApi._initilaize_pim(p_controller_obj)
        try:
            # NOTE(review): l_driver is never assigned in this method, so this
            # line always raises NameError (caught and logged below). The
            # driver lookup/creation step appears to be missing here.
            l_driver.Start(p_pyhouse_obj, p_controller_obj)
            # Put the PIM into "Message Mode" (register 0x70 = 0x03).
            self.set_register_value(p_controller_obj, 0x70, [0x03])
        except Exception as e_err:
            LOG.error('Driver failed to load properly - {}'.format(e_err))
            return False
        return True

    def get_response(self):
        # Placeholder - responses are handled by DecodeResponses.
        pass
class Api(UpbPimApi):
    """Public entry point for the UPB PIM family module."""

    # References cached by __init__/Start for later use.
    m_pyhouse_obj = None
    m_controller_obj = None

    def __init__(self, p_pyhouse_obj):
        """Save the PyHouse object reference."""
        self.m_pyhouse_obj = p_pyhouse_obj
        LOG.info('Initialized.')

    def Start(self, p_pyhouse_obj, p_controller_obj):
        """Start this PIM controller and, on success, its send/receive loops.

        @return: True if the controller and driver loop started.
        """
        self.m_pyhouse_obj = p_pyhouse_obj
        self.m_controller_obj = p_controller_obj
        # if not p_controller_obj.Active:
        #     return False
        if self.start_controller(p_pyhouse_obj, p_controller_obj):
            LOG.info('Starting driver loop')
            self.driver_loop_start(p_pyhouse_obj, p_controller_obj)
            return True
        return False

    def Stop(self, p_controller_obj):
        # Nothing to tear down yet.
        pass

    def Control(self, p_device_obj, p_controller_obj, p_control):
        """Send a 'goto' level command to the light matching p_device_obj.

        NOTE(review): self.m_house_obj and p_level are not defined anywhere in
        this class, so this method would raise if called. Presumably
        m_house_obj should come via m_pyhouse_obj and p_level from p_control -
        confirm against callers before relying on this.
        """
        for l_obj in self.m_house_obj.Lights.values():
            # if l_obj.Active == False:
            #     continue
            l_name = p_device_obj.Name
            if l_obj.Name == l_name:
                l_id = self._get_id_from_name(l_name)
                LOG.info('Change light {} to Level {}'.format(l_name, p_level))
                self._compose_command(self.m_controller_obj, pim_commands['goto'], l_id, p_level, 0x01)
                return
"""
Sent to PIM <17>70 03 8D <0D> <2005-09-24 20:58:55 75535.86>
PA <20:58:55 75535.94>
Sent to PIM <14>07 10 01 03 FF 30 B6 <0D> <2005-09-24 20:58:55 75535.94>
PA <20:58:56 75536.03>
PK <20:58:56 75536.24>
PU080001FF03864629 <20:58:56 75536.47>
Sent to PIM <14>07 10 01 02 FF 30 B7 <0D> <2005-09-24 20:58:57 75537.47>
PA <20:58:57 75537.51>
PK <20:58:57 75537.66>
PU080001FF02860070 <20:58:57 75537.86>
Sent to PIM <14>07 10 01 01 FF 30 B8 <0D> <2005-09-24 20:58:58 75538.86>
PA <20:58:58 75538.9>
PK <20:58:59 75539.05>
PU8904010001860600E5 <20:58:59 75539.29>
PU8905010001860600E4 <20:58:59 75539.45>
Sent to PIM <14>07 10 01 00 FF 30 B9 <0D> <2005-09-24 20:59:00 75540.45>
PA <20:59:00 75540.58>
PK <20:59:00 75540.63>
PU8905010001860600E4 <20:59:01 75541.04>
PU080001FF03864629 <20:59:01 75541.26>
Sent to PIM <14>87 10 01 82 FF 20 C7 <0D> <2005-09-24 20:59:21 75561.36>
PA <20:59:21 75561.48>
PK <20:59:21 75561.77>
Sent to PIM <14>07 10 01 03 FF 30 B6 <0D> <2005-09-24 20:59:31 75571.77>
PA <20:59:31 75571.83>
PK <20:59:32 75572.13>
PU080001FF0386006F <20:59:32 75572.58>
Sent to PIM <14>87 10 01 81 FF 20 C8 <0D> <2005-09-24 21:00:33 75633.89>
PA <21:00:33 75633.97>
PK <21:00:34 75634.27>
Sent to PIM <14>07 10 01 03 FF 30 B6 <0D> <2005-09-24 21:00:44 75644.27>
PA <21:00:44 75644.36>
PK <21:00:44 75644.64>
PU080001FF03864629 <21:00:45 75645>
Sent to PIM <14>87 10 01 83 FF 20 C6 <0D> <2005-09-24 21:00:53 75653.08>
PA <21:00:53 75653.16>
PK <21:00:53 75653.47>
Sent to PIM <14>87 10 01 82 FF 20 C7 <0D> <2005-09-24 21:00:53 75653.47>
PA <21:00:53 75653.58>
PK <21:00:53 75653.91>
Sent to PIM <14>07 10 01 02 FF 30 B7 <0D> <2005-09-24 21:01:03 75663.47>
PA <21:01:03 75663.56>
PK <21:01:03 75663.84>
PU080001FF02864B25 <21:01:04 75664.25>
"""
# ## END DBK
|
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import os
import os.path
import tempfile
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../')
# Import Salt libs
import salt.loader
import salt.config
import integration
from salt.exceptions import SaltRenderError
from salt.ext.six.moves import StringIO
# Import 3rd-party libs
import salt.ext.six as six
REQUISITES = ['require', 'require_in', 'use', 'use_in', 'watch', 'watch_in']
class StateConfigRendererTestCase(TestCase):
    """Tests for Salt's `stateconf` renderer (prefixing, relative includes,
    requisite rewriting, and start/goal state generation)."""

    def setUp(self):
        # Build a throw-away minion environment (state tree + cache) on disk
        # so the renderer pipeline can run with a local file client.
        self.root_dir = tempfile.mkdtemp(dir=integration.TMP)
        self.state_tree_dir = os.path.join(self.root_dir, 'state_tree')
        self.cache_dir = os.path.join(self.root_dir, 'cachedir')
        if not os.path.isdir(self.root_dir):
            os.makedirs(self.root_dir)
        if not os.path.isdir(self.state_tree_dir):
            os.makedirs(self.state_tree_dir)
        if not os.path.isdir(self.cache_dir):
            os.makedirs(self.cache_dir)
        self.config = salt.config.minion_config(None)
        self.config['root_dir'] = self.root_dir
        self.config['state_events'] = False
        self.config['id'] = 'match'
        self.config['file_client'] = 'local'
        self.config['file_roots'] = dict(base=[self.state_tree_dir])
        self.config['cachedir'] = self.cache_dir
        self.config['test'] = False
        self._renderers = salt.loader.render(
            self.config,
            {'config.get': lambda a, b: False}
        )

    def _render_sls(self,
                    content,
                    sls='',
                    saltenv='base',
                    argline='-G yaml . jinja',
                    **kws):
        """Render *content* through the stateconf renderer pipeline and
        return the resulting high-state data structure."""
        return self._renderers['stateconf'](
            StringIO(content), saltenv=saltenv, sls=sls,
            argline=argline,
            renderers=salt.loader.render(self.config, {}),
            **kws
        )

    def test_state_config(self):
        """`stateconf.set` values are made available to the jinja context."""
        result = self._render_sls('''
.sls_params:
  stateconf.set:
    - name1: value1
    - name2: value2
.extra:
  stateconf:
    - set
    - name: value
# --- end of state config ---
test:
  cmd.run:
    - name: echo name1={{sls_params.name1}} name2={{sls_params.name2}} {{extra.name}}
    - cwd: /
''', sls='test')
        self.assertEqual(len(result), 3)
        self.assertTrue('test::sls_params' in result and 'test' in result)
        self.assertTrue('test::extra' in result)
        self.assertEqual(result['test']['cmd.run'][0]['name'],
                         'echo name1=value1 name2=value2 value')

    def test_sls_dir(self):
        """{{sls_dir}} expands to the sls module's directory path."""
        result = self._render_sls('''
test:
  cmd.run:
    - name: echo sls_dir={{sls_dir}}
    - cwd: /
''', sls='path.to.sls')
        self.assertEqual(result['test']['cmd.run'][0]['name'],
                         'echo sls_dir=path/to')

    def test_states_declared_with_shorthand_no_args(self):
        """Shorthand `id: mod.func` declarations render to empty arg lists."""
        result = self._render_sls('''
test:
  cmd.run:
    - name: echo testing
    - cwd: /
test1:
  pkg.installed
test2:
  user.present
''')
        self.assertEqual(len(result), 3)
        for args in (result['test1']['pkg.installed'],
                     result['test2']['user.present']):
            self.assertTrue(isinstance(args, list))
            self.assertEqual(len(args), 0)
        self.assertEqual(result['test']['cmd.run'][0]['name'], 'echo testing')

    def test_adding_state_name_arg_for_dot_state_id(self):
        """A leading-dot id gets a `name` arg defaulted to the bare id."""
        result = self._render_sls('''
.test:
  pkg.installed:
    - cwd: /
.test2:
  pkg.installed:
    - name: vim
''', sls='test')
        self.assertEqual(
            result['test::test']['pkg.installed'][0]['name'], 'test'
        )
        self.assertEqual(
            result['test::test2']['pkg.installed'][0]['name'], 'vim'
        )

    def test_state_prefix(self):
        """Only leading-dot ids are prefixed with the sls name."""
        result = self._render_sls('''
.test:
  cmd.run:
    - name: echo renamed
    - cwd: /
state_id:
  cmd:
    - run
    - name: echo not renamed
    - cwd: /
''', sls='test')
        self.assertEqual(len(result), 2)
        self.assertTrue('test::test' in result)
        self.assertTrue('state_id' in result)

    def test_dot_state_id_in_requisites(self):
        """Dot-prefixed ids inside each requisite kind are expanded."""
        for req in REQUISITES:
            result = self._render_sls('''
.test:
  cmd.run:
    - name: echo renamed
    - cwd: /
state_id:
  cmd.run:
    - name: echo not renamed
    - cwd: /
    - {0}:
      - cmd: .test
'''.format(req), sls='test')
            self.assertEqual(len(result), 2)
            self.assertTrue('test::test' in result)
            self.assertTrue('state_id' in result)
            self.assertEqual(
                result['state_id']['cmd.run'][2][req][0]['cmd'], 'test::test'
            )

    def test_relative_include_with_requisites(self):
        """`.utils` style includes resolve relative to the current sls."""
        for req in REQUISITES:
            result = self._render_sls('''
include:
  - some.helper
  - .utils
state_id:
  cmd.run:
    - name: echo test
    - cwd: /
    - {0}:
      - cmd: .utils::some_state
'''.format(req), sls='test.work')
            self.assertEqual(result['include'][1], {'base': 'test.utils'})
            self.assertEqual(
                result['state_id']['cmd.run'][2][req][0]['cmd'],
                'test.utils::some_state'
            )

    def test_relative_include_and_extend(self):
        """Relative ids are also expanded inside `extend` blocks."""
        result = self._render_sls('''
include:
  - some.helper
  - .utils
extend:
  .utils::some_state:
    cmd.run:
      - name: echo overridden
''', sls='test.work')
        self.assertTrue('test.utils::some_state' in result['extend'])

    def test_multilevel_relative_include_with_requisites(self):
        """Each extra leading dot climbs one package level."""
        for req in REQUISITES:
            result = self._render_sls('''
include:
  - .shared
  - ..utils
  - ...helper
state_id:
  cmd.run:
    - name: echo test
    - cwd: /
    - {0}:
      - cmd: ..utils::some_state
'''.format(req), sls='test.nested.work')
            self.assertEqual(result['include'][0],
                             {'base': 'test.nested.shared'})
            self.assertEqual(result['include'][1], {'base': 'test.utils'})
            self.assertEqual(result['include'][2], {'base': 'helper'})
            self.assertEqual(
                result['state_id']['cmd.run'][2][req][0]['cmd'],
                'test.utils::some_state'
            )

    def test_multilevel_relative_include_beyond_top_level(self):
        """Climbing above the top-level package is a render error."""
        self.assertRaises(SaltRenderError, self._render_sls, '''
include:
  - ...shared
''', sls='test.work')

    def test_start_state_generation(self):
        """-s generates a start state requiring-in the first declared state."""
        result = self._render_sls('''
A:
  cmd.run:
    - name: echo hello
    - cwd: /
B:
  cmd.run:
    - name: echo world
    - cwd: /
''', sls='test', argline='-so yaml . jinja')
        self.assertEqual(len(result), 4)
        self.assertEqual(
            result['test::start']['stateconf.set'][0]['require_in'][0]['cmd'],
            'A'
        )

    def test_goal_state_generation(self):
        """An sls named *.goalstate gets a goal state requiring all states."""
        result = self._render_sls('''
{% for sid in "ABCDE": %}
{{sid}}:
  cmd.run:
    - name: echo this is {{sid}}
    - cwd: /
{% endfor %}
''', sls='test.goalstate', argline='yaml . jinja')
        self.assertEqual(len(result), len('ABCDE') + 1)
        reqs = result['test.goalstate::goal']['stateconf.set'][0]['require']
        self.assertEqual(
            set([next(six.itervalues(i)) for i in reqs]), set('ABCDE')
        )

    def test_implicit_require_with_goal_state(self):
        """-o chains each state onto the previous one and adds a goal state."""
        result = self._render_sls('''
{% for sid in "ABCDE": %}
{{sid}}:
  cmd.run:
    - name: echo this is {{sid}}
    - cwd: /
{% endfor %}
F:
  cmd.run:
    - name: echo this is F
    - cwd: /
    - require:
      - cmd: A
      - cmd: B
G:
  cmd.run:
    - name: echo this is G
    - cwd: /
    - require:
      - cmd: D
      - cmd: F
''', sls='test', argline='-o yaml . jinja')
        # Each state implicitly requires its predecessor (checked in reverse).
        sids = 'ABCDEFG'[::-1]
        for i, sid in enumerate(sids):
            if i < len(sids) - 1:
                self.assertEqual(
                    result[sid]['cmd.run'][2]['require'][0]['cmd'],
                    sids[i + 1]
                )
        # Explicit requires are preserved after the implicit one.
        F_args = result['F']['cmd.run']
        self.assertEqual(len(F_args), 3)
        F_req = F_args[2]['require']
        self.assertEqual(len(F_req), 3)
        self.assertEqual(F_req[1]['cmd'], 'A')
        self.assertEqual(F_req[2]['cmd'], 'B')
        G_args = result['G']['cmd.run']
        self.assertEqual(len(G_args), 3)
        G_req = G_args[2]['require']
        self.assertEqual(len(G_req), 3)
        self.assertEqual(G_req[1]['cmd'], 'D')
        self.assertEqual(G_req[2]['cmd'], 'F')
        # The goal state requires every declared state, in order.
        goal_args = result['test::goal']['stateconf.set']
        self.assertEqual(len(goal_args), 1)
        self.assertEqual(
            [next(six.itervalues(i)) for i in goal_args[0]['require']],
            list('ABCDEFG')
        )

    def test_slsdir(self):
        """{{slspath}} expands to the sls module's directory path."""
        result = self._render_sls('''
formula/woot.sls:
  cmd.run:
    - name: echo {{ slspath }}
    - cwd: /
''', sls='formula.woot', argline='yaml . jinja')
        r = result['formula/woot.sls']['cmd.run'][0]['name']
        self.assertEqual(r, 'echo formula/woot')
# Allow running this test module directly (no salt daemon required).
if __name__ == '__main__':
    from integration import run_tests
    run_tests(StateConfigRendererTestCase, needs_daemon=False)
|
|
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six.moves.urllib.parse as urlparse
from keystoneauth1 import discover
from keystoneauth1 import exceptions as ka_exc
from keystoneauth1.identity import v2 as v2_auth
from keystoneauth1.identity import v3 as v3_auth
from keystoneauth1 import session
from zaqarclient.auth import base
from zaqarclient import errors
# NOTE(flaper87): Some of the code below
# was brought to you by the very unique
# work of ceilometerclient and glanceclient.
class KeystoneAuth(base.AuthBackend):
    """Keystone Auth backend

    :params conf: A dictionary with Keystone's
        custom parameters:
            - os_username
            - os_password
            - os_project_id
            - os_project_name
            - os_auth_url
            - os_auth_token
            - os_region_name
            - os_service_type
            - os_endpoint_type
    :type conf: `dict`
    """

    def _get_keystone_session(self, **kwargs):
        """Build an authenticated keystoneauth1 Session.

        Chooses between Identity v2 and v3 (token or password plugins) based
        on what the server advertises and whether any domain info was given.
        :raises: errors.ZaqarError when no usable Keystone version is found.
        """
        cacert = kwargs.pop('cacert', None)
        cert = kwargs.pop('cert', None)
        key = kwargs.pop('key', None)
        insecure = kwargs.pop('insecure', False)
        auth_url = kwargs.pop('auth_url', None)
        project_id = kwargs.pop('project_id', None)
        project_name = kwargs.pop('project_name', None)
        token = kwargs.get('token')
        if insecure:
            verify = False
        else:
            # A CA bundle path if supplied, else default certificate checks.
            verify = cacert or True
        if cert and key:
            # passing cert and key together is deprecated in favour of the
            # requests lib form of having the cert and key as a tuple
            cert = (cert, key)
        # create the keystone client session
        ks_session = session.Session(verify=verify, cert=cert)
        v2_auth_url, v3_auth_url = self._discover_auth_versions(ks_session,
                                                                auth_url)
        username = kwargs.pop('username', None)
        user_id = kwargs.pop('user_id', None)
        user_domain_name = kwargs.pop('user_domain_name', None)
        user_domain_id = kwargs.pop('user_domain_id', None)
        project_domain_name = kwargs.pop('project_domain_name', None)
        project_domain_id = kwargs.pop('project_domain_id', None)
        auth = None
        # Any domain information forces v3 (domains don't exist in v2).
        use_domain = (user_domain_id or user_domain_name or
                      project_domain_id or project_domain_name)
        use_v3 = v3_auth_url and (use_domain or (not v2_auth_url))
        use_v2 = v2_auth_url and not use_domain
        if use_v3 and token:
            auth = v3_auth.Token(
                v3_auth_url,
                token=token,
                project_name=project_name,
                project_id=project_id,
                project_domain_name=project_domain_name,
                project_domain_id=project_domain_id)
        elif use_v2 and token:
            auth = v2_auth.Token(
                v2_auth_url,
                token=token,
                tenant_id=project_id,
                tenant_name=project_name)
        elif use_v3:
            # The auth_url as v3 specified
            # e.g. http://no.where:5000/v3
            # Keystone will return only v3 as viable option
            auth = v3_auth.Password(
                v3_auth_url,
                username=username,
                password=kwargs.pop('password', None),
                user_id=user_id,
                user_domain_name=user_domain_name,
                user_domain_id=user_domain_id,
                project_name=project_name,
                project_id=project_id,
                project_domain_name=project_domain_name,
                project_domain_id=project_domain_id)
        elif use_v2:
            # The auth_url as v2 specified
            # e.g. http://no.where:5000/v2.0
            # Keystone will return only v2 as viable option
            auth = v2_auth.Password(
                v2_auth_url,
                username,
                kwargs.pop('password', None),
                tenant_id=project_id,
                tenant_name=project_name)
        else:
            raise errors.ZaqarError('Unable to determine the Keystone version '
                                    'to authenticate with using the given '
                                    'auth_url.')
        ks_session.auth = auth
        return ks_session

    def _discover_auth_versions(self, session, auth_url):
        """Return (v2_auth_url, v3_auth_url) - each None when unsupported.

        :raises: errors.ZaqarError if discovery fails and the version cannot
            be inferred from the auth_url path.
        """
        # Discover the API versions the server is supporting based on the
        # given URL
        v2_auth_url = None
        v3_auth_url = None
        try:
            ks_discover = discover.Discover(session=session, url=auth_url)
            v2_auth_url = ks_discover.url_for('2.0')
            v3_auth_url = ks_discover.url_for('3.0')
        except ka_exc.DiscoveryFailure:
            raise
        except ka_exc.ClientException:
            # Identity service may not support discovery. In that case,
            # try to determine version from auth_url
            url_parts = urlparse.urlparse(auth_url)
            (scheme, netloc, path, params, query, fragment) = url_parts
            path = path.lower()
            if path.startswith('/v3'):
                v3_auth_url = auth_url
            elif path.startswith('/v2'):
                v2_auth_url = auth_url
            else:
                raise errors.ZaqarError('Unable to determine the Keystone '
                                        'version to authenticate with '
                                        'using the given auth_url.')
        return v2_auth_url, v3_auth_url

    def _get_endpoint(self, ks_session, **kwargs):
        """Get an endpoint using the provided keystone session."""
        # Set service specific endpoint types
        endpoint_type = kwargs.get('endpoint_type') or 'publicURL'
        service_type = kwargs.get('service_type') or 'messaging'
        region_name = kwargs.get('region_name')
        endpoint = ks_session.get_endpoint(service_type=service_type,
                                           interface=endpoint_type,
                                           region_name=region_name)
        return endpoint

    def authenticate(self, api_version, request):
        """Get an authenticated client using credentials in the keyword args.

        :param api_version: the API version to use ('1' or '2')
        :param request: The request spec instance to modify with
            the auth information.
        """
        def get_options(k):
            # Accept both bare keys and 'os_'-prefixed keys in the config.
            return self.conf.get(k, self.conf.get("os_%s" % k))
        token = get_options('auth_token')
        if not token or not request.endpoint:
            ks_kwargs = {}
            keys = ("username", "password", "project_id",
                    "project_name", "auth_url", "insecure",
                    "cacert", "region_name", "user_domain_name",
                    "user_domain_id", "project_domain_name",
                    "project_domain_id")
            for k in keys:
                ks_kwargs.update({k: get_options(k)})
            ks_session = (request.session or
                          self._get_keystone_session(**ks_kwargs))
            if not token:
                token = ks_session.get_token()
            if not request.endpoint:
                request.endpoint = self._get_endpoint(ks_session, **ks_kwargs)
        # NOTE(flaper87): Update the request spec
        # with the final token.
        request.headers['X-Auth-Token'] = token
        # NOTE(flwang): We also need to apply the insecure and cacert when
        # talking with Zaqar server.
        request.verify = not get_options('insecure')
        request.cert = get_options('cacert')
        return request
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
r"""
Source: `pytorch imagenet example <https://github.com/pytorch/examples/blob/master/imagenet/main.py>`_ # noqa B950
Modified and simplified to make the original pytorch example compatible with
torchelastic.distributed.launch.
Changes:
1. Removed ``rank``, ``gpu``, ``multiprocessing-distributed``, ``dist_url`` options.
These are obsolete parameters when using ``torchelastic.distributed.launch``.
2. Removed ``seed``, ``evaluate``, ``pretrained`` options for simplicity.
3. Removed ``resume``, ``start-epoch`` options.
Loads the most recent checkpoint by default.
4. ``batch-size`` is now per GPU (worker) batch size rather than for all GPUs.
5. Defaults ``workers`` (num data loader workers) to ``0``.
Usage
::
>>> python -m torchelastic.distributed.launch
--nnodes=$NUM_NODES
--nproc_per_node=$WORKERS_PER_NODE
--rdzv_id=$JOB_ID
--rdzv_backend=etcd
--rdzv_endpoint=$ETCD_HOST:$ETCD_PORT
main.py
--arch resnet18
--epochs 20
--batch-size 32
<DATA_DIR>
"""
import argparse
import io
import os
import shutil
import time
from contextlib import contextmanager
from datetime import timedelta
from typing import List, Tuple
import numpy
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from torch.distributed.elastic.utils.data import ElasticDistributedSampler
from torch.distributed.elastic.multiprocessing.errors import record
from torch.nn.parallel import DistributedDataParallel
from torch.optim import SGD
from torch.utils.data import DataLoader
# All lowercase, non-dunder callables in torchvision.models are selectable
# architectures (e.g. resnet18, vgg16).
model_names = sorted(
    name
    for name in models.__dict__
    if name.islower() and not name.startswith("__") and callable(models.__dict__[name])
)
# Command-line interface; rank/world-size options are intentionally absent -
# torchelastic.distributed.launch supplies them via the environment.
parser = argparse.ArgumentParser(description="PyTorch Elastic ImageNet Training")
parser.add_argument("data", metavar="DIR", help="path to dataset")
parser.add_argument(
    "-a",
    "--arch",
    metavar="ARCH",
    default="resnet18",
    choices=model_names,
    help="model architecture: " + " | ".join(model_names) + " (default: resnet18)",
)
parser.add_argument(
    "-j",
    "--workers",
    default=0,
    type=int,
    metavar="N",
    help="number of data loading workers",
)
parser.add_argument(
    "--epochs", default=90, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
    "-b",
    "--batch-size",
    default=32,
    type=int,
    metavar="N",
    help="mini-batch size (default: 32), per worker (GPU)",
)
parser.add_argument(
    "--lr",
    "--learning-rate",
    default=0.1,
    type=float,
    metavar="LR",
    help="initial learning rate",
    dest="lr",
)
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument(
    "--wd",
    "--weight-decay",
    default=1e-4,
    type=float,
    metavar="W",
    help="weight decay (default: 1e-4)",
    dest="weight_decay",
)
parser.add_argument(
    "-p",
    "--print-freq",
    default=10,
    type=int,
    metavar="N",
    help="print frequency (default: 10)",
)
parser.add_argument(
    "--dist-backend",
    default="gloo",
    choices=["nccl", "gloo"],
    type=str,
    help="distributed backend",
)
parser.add_argument(
    "--checkpoint-file",
    default="/tmp/checkpoint.pth.tar",
    type=str,
    help="checkpoint file path, to load and save to",
)
@record
def main():
    """Entry point for one elastic worker: init process group, build the
    model/data, resume from the freshest checkpoint, then train/validate
    and checkpoint every epoch.

    Decorated with ``@record`` so uncaught exceptions are written to the
    torchelastic error file for the agent to report.
    """
    args = parser.parse_args()
    # This example trains on CPU; the gloo backend default matches that.
    device = torch.device("cpu")
    dist.init_process_group(
        backend=args.dist_backend, init_method="env://", timeout=timedelta(seconds=10)
    )
    model, criterion, optimizer = initialize_model(
        args.arch, args.lr, args.momentum, args.weight_decay, device
    )
    train_loader, val_loader = initialize_data_loader(
        args.data, args.batch_size, args.workers
    )
    # resume from checkpoint if one exists;
    state = load_checkpoint(
        args.checkpoint_file, args.arch, model, optimizer
    )
    # state.epoch is -1 when nothing was restored, so we start at epoch 0.
    start_epoch = state.epoch + 1
    print(f"=> start_epoch: {start_epoch}, best_acc1: {state.best_acc1}")
    print_freq = args.print_freq
    for epoch in range(start_epoch, args.epochs):
        # Record the epoch on the state *before* training so a checkpoint
        # written this epoch resumes at epoch + 1.
        state.epoch = epoch
        # Re-seed the distributed sampler so each epoch gets a new shuffle.
        train_loader.batch_sampler.sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args.lr)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, print_freq)
        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, print_freq)
        # remember best acc@1 and save checkpoint
        is_best = acc1 > state.best_acc1
        state.best_acc1 = max(acc1, state.best_acc1)
        save_checkpoint(state, is_best, args.checkpoint_file)
class State:
    """
    Container for objects that we want to checkpoint. Represents the
    current "state" of the worker. This object is mutable.
    """

    def __init__(self, arch, model, optimizer):
        # epoch == -1 means "never trained"; the training loop resumes at
        # state.epoch + 1.
        self.epoch = -1
        self.best_acc1 = 0
        self.arch = arch
        self.model = model
        self.optimizer = optimizer

    def capture_snapshot(self):
        """
        Essentially a ``serialize()`` function, returns the state as an
        object compatible with ``torch.save()``. The following should work
        ::
            snapshot = state_0.capture_snapshot()
            state_1.apply_snapshot(snapshot)
            assert state_0 == state_1
        """
        return {
            "epoch": self.epoch,
            "best_acc1": self.best_acc1,
            "arch": self.arch,
            "state_dict": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
        }

    def apply_snapshot(self, obj):
        """
        The complimentary function of ``capture_snapshot()``. Applies the
        snapshot object that was returned by ``capture_snapshot()``.
        This function mutates this state object.
        """
        self.epoch = obj["epoch"]
        self.best_acc1 = obj["best_acc1"]
        # BUG FIX: the original also did ``self.state_dict = obj["state_dict"]``,
        # pinning a stale copy of the weights on the State object. It was never
        # read anywhere and wasted memory, so it has been removed.
        self.model.load_state_dict(obj["state_dict"])
        self.optimizer.load_state_dict(obj["optimizer"])

    def save(self, f):
        """Serialize this state with ``torch.save`` to ``f`` (path or file-like)."""
        torch.save(self.capture_snapshot(), f)

    def load(self, f):
        """Restore this state from a checkpoint previously written by ``save()``."""
        snapshot = torch.load(f)
        self.apply_snapshot(snapshot)
def initialize_model(
    arch: str, lr: float, momentum: float, weight_decay: float, device
):
    """Build the DDP-wrapped model, the loss criterion and the SGD optimizer.

    ``arch`` must be a torchvision model constructor name (see --arch).
    """
    print(f"=> creating model: {arch}")
    net = models.__dict__[arch]()
    # Move to the target device *before* wrapping: DistributedDataParallel
    # scopes itself to the model's device; otherwise it would claim every
    # available device.
    net.to(device)
    ddp_model = DistributedDataParallel(net)

    criterion = nn.CrossEntropyLoss()
    optimizer = SGD(
        ddp_model.parameters(), lr, momentum=momentum, weight_decay=weight_decay
    )
    return ddp_model, criterion, optimizer
def initialize_data_loader(
    data_dir, batch_size, num_data_workers
) -> Tuple[DataLoader, DataLoader]:
    """Create the ImageNet train/val loaders rooted at ``data_dir``.

    The training loader shards data across elastic workers via an
    ElasticDistributedSampler; the validation loader is plain sequential.
    """
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )
    train_transform = transforms.Compose(
        [
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    )
    val_transform = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]
    )

    train_dataset = datasets.ImageFolder(os.path.join(data_dir, "train"), train_transform)
    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        num_workers=num_data_workers,
        # pin_memory=True,
        sampler=ElasticDistributedSampler(train_dataset),
    )

    val_dataset = datasets.ImageFolder(os.path.join(data_dir, "val"), val_transform)
    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_data_workers,
        # pin_memory=True,
    )
    return train_loader, val_loader
def load_checkpoint(
    checkpoint_file: str,
    arch: str,
    model: DistributedDataParallel,
    optimizer,  # SGD
) -> State:
    """
    Loads a local checkpoint (if any). Otherwise, checks to see if any of
    the neighbors have a non-zero state. If so, restore the state
    from the rank that has the most up-to-date checkpoint.

    .. note:: when your job has access to a globally visible persistent storage
              (e.g. nfs mount, S3) you can simply have all workers load
              from the most recent checkpoint from such storage. Since this
              example is expected to run on vanilla hosts (with no shared
              storage) the checkpoints are written to local disk, hence
              we have the extra logic to broadcast the checkpoint from a
              surviving node.
    """
    state = State(arch, model, optimizer)
    if os.path.isfile(checkpoint_file):
        print(f"=> loading checkpoint file: {checkpoint_file}")
        state.load(checkpoint_file)
        print(f"=> loaded checkpoint file: {checkpoint_file}")
    # logic below is unnecessary when the checkpoint is visible on all nodes!
    # create a temporary cpu pg to broadcast most up-to-date checkpoint
    with tmp_process_group(backend="gloo") as pg:
        rank = dist.get_rank(group=pg)
        # get rank that has the largest state.epoch
        # (each rank contributes its own epoch; the sum collects them all
        # since every other slot is zero)
        epochs = torch.zeros(dist.get_world_size(), dtype=torch.int32)
        epochs[rank] = state.epoch
        dist.all_reduce(epochs, op=dist.ReduceOp.SUM, group=pg)
        t_max_epoch, t_max_rank = torch.max(epochs, dim=0)
        max_epoch = t_max_epoch.item()
        max_rank = t_max_rank.item()
        # max_epoch == -1 means no one has checkpointed return base state
        if max_epoch == -1:
            print(f"=> no workers have checkpoints, starting from epoch 0")
            return state
        # broadcast the state from max_rank (which has the most up-to-date state)
        # pickle the snapshot, convert it into a byte-blob tensor
        # then broadcast it, unpickle it and apply the snapshot
        print(f"=> using checkpoint from rank: {max_rank}, max_epoch: {max_epoch}")
        with io.BytesIO() as f:
            torch.save(state.capture_snapshot(), f)
            raw_blob = numpy.frombuffer(f.getvalue(), dtype=numpy.uint8)
        # broadcast the length first so receivers can size their buffer
        blob_len = torch.tensor(len(raw_blob))
        dist.broadcast(blob_len, src=max_rank, group=pg)
        print(f"=> checkpoint broadcast size is: {blob_len}")
        if rank != max_rank:
            blob = torch.zeros(blob_len.item(), dtype=torch.uint8)
        else:
            blob = torch.as_tensor(raw_blob, dtype=torch.uint8)
        dist.broadcast(blob, src=max_rank, group=pg)
        print(f"=> done broadcasting checkpoint")
        # only non-source ranks need to deserialize and apply the snapshot
        if rank != max_rank:
            with io.BytesIO(blob.numpy()) as f:
                snapshot = torch.load(f)
            state.apply_snapshot(snapshot)
        # wait till everyone has loaded the checkpoint
        dist.barrier(group=pg)
    print(f"=> done restoring from previous checkpoint")
    return state
@contextmanager
def tmp_process_group(backend):
    """Yield a short-lived process group on ``backend``; tear it down on exit."""
    group = dist.new_group(backend=backend)
    try:
        yield group
    finally:
        # Destroy even when the body raises, so no group leaks.
        dist.destroy_process_group(group)
def save_checkpoint(state: "State", is_best: bool, filename: str):
    """Atomically write ``state``'s snapshot to ``filename``.

    The snapshot is first written to ``filename + ".tmp"`` and then renamed
    into place, so an interruption mid-write never leaves a corrupt
    checkpoint. When ``is_best`` is set, the checkpoint is also copied to
    ``model_best.pth.tar`` next to it.
    """
    checkpoint_dir = os.path.dirname(filename)
    os.makedirs(checkpoint_dir, exist_ok=True)
    # save to tmp, then commit by moving the file in case the job
    # gets interrupted while writing the checkpoint
    tmp_filename = filename + ".tmp"
    torch.save(state.capture_snapshot(), tmp_filename)
    os.rename(tmp_filename, filename)
    # BUG FIX: this message used to print the literal text "(unknown)"
    # instead of the checkpoint path.
    print(f"=> saved checkpoint for epoch {state.epoch} at {filename}")
    if is_best:
        best = os.path.join(checkpoint_dir, "model_best.pth.tar")
        print(f"=> best model found at epoch {state.epoch} saving to {best}")
        shutil.copyfile(filename, best)
def train(
    train_loader: DataLoader,
    model: DistributedDataParallel,
    criterion,  # nn.CrossEntropyLoss
    optimizer,  # SGD
    epoch: int,
    print_freq: int,
):
    """Run one training epoch over ``train_loader``.

    Updates the model in place via the optimizer; prints timing, loss and
    top-1/top-5 accuracy every ``print_freq`` batches.
    """
    batch_time = AverageMeter("Time", ":6.3f")
    data_time = AverageMeter("Data", ":6.3f")
    losses = AverageMeter("Loss", ":.4e")
    top1 = AverageMeter("Acc@1", ":6.2f")
    top5 = AverageMeter("Acc@5", ":6.2f")
    progress = ProgressMeter(
        len(train_loader),
        [batch_time, data_time, losses, top1, top5],
        prefix="Epoch: [{}]".format(epoch),
    )
    # switch to train mode
    model.train()
    # ``end`` marks the end of the previous iteration; the gap until the
    # batch arrives is data-loading time, the gap until the bottom of the
    # loop is total batch time. Statement order matters here.
    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        # compute output
        output = model(images)
        loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % print_freq == 0:
            progress.display(i)
def validate(
    val_loader: DataLoader,
    model: DistributedDataParallel,
    criterion,  # nn.CrossEntropyLoss
    print_freq: int,
):
    """Evaluate ``model`` on ``val_loader`` and return the average top-1 accuracy.

    Runs under ``torch.no_grad()`` with the model in eval mode; does not
    modify the model.
    """
    batch_time = AverageMeter("Time", ":6.3f")
    losses = AverageMeter("Loss", ":.4e")
    top1 = AverageMeter("Acc@1", ":6.2f")
    top5 = AverageMeter("Acc@5", ":6.2f")
    progress = ProgressMeter(
        len(val_loader), [batch_time, losses, top1, top5], prefix="Test: "
    )
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            # compute output
            output = model(images)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % print_freq == 0:
                progress.display(i)
        # TODO: this should also be done with the ProgressMeter
        print(
            " * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}".format(top1=top1, top5=top5)
        )
    return top1.avg
class AverageMeter(object):
    """Tracks the latest value and running average of a metric."""

    def __init__(self, name: str, fmt: str = ":f"):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self) -> None:
        """Discard everything recorded so far."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1) -> None:
        """Record ``val`` observed ``n`` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. with fmt=":6.3f": "Time  0.123 ( 0.456)"
        template = f"{{name}} {{val{self.fmt}}} ({{avg{self.fmt}}})"
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Prints one-line progress summaries for a set of AverageMeters."""

    def __init__(self, num_batches: int, meters: List["AverageMeter"], prefix: str = ""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch: int) -> None:
        """Print ``prefix[batch/num_batches]`` followed by every meter."""
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print("\t".join(entries))

    def _get_batch_fmtstr(self, num_batches: int) -> str:
        # Pad the batch index to the width of the total, e.g. "[  7/100]".
        # (The original computed len(str(num_batches // 1)); "// 1" was a no-op.)
        num_digits = len(str(num_batches))
        fmt = "{:" + str(num_digits) + "d}"
        return "[" + fmt + "/" + fmt.format(num_batches) + "]"
def adjust_learning_rate(optimizer, epoch: int, lr: float) -> None:
    """Set every param group's LR to ``lr`` decayed by 10x every 30 epochs."""
    decayed = lr * (0.1 ** (epoch // 30))
    for group in optimizer.param_groups:
        group["lr"] = decayed
def accuracy(output, target, topk=(1,)):
    """Return the top-k accuracies (as percentages) for each k in ``topk``.

    ``output`` is (batch, classes) scores; ``target`` is (batch,) labels.
    Each entry of the result is a 1-element float tensor.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # (maxk, batch): row r holds the (r+1)-th ranked class per sample.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        # True where a ranked prediction matches the target label.
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        percentages = []
        for k in topk:
            hits = correct[:k].flatten().float().sum(0, keepdim=True)
            percentages.append(hits * (100.0 / batch_size))
        return percentages
# Script entry point: torchelastic launches this module once per worker.
if __name__ == "__main__":
    main()
|
|
import random
from .. import common
from .. import term
VERSION = '0.0.1'
# Constants for the current possession.
HOME = 0  # IU (the player)
AWAY = 1  # the named opponent
# Constants for the possible plays.
PLAY_30 = 1     # long (30') jump shot
PLAY_15 = 2     # short (15') jump shot
PLAY_LAYUP = 3
PLAY_SET = 4
# Constants for the possible defenses.
DEFENSE_PRESS = 1
DEFENSE_MAN_TO_MAN = 2
DEFENSE_ZONE = 3
DEFENSE_NONE = 4
def AddPoints(which, how_many):
  """Add how_many points for side ``which`` (HOME/AWAY) and reprint the score."""
  score[which] += how_many
  PrintScore()
def AwayJump():
  """Opponent jump shot. Returns the side with possession afterwards."""
  term.WriteLn(term.BOLD_WHITE, 'Jump shot!')
  # Make probability is scaled by the current defense via (defense + 11).
  if (random.random() * 16.0 / (defense + 11)) <= 0.35:
    term.WriteLn(term.BOLD_BLUE, 'Shot is good.')
    AddPoints(AWAY, 2)
    return HOME
  else:
    return AwayJumpMiss()
def AwayJumpFoul():
  """Resolve a possible foul on the opponent's jump shot.

  Returns the side with possession afterwards.
  """
  if (random.random() * 16.0 / (defense + 11)) <= 0.90:
    term.WriteLn('Player fouled. Two shots.')
    DoFoul(AWAY)
    # BUG FIX: possession returns to IU after the free throws (mirrors
    # HomeLayupFoul, which returns AWAY after DoFoul(HOME)). Previously
    # this fell through and returned None.
    return HOME
  else:
    term.WriteLn(term.BOLD_RED, "Offensive foul. IU's ball.")
    return HOME
def AwayJumpMiss():
  """Opponent's jump shot missed: either off the rim (rebound) or a foul."""
  if (random.random() * 16.0 / (defense + 11)) <= 0.75:
    term.WriteLn('Shot is off rim.')
    return AwayJumpRebound()
  else:
    return AwayJumpFoul()
def AwayJumpRebound():
  """Contest the rebound of an opponent miss; returns next possession."""
  if (random.random() * (defense + 11) / 12.0) <= 0.50:
    term.WriteLn(term.BOLD_RED, 'IU controls the rebound.')
    return HOME
  else:
    term.WriteLn(term.BOLD_BLUE, opponent, ' controls the rebound.',
                 term.BOLD_WHITE)
    return AwayJumpSteal()
def AwayJumpSteal():
  """After an opponent rebound: IU may steal off the press, otherwise the
  opponent resets or drives for a lay-up."""
  if (defense == DEFENSE_PRESS) and (random.random() > 0.75):
    term.WriteLn(term.BOLD_RED, 'Ball stolen. Easy lay-up for IU!')
    AddPoints(HOME, 2)
    return AWAY
  elif random.random() <= 0.50:
    term.WriteLn('Pass back to ', opponent, ' guard.')
    return AWAY
  else:
    return AwayLayup(PLAY_LAYUP)
def AwayLayup(play):
  """Opponent lay-up or set shot; returns next possession.

  ``play`` may be a float (Run() passes 2.5 * random() + 1), so the
  ``>`` comparison rather than ``==`` is deliberate: anything above
  PLAY_LAYUP counts as a set shot.
  """
  if play > PLAY_LAYUP:
    term.WriteLn(term.BOLD_WHITE, 'Set shot!')
  else:
    term.WriteLn(term.BOLD_WHITE, 'Lay-up!')
  if (random.random() * 14.0 / (defense + 11)) <= 0.413:
    term.WriteLn(term.BOLD_BLUE, 'Shot is good.')
    AddPoints(AWAY, 2)
    return HOME
  else:
    return AwayLayupMiss()
def AwayLayupMiss():
  """Opponent lay-up missed; resolve the rebound like a jump-shot miss."""
  term.WriteLn('Shot missed.')
  return AwayJumpRebound()
def CheckHalftime(time):
  """At tick 50, announce halftime and raise a bare Exception.

  The Exception is a deliberate control-flow signal: Run() catches it and
  restarts play with a new jump ball.
  """
  if time == 50:
    term.WriteLn(term.BOLD_WHITE)
    term.WriteLn('*** END OF FIRST HALF ***')
    PrintScore()
    raise Exception
  return time
def CheckOvertime(time):
  """If regulation ends tied, announce overtime and rewind the clock.

  Resetting time to 93 gives a "two-minute" overtime before the while
  time < 100 loop in Run() terminates.
  """
  if (time >= 100) and (score[HOME] == score[AWAY]):
    term.WriteLn(term.BOLD_WHITE)
    term.WriteLn('*** END OF SECOND HALF ***')
    PrintScore()
    term.WriteLn(term.BOLD_WHITE, 'Two-minute overtime!')
    term.WriteLn()
    time = 93
  return time
def CheckWarning(time):
  """At tick 92, print the two-minute warning. Returns time unchanged."""
  if time == 92:
    term.WriteLn(term.BOLD_WHITE)
    term.WriteLn('Two minutes left in the game!')
    term.WriteLn()
  return time
def DoFoul(which):
  """Shoot two free throws for side ``which`` and report the result.

  NOTE(review): the elif draws a *second* random number, so the chance of
  making exactly one shot is not simply 0.75 - 0.49 — presumably intended,
  but worth confirming against the original game.
  """
  if random.random() <= 0.49:
    term.WriteLn('Shooter makes both shots.')
    AddPoints(which, 2)
  elif random.random() <= 0.75:
    term.WriteLn('Shooter makes one shot and misses one.')
    AddPoints(which, 1)
  else:
    term.WriteLn('Both shots missed.')
    PrintScore()
def GetDefense():
  """Prompt for a defense and return one of the DEFENSE_* constants (1-4)."""
  term.WriteLn(term.BOLD_WHITE)
  term.WriteLn('Select a defense:')
  term.WriteLn(term.BOLD_YELLOW, ' (1) ', term.RESET, 'Press')
  term.WriteLn(term.BOLD_YELLOW, ' (2) ', term.RESET, 'Man-to-Man')
  term.WriteLn(term.BOLD_YELLOW, ' (3) ', term.RESET, 'Zone')
  term.WriteLn(term.BOLD_YELLOW, ' (4) ', term.RESET, 'None')
  term.WriteLn()
  return common.InputInt('Your choice?', 1, 4)
def GetPlay():
  """Prompt for the next IU play (PLAY_30/PLAY_15/PLAY_LAYUP/PLAY_SET).

  Entering 0 changes the defense and re-prompts.
  """
  # BUG FIX: without the global declaration, choosing option 0 assigned a
  # *local* ``defense`` and the change was silently discarded — the rest of
  # the game kept using the old module-level defense.
  global defense
  term.WriteLn(term.BOLD_WHITE)
  term.WriteLn('Select a play:')
  term.WriteLn(term.BOLD_YELLOW, ' (1) ', term.RESET, "Long Jump Shot (30')")
  term.WriteLn(term.BOLD_YELLOW, ' (2) ', term.RESET, "Short Jump Shot (15')")
  term.WriteLn(term.BOLD_YELLOW, ' (3) ', term.RESET, 'Lay Up')
  term.WriteLn(term.BOLD_YELLOW, ' (4) ', term.RESET, 'Set Shot')
  term.WriteLn(term.BOLD_YELLOW, ' (0) ', term.RESET, 'Change Defense')
  term.WriteLn()
  result = common.InputInt('Your choice?', 0, 4)
  if result == 0:
    defense = GetDefense()
    return GetPlay()
  term.WriteLn()
  return result
def GetOpponent():
  """Ask for and return the opponent team's name."""
  term.WriteLn()
  return common.Input('And who will be your opponent today?')
def HomeJump():
  """IU jump shot (advances the clock). Returns next possession."""
  Tick()
  term.WriteLn(term.BOLD_WHITE, 'Jump shot!')
  if random.random() <= (0.341 * (defense + 11) / 16.0):
    # BUG FIX: a stray Python-2 ``print`` statement preceded this call,
    # echoing the None returned by term.WriteLn as an extra line.
    term.WriteLn(term.BOLD_RED, 'Shot is good!')
    AddPoints(HOME, 2)
    return AWAY
  else:
    return HomeJumpMiss()
def HomeJumpBlock():
  """Resolve a possible block of IU's jump shot; returns next possession."""
  if random.random() <= (0.782 * (defense + 11) / 16.0):
    term.WriteLn('Shot is blocked!')
    # 50/50 scramble for the blocked ball.
    if random.random() <= 0.50:
      term.WriteLn(term.BOLD_BLUE, 'Ball controlled by ', opponent, '.')
      return AWAY
    else:
      term.WriteLn(term.BOLD_RED, 'Ball controlled by IU.')
      return HOME
  else:
    return HomeJumpFoul()
def HomeJumpFoul():
  """Resolve a possible foul on IU's jump shot; returns next possession."""
  if random.random() <= (0.843 * (defense + 11) / 16.0):
    term.WriteLn('Shooter is fouled. Two shots.')
    DoFoul(HOME)
    # BUG FIX: possession goes to the opponent after IU's free throws
    # (mirrors HomeLayupFoul). Previously this fell through, returning
    # None, which Run() only happened to treat like AWAY.
    return AWAY
  else:
    term.WriteLn('Charging foul. IU loses the ball.')
    return AWAY
def HomeJumpMiss():
  """IU jump shot missed: off-target (rebound) or possibly blocked/fouled."""
  if random.random() <= (0.682 * (defense + 11) / 16.0):
    term.WriteLn(term.BOLD_WHITE, 'Shot is off-target.')
    return HomeJumpRebound()
  else:
    return HomeJumpBlock()
def HomeJumpRebound():
  """Contest the rebound of an IU miss; returns next possession."""
  if ((defense + 11) / 12.0 * random.random()) <= 0.45:
    term.WriteLn(term.BOLD_BLUE, 'Rebound to ', opponent, '...')
    return AWAY
  else:
    term.WriteLn(term.BOLD_RED, 'IU controls the rebound!', term.BOLD_WHITE)
    # 40% of the time IU goes straight back up with a lay-up.
    if random.random() <= 0.40:
      return HomeLayup(PLAY_LAYUP)
    else:
      return HomeJumpSteal()
def HomeJumpSteal():
  """After an IU rebound: the opponent may steal off the press, otherwise
  IU keeps the ball."""
  if (defense == DEFENSE_PRESS) and (random.random() > 0.6):
    term.WriteLn(term.BOLD_BLUE,
                 'Pass stolen by ', opponent, ' -- easy lay-up!')
    AddPoints(AWAY, 2)
    # BUG FIX: after the opponent scores, IU gets the ball (mirrors
    # AwayJumpSteal, which returns AWAY after an IU steal-and-score).
    # Previously this fell through and returned None.
    return HOME
  else:
    term.WriteLn('Ball passed back to you.')
    return HOME
def HomeLayup(play):
  """IU lay-up or set shot (advances the clock). Returns next possession."""
  Tick()
  if play == PLAY_SET:
    term.WriteLn(term.BOLD_WHITE, 'Set shot!')
  else:
    term.WriteLn(term.BOLD_WHITE, 'Lay-up!')
  if (random.random() * 14.0 / (defense + 11)) <= 0.40:
    term.WriteLn(term.BOLD_RED, 'Shot is good! Two points.')
    AddPoints(HOME, 2)
    return AWAY
  else:
    return HomeLayupMiss()
def HomeLayupBlock():
  """IU loses the ball on a blocked lay-up or a charge.

  Both branches give possession to the opponent; only the message differs.
  """
  if (random.random() * 14.0 / (defense + 11)) <= 0.925:
    term.WriteLn(term.BOLD_BLUE, 'Shot blocked. ', opponent, "'s ball.")
  else:
    term.WriteLn(term.BOLD_BLUE, 'Charging foul. IU loses the ball.')
  return AWAY
def HomeLayupFoul():
  """Resolve a possible foul on IU's lay-up; returns next possession."""
  if (random.random() * 14.0 / (defense + 11)) <= 0.875:
    term.WriteLn('Shooter fouled. Two shots.')
    DoFoul(HOME)
    return AWAY
  else:
    return HomeLayupBlock()
def HomeLayupMiss():
  """IU lay-up missed: off the rim (rebound) or possibly fouled/blocked."""
  if (random.random() * 14.0 / (defense + 11)) <= 0.70:
    term.WriteLn('Shot is off the rim.')
    return HomeLayupRebound()
  else:
    return HomeLayupFoul()
def HomeLayupRebound():
  """Contest the rebound of an IU lay-up miss; returns next possession."""
  if random.random() <= 0.66:
    term.WriteLn(term.BOLD_BLUE, opponent, ' controls the rebound.')
    return AWAY
  else:
    term.WriteLn(term.BOLD_RED, 'IU controls the rebound.', term.BOLD_WHITE)
    # 40% of the time IU goes straight back up again.
    if random.random() <= 0.40:
      return HomeLayup(PLAY_LAYUP)
    else:
      term.WriteLn('Ball passed back to you.')
      return HOME
def JumpBall():
  """Jump ball: the opponent wins the tap 60% of the time."""
  term.WriteLn()
  if random.random() <= 0.6:
    term.WriteLn(term.BOLD_BLUE, opponent, ' controls the tap.')
    return AWAY
  else:
    term.WriteLn(term.BOLD_RED, 'IU controls the tap!')
    return HOME
def PrintScore():
  """Print the current IU vs. opponent score line."""
  term.WriteLn(term.RESET)
  term.Write('Score:  ')
  term.Write(term.BOLD_RED, 'IU ', score[HOME], '  ')
  term.WriteLn(term.BOLD_BLUE, opponent, ' ', score[AWAY])
def Tick():
  """Advance the game clock by one and run the halftime/warning/overtime
  checks (CheckHalftime may raise to signal halftime)."""
  global time
  time += 1
  time = CheckHalftime(time)
  time = CheckWarning(time)
  time = CheckOvertime(time)
def Instructions():
  """Print the how-to-play text.

  NOTE: uses Python-2 ``print`` statements, like the rest of this module;
  this file will not parse under Python 3.
  """
  print "This is a (very loose) simulation of college basketball.  You will"
  print "play the role of Indiana University's team captain and call the plays."
  print
  print "Both teams will always use the same defense.  If you want to change"
  print "your defensive strategy during the game, just select '0' for your shot."
  print
#------------------------------------------------------------------------
def Run():
  """Play a full game: setup, main possession loop, final score.

  Module-level globals hold the mutable game state (defense, opponent,
  clock, score). CheckHalftime raises a bare Exception as a halftime
  signal; the except below answers it with a fresh jump ball. Overtime is
  implemented by CheckOvertime rewinding ``time`` below 100.
  """
  common.Hello('Basketball', VERSION)
  Instructions()
  global defense, opponent, time, score
  defense = GetDefense()
  opponent = GetOpponent()
  time = 0
  score = [ 0, 0 ]
  ball = JumpBall()
  while time < 100:
    try:
      if ball == HOME:
        term.WriteLn(term.BOLD_RED, 'IU has the ball.', term.RESET)
        play = GetPlay()
        if (play == PLAY_15) or (play == PLAY_30):
          ball = HomeJump()
        else:
          ball = HomeLayup(play)
      else:
        term.WriteLn(term.BOLD_BLUE, opponent, ' has the ball.', term.RESET)
        Tick()
        # The opponent "calls" a random play in [1, 3.5).
        play = (2.5 * random.random()) + 1
        if play <= PLAY_30:
          ball = AwayJump()
        else:
          ball = AwayLayup(play)
    except Exception:
      # Halftime signal from CheckHalftime (via Tick): restart with a tap.
      ball = JumpBall()
  term.WriteLn(term.BOLD_WHITE)
  term.WriteLn('*** GAME OVER ***')
  PrintScore()
|
|
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class DashboardParameterValue(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'allow_all': 'bool',
        'default_value': 'str',
        'description': 'str',
        'dynamic_field_type': 'str',
        'hide_from_view': 'bool',
        'label': 'str',
        'multivalue': 'bool',
        'order': 'int',
        'parameter_type': 'str',
        'query_value': 'str',
        'reverse_dyn_sort': 'bool',
        'tag_key': 'str',
        'tags_black_list_regex': 'str',
        'value_ordering': 'list[str]',
        'values_to_readable_strings': 'dict(str, str)'
    }
    attribute_map = {
        'allow_all': 'allowAll',
        'default_value': 'defaultValue',
        'description': 'description',
        'dynamic_field_type': 'dynamicFieldType',
        'hide_from_view': 'hideFromView',
        'label': 'label',
        'multivalue': 'multivalue',
        'order': 'order',
        'parameter_type': 'parameterType',
        'query_value': 'queryValue',
        'reverse_dyn_sort': 'reverseDynSort',
        'tag_key': 'tagKey',
        'tags_black_list_regex': 'tagsBlackListRegex',
        'value_ordering': 'valueOrdering',
        'values_to_readable_strings': 'valuesToReadableStrings'
    }
    def __init__(self, allow_all=None, default_value=None, description=None, dynamic_field_type=None, hide_from_view=None, label=None, multivalue=None, order=None, parameter_type=None, query_value=None, reverse_dyn_sort=None, tag_key=None, tags_black_list_regex=None, value_ordering=None, values_to_readable_strings=None, _configuration=None):  # noqa: E501
        """DashboardParameterValue - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        # Backing fields for the properties below; only overwritten when the
        # corresponding constructor argument was supplied.
        self._allow_all = None
        self._default_value = None
        self._description = None
        self._dynamic_field_type = None
        self._hide_from_view = None
        self._label = None
        self._multivalue = None
        self._order = None
        self._parameter_type = None
        self._query_value = None
        self._reverse_dyn_sort = None
        self._tag_key = None
        self._tags_black_list_regex = None
        self._value_ordering = None
        self._values_to_readable_strings = None
        self.discriminator = None
        if allow_all is not None:
            self.allow_all = allow_all
        if default_value is not None:
            self.default_value = default_value
        if description is not None:
            self.description = description
        if dynamic_field_type is not None:
            self.dynamic_field_type = dynamic_field_type
        if hide_from_view is not None:
            self.hide_from_view = hide_from_view
        if label is not None:
            self.label = label
        if multivalue is not None:
            self.multivalue = multivalue
        if order is not None:
            self.order = order
        if parameter_type is not None:
            self.parameter_type = parameter_type
        if query_value is not None:
            self.query_value = query_value
        if reverse_dyn_sort is not None:
            self.reverse_dyn_sort = reverse_dyn_sort
        if tag_key is not None:
            self.tag_key = tag_key
        if tags_black_list_regex is not None:
            self.tags_black_list_regex = tags_black_list_regex
        if value_ordering is not None:
            self.value_ordering = value_ordering
        if values_to_readable_strings is not None:
            self.values_to_readable_strings = values_to_readable_strings
    @property
    def allow_all(self):
        """Gets the allow_all of this DashboardParameterValue.  # noqa: E501

        :return: The allow_all of this DashboardParameterValue.  # noqa: E501
        :rtype: bool
        """
        return self._allow_all
    @allow_all.setter
    def allow_all(self, allow_all):
        """Sets the allow_all of this DashboardParameterValue.

        :param allow_all: The allow_all of this DashboardParameterValue.  # noqa: E501
        :type: bool
        """
        self._allow_all = allow_all
    @property
    def default_value(self):
        """Gets the default_value of this DashboardParameterValue.  # noqa: E501

        :return: The default_value of this DashboardParameterValue.  # noqa: E501
        :rtype: str
        """
        return self._default_value
    @default_value.setter
    def default_value(self, default_value):
        """Sets the default_value of this DashboardParameterValue.

        :param default_value: The default_value of this DashboardParameterValue.  # noqa: E501
        :type: str
        """
        self._default_value = default_value
    @property
    def description(self):
        """Gets the description of this DashboardParameterValue.  # noqa: E501

        :return: The description of this DashboardParameterValue.  # noqa: E501
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this DashboardParameterValue.

        :param description: The description of this DashboardParameterValue.  # noqa: E501
        :type: str
        """
        self._description = description
    @property
    def dynamic_field_type(self):
        """Gets the dynamic_field_type of this DashboardParameterValue.  # noqa: E501

        :return: The dynamic_field_type of this DashboardParameterValue.  # noqa: E501
        :rtype: str
        """
        return self._dynamic_field_type
    @dynamic_field_type.setter
    def dynamic_field_type(self, dynamic_field_type):
        """Sets the dynamic_field_type of this DashboardParameterValue.

        :param dynamic_field_type: The dynamic_field_type of this DashboardParameterValue.  # noqa: E501
        :type: str
        """
        # Client-side enum validation (skipped when client_side_validation is off).
        allowed_values = ["SOURCE", "SOURCE_TAG", "METRIC_NAME", "TAG_KEY", "MATCHING_SOURCE_TAG"]  # noqa: E501
        if (self._configuration.client_side_validation and
                dynamic_field_type not in allowed_values):
            raise ValueError(
                "Invalid value for `dynamic_field_type` ({0}), must be one of {1}"  # noqa: E501
                .format(dynamic_field_type, allowed_values)
            )
        self._dynamic_field_type = dynamic_field_type
    @property
    def hide_from_view(self):
        """Gets the hide_from_view of this DashboardParameterValue.  # noqa: E501

        :return: The hide_from_view of this DashboardParameterValue.  # noqa: E501
        :rtype: bool
        """
        return self._hide_from_view
    @hide_from_view.setter
    def hide_from_view(self, hide_from_view):
        """Sets the hide_from_view of this DashboardParameterValue.

        :param hide_from_view: The hide_from_view of this DashboardParameterValue.  # noqa: E501
        :type: bool
        """
        self._hide_from_view = hide_from_view
    @property
    def label(self):
        """Gets the label of this DashboardParameterValue.  # noqa: E501

        :return: The label of this DashboardParameterValue.  # noqa: E501
        :rtype: str
        """
        return self._label
    @label.setter
    def label(self, label):
        """Sets the label of this DashboardParameterValue.

        :param label: The label of this DashboardParameterValue.  # noqa: E501
        :type: str
        """
        self._label = label
    @property
    def multivalue(self):
        """Gets the multivalue of this DashboardParameterValue.  # noqa: E501

        :return: The multivalue of this DashboardParameterValue.  # noqa: E501
        :rtype: bool
        """
        return self._multivalue
    @multivalue.setter
    def multivalue(self, multivalue):
        """Sets the multivalue of this DashboardParameterValue.

        :param multivalue: The multivalue of this DashboardParameterValue.  # noqa: E501
        :type: bool
        """
        self._multivalue = multivalue
    @property
    def order(self):
        """Gets the order of this DashboardParameterValue.  # noqa: E501

        :return: The order of this DashboardParameterValue.  # noqa: E501
        :rtype: int
        """
        return self._order
    @order.setter
    def order(self, order):
        """Sets the order of this DashboardParameterValue.

        :param order: The order of this DashboardParameterValue.  # noqa: E501
        :type: int
        """
        self._order = order
    @property
    def parameter_type(self):
        """Gets the parameter_type of this DashboardParameterValue.  # noqa: E501

        :return: The parameter_type of this DashboardParameterValue.  # noqa: E501
        :rtype: str
        """
        return self._parameter_type
    @parameter_type.setter
    def parameter_type(self, parameter_type):
        """Sets the parameter_type of this DashboardParameterValue.

        :param parameter_type: The parameter_type of this DashboardParameterValue.  # noqa: E501
        :type: str
        """
        # Client-side enum validation (skipped when client_side_validation is off).
        allowed_values = ["SIMPLE", "LIST", "DYNAMIC"]  # noqa: E501
        if (self._configuration.client_side_validation and
                parameter_type not in allowed_values):
            raise ValueError(
                "Invalid value for `parameter_type` ({0}), must be one of {1}"  # noqa: E501
                .format(parameter_type, allowed_values)
            )
        self._parameter_type = parameter_type
    @property
    def query_value(self):
        """Gets the query_value of this DashboardParameterValue.  # noqa: E501

        :return: The query_value of this DashboardParameterValue.  # noqa: E501
        :rtype: str
        """
        return self._query_value
    @query_value.setter
    def query_value(self, query_value):
        """Sets the query_value of this DashboardParameterValue.

        :param query_value: The query_value of this DashboardParameterValue.  # noqa: E501
        :type: str
        """
        self._query_value = query_value
    @property
    def reverse_dyn_sort(self):
        """Gets the reverse_dyn_sort of this DashboardParameterValue.  # noqa: E501

        Whether to reverse alphabetically sort the returned result.  # noqa: E501

        :return: The reverse_dyn_sort of this DashboardParameterValue.  # noqa: E501
        :rtype: bool
        """
        return self._reverse_dyn_sort
    @reverse_dyn_sort.setter
    def reverse_dyn_sort(self, reverse_dyn_sort):
        """Sets the reverse_dyn_sort of this DashboardParameterValue.

        Whether to reverse alphabetically sort the returned result.  # noqa: E501

        :param reverse_dyn_sort: The reverse_dyn_sort of this DashboardParameterValue.  # noqa: E501
        :type: bool
        """
        self._reverse_dyn_sort = reverse_dyn_sort
    @property
    def tag_key(self):
        """Gets the tag_key of this DashboardParameterValue.  # noqa: E501

        :return: The tag_key of this DashboardParameterValue.  # noqa: E501
        :rtype: str
        """
        return self._tag_key
    @tag_key.setter
    def tag_key(self, tag_key):
        """Sets the tag_key of this DashboardParameterValue.

        :param tag_key: The tag_key of this DashboardParameterValue.  # noqa: E501
        :type: str
        """
        self._tag_key = tag_key
    @property
    def tags_black_list_regex(self):
        """Gets the tags_black_list_regex of this DashboardParameterValue.  # noqa: E501

        The regular expression to filter out source tags from the Current Values list.  # noqa: E501

        :return: The tags_black_list_regex of this DashboardParameterValue.  # noqa: E501
        :rtype: str
        """
        return self._tags_black_list_regex
    @tags_black_list_regex.setter
    def tags_black_list_regex(self, tags_black_list_regex):
        """Sets the tags_black_list_regex of this DashboardParameterValue.

        The regular expression to filter out source tags from the Current Values list.  # noqa: E501

        :param tags_black_list_regex: The tags_black_list_regex of this DashboardParameterValue.  # noqa: E501
        :type: str
        """
        self._tags_black_list_regex = tags_black_list_regex
    @property
    def value_ordering(self):
        """Gets the value_ordering of this DashboardParameterValue.  # noqa: E501

        :return: The value_ordering of this DashboardParameterValue.  # noqa: E501
        :rtype: list[str]
        """
        return self._value_ordering
    @value_ordering.setter
    def value_ordering(self, value_ordering):
        """Sets the value_ordering of this DashboardParameterValue.

        :param value_ordering: The value_ordering of this DashboardParameterValue.  # noqa: E501
        :type: list[str]
        """
        self._value_ordering = value_ordering
    @property
    def values_to_readable_strings(self):
        """Gets the values_to_readable_strings of this DashboardParameterValue.  # noqa: E501

        :return: The values_to_readable_strings of this DashboardParameterValue.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._values_to_readable_strings
    @values_to_readable_strings.setter
    def values_to_readable_strings(self, values_to_readable_strings):
        """Sets the values_to_readable_strings of this DashboardParameterValue.

        :param values_to_readable_strings: The values_to_readable_strings of this DashboardParameterValue.  # noqa: E501
        :type: dict(str, str)
        """
        self._values_to_readable_strings = values_to_readable_strings
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated uniformly for all models; this class does not subclass dict.
        if issubclass(DashboardParameterValue, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DashboardParameterValue):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, DashboardParameterValue):
            return True
        return self.to_dict() != other.to_dict()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ManagedPrivateEndpointsOperations(object):
    """ManagedPrivateEndpointsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.synapse.managedprivateendpoints.v2021_06_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Auto-generated wiring: the pipeline client, the (de)serializer pair
        # and the client configuration are shared with the parent client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def get(
        self,
        managed_private_endpoint_name, # type: str
        managed_virtual_network_name="default", # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ManagedPrivateEndpoint"
        """Get Managed Private Endpoints.
        :param managed_private_endpoint_name: Managed private endpoint name.
        :type managed_private_endpoint_name: str
        :param managed_virtual_network_name: Managed virtual network name.
        :type managed_virtual_network_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedPrivateEndpoint, or the result of cls(response)
        :rtype: ~azure.synapse.managedprivateendpoints.v2021_06_01_preview.models.ManagedPrivateEndpoint
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedPrivateEndpoint"]
        # Map well-known HTTP status codes to typed azure-core exceptions;
        # callers may extend/override via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            # skip_quote: the endpoint is inserted verbatim — presumably it is
            # already a full host/base URL that must not be percent-encoded.
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'managedVirtualNetworkName': self._serialize.url("managed_virtual_network_name", managed_virtual_network_name, 'str'),
            'managedPrivateEndpointName': self._serialize.url("managed_private_endpoint_name", managed_private_endpoint_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a documented success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        deserialized = self._deserialize('ManagedPrivateEndpoint', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/managedVirtualNetworks/{managedVirtualNetworkName}/managedPrivateEndpoints/{managedPrivateEndpointName}'}  # type: ignore
    def create(
        self,
        managed_private_endpoint_name, # type: str
        managed_virtual_network_name="default", # type: str
        properties=None, # type: Optional["_models.ManagedPrivateEndpointProperties"]
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ManagedPrivateEndpoint"
        """Create Managed Private Endpoints.
        :param managed_private_endpoint_name: Managed private endpoint name.
        :type managed_private_endpoint_name: str
        :param managed_virtual_network_name: Managed virtual network name.
        :type managed_virtual_network_name: str
        :param properties: Managed private endpoint properties.
        :type properties: ~azure.synapse.managedprivateendpoints.v2021_06_01_preview.models.ManagedPrivateEndpointProperties
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedPrivateEndpoint, or the result of cls(response)
        :rtype: ~azure.synapse.managedprivateendpoints.v2021_06_01_preview.models.ManagedPrivateEndpoint
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedPrivateEndpoint"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Wrap the flattened ``properties`` argument back into the body model.
        _managed_private_endpoint = _models.ManagedPrivateEndpoint(properties=properties)
        api_version = "2021-06-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.create.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'managedVirtualNetworkName': self._serialize.url("managed_virtual_network_name", managed_virtual_network_name, 'str'),
            'managedPrivateEndpointName': self._serialize.url("managed_private_endpoint_name", managed_private_endpoint_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_managed_private_endpoint, 'ManagedPrivateEndpoint')
        body_content_kwargs['content'] = body_content
        # Create is modelled as an idempotent PUT against the endpoint name.
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        deserialized = self._deserialize('ManagedPrivateEndpoint', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create.metadata = {'url': '/managedVirtualNetworks/{managedVirtualNetworkName}/managedPrivateEndpoints/{managedPrivateEndpointName}'}  # type: ignore
    def delete(
        self,
        managed_private_endpoint_name, # type: str
        managed_virtual_network_name="default", # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Delete Managed Private Endpoints.
        :param managed_private_endpoint_name: Managed private endpoint name.
        :type managed_private_endpoint_name: str
        :param managed_virtual_network_name: Managed virtual network name.
        :type managed_virtual_network_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01-preview"
        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'managedVirtualNetworkName': self._serialize.url("managed_virtual_network_name", managed_virtual_network_name, 'str'),
            'managedPrivateEndpointName': self._serialize.url("managed_private_endpoint_name", managed_private_endpoint_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        # NOTE: the generator emits no headers (not even Accept) for delete.
        header_parameters = {}  # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 202 (accepted, async deletion) and 204 (no content) both succeed.
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/managedVirtualNetworks/{managedVirtualNetworkName}/managedPrivateEndpoints/{managedPrivateEndpointName}'}  # type: ignore
    def list(
        self,
        managed_virtual_network_name="default", # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.ManagedPrivateEndpointListResponse"]
        """List Managed Private Endpoints.
        :param managed_virtual_network_name: Managed virtual network name.
        :type managed_virtual_network_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ManagedPrivateEndpointListResponse or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.synapse.managedprivateendpoints.v2021_06_01_preview.models.ManagedPrivateEndpointListResponse]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedPrivateEndpointListResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01-preview"
        accept = "application/json"
        # Closure that builds the request for the first page (operation URL)
        # or a continuation page (``next_link`` returned by the service).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                    'managedVirtualNetworkName': self._serialize.url("managed_virtual_network_name", managed_virtual_network_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds the query string; only the path
                # placeholders are (re)formatted here.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                path_format_arguments = {
                    'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                    'managedVirtualNetworkName': self._serialize.url("managed_virtual_network_name", managed_virtual_network_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Closure that turns one page response into (continuation, items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('ManagedPrivateEndpointListResponse', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Closure that actually sends one page request and validates it.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        # ItemPaged lazily drives get_next/extract_data as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/managedVirtualNetworks/{managedVirtualNetworkName}/managedPrivateEndpoints'}  # type: ignore
|
|
# -*- coding: utf-8 -*-
# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests performed on the annotations of an instance of ``cobra.Model``."""
from __future__ import absolute_import, division
from builtins import dict
import pytest
import memote.support.annotation as annotation
from memote.utils import annotate, get_ids, truncate, wrapper
@annotate(title="Presence of Metabolite Annotation", format_type="count")
def test_metabolite_annotation_presence(model):
    """
    Expect all metabolites to have a non-empty annotation attribute.
    This test checks if any annotations at all are present in the SBML
    annotations field for each metabolite, irrespective of the type of
    annotation i.e. specific database cross-references, ontology terms,
    additional information. For this test to pass the model is expected to
    have metabolites and each of them should have some form of annotation.
    Implementation:
    Check if the annotation attribute of each cobra.Metabolite object of the
    model is unset or empty.
    """
    ann = test_metabolite_annotation_presence.annotation
    unannotated = annotation.find_components_without_annotation(
        model, "metabolites"
    )
    ann["data"] = get_ids(unannotated)
    ann["metric"] = len(ann["data"]) / len(model.metabolites)
    template = """A total of {} metabolites ({:.2%}) lack any form of annotation:
        {}"""
    ann["message"] = wrapper.fill(
        template.format(len(ann["data"]), ann["metric"], truncate(ann["data"]))
    )
    assert len(ann["data"]) == 0, ann["message"]
@annotate(title="Presence of Reaction Annotation", format_type="count")
def test_reaction_annotation_presence(model):
    """
    Expect all reactions to have a non-empty annotation attribute.
    This test checks if any annotations at all are present in the SBML
    annotations field for each reaction, irrespective of the type of
    annotation i.e. specific database cross-references, ontology terms,
    additional information. For this test to pass the model is expected to
    have reactions and each of them should have some form of annotation.
    Implementation:
    Check if the annotation attribute of each cobra.Reaction object of the
    model is unset or empty.
    """
    ann = test_reaction_annotation_presence.annotation
    unannotated = annotation.find_components_without_annotation(
        model, "reactions"
    )
    ann["data"] = get_ids(unannotated)
    ann["metric"] = len(ann["data"]) / len(model.reactions)
    template = """A total of {} reactions ({:.2%}) lack any form of annotation:
        {}"""
    ann["message"] = wrapper.fill(
        template.format(len(ann["data"]), ann["metric"], truncate(ann["data"]))
    )
    assert len(ann["data"]) == 0, ann["message"]
@annotate(title="Presence of Gene Annotation", format_type="count")
def test_gene_product_annotation_presence(model):
    """
    Expect all genes to have a non-empty annotation attribute.
    This test checks if any annotations at all are present in the SBML
    annotations field (extended by FBC package) for each gene product,
    irrespective of the type of annotation i.e. specific database,
    cross-references, ontology terms, additional information. For this test to
    pass the model is expected to have genes and each of them should have some
    form of annotation.
    Implementation:
    Check if the annotation attribute of each cobra.Gene object of the
    model is unset or empty.
    """
    ann = test_gene_product_annotation_presence.annotation
    unannotated = annotation.find_components_without_annotation(model, "genes")
    ann["data"] = get_ids(unannotated)
    ann["metric"] = len(ann["data"]) / len(model.genes)
    template = """A total of {} genes ({:.2%}) lack any form of
        annotation: {}"""
    ann["message"] = wrapper.fill(
        template.format(len(ann["data"]), ann["metric"], truncate(ann["data"]))
    )
    assert len(ann["data"]) == 0, ann["message"]
@pytest.mark.parametrize("db", list(annotation.METABOLITE_ANNOTATIONS))
@annotate(
    title="Metabolite Annotations Per Database",
    format_type="percent",
    message=dict(),
    data=dict(),
    metric=dict(),
)
def test_metabolite_annotation_overview(model, db):
    """
    Expect all metabolites to have annotations from common databases.
    Specific database cross-references are paramount to mapping information.
    To provide references to as many databases as possible helps to make the
    metabolic model more accessible to other researchers. This does not only
    facilitate the use of a model in a broad array of computational pipelines,
    it also promotes the metabolic model itself to become an organism-specific
    knowledge base.
    For this test to pass, each metabolite annotation should contain
    cross-references to a number of databases. The currently selection is
    listed in `annotation.py`, but an ongoing discussion can be found at
    https://github.com/opencobra/memote/issues/332. For each database this
    test checks for the presence of its corresponding namespace ID to comply
    with the MIRIAM guidelines i.e. they have to match those defined on
    https://identifiers.org/.
    Since each database is quite different and some potentially incomplete, it
    may not be feasible to achieve 100% coverage for each of them. Generally
    it should be possible, however, to obtain cross-references to at least
    one of the databases for all metabolites consistently.
    Implementation:
    Check if the keys of the annotation attribute of each cobra.Metabolite of
    the model match with a selection of common biochemical databases. The
    annotation attribute of cobrapy components is a dictionary of
    key:value pairs.
    """
    ann = test_metabolite_annotation_overview.annotation
    lacking = get_ids(
        annotation.generate_component_annotation_overview(model.metabolites, db)
    )
    ann["data"][db] = lacking
    share = len(lacking) / len(model.metabolites)
    ann["metric"][db] = share
    template = """The following {} metabolites ({:.2%}) lack annotation for {}:
        {}"""
    ann["message"][db] = wrapper.fill(
        template.format(len(lacking), share, db, truncate(lacking))
    )
    assert len(lacking) == 0, ann["message"][db]
@pytest.mark.parametrize("db", list(annotation.REACTION_ANNOTATIONS))
@annotate(
    title="Reaction Annotations Per Database",
    format_type="percent",
    message=dict(),
    data=dict(),
    metric=dict(),
)
def test_reaction_annotation_overview(model, db):
    """
    Expect all reactions to have annotations from common databases.
    Specific database cross-references are paramount to mapping information.
    To provide references to as many databases as possible helps to make the
    metabolic model more accessible to other researchers. This does not only
    facilitate the use of a model in a broad array of computational pipelines,
    it also promotes the metabolic model itself to become an organism-specific
    knowledge base.
    For this test to pass, each reaction annotation should contain
    cross-references to a number of databases. The currently selection is
    listed in `annotation.py`, but an ongoing discussion can be found at
    https://github.com/opencobra/memote/issues/332. For each database this
    test checks for the presence of its corresponding namespace ID to comply
    with the MIRIAM guidelines i.e. they have to match those defined on
    https://identifiers.org/.
    Since each database is quite different and some potentially incomplete, it
    may not be feasible to achieve 100% coverage for each of them. Generally
    it should be possible, however, to obtain cross-references to at least
    one of the databases for all reactions consistently.
    Implementation:
    Check if the keys of the annotation attribute of each cobra.Reaction of
    the model match with a selection of common biochemical databases. The
    annotation attribute of cobrapy components is a dictionary of
    key:value pairs.
    """
    ann = test_reaction_annotation_overview.annotation
    lacking = get_ids(
        annotation.generate_component_annotation_overview(model.reactions, db)
    )
    ann["data"][db] = lacking
    share = len(lacking) / len(model.reactions)
    ann["metric"][db] = share
    template = """The following {} reactions ({:.2%}) lack annotation for {}:
        {}"""
    ann["message"][db] = wrapper.fill(
        template.format(len(lacking), share, db, truncate(lacking))
    )
    assert len(lacking) == 0, ann["message"][db]
@pytest.mark.parametrize("db", list(annotation.GENE_PRODUCT_ANNOTATIONS))
@annotate(
    title="Gene Annotations Per Database",
    format_type="percent",
    message=dict(),
    data=dict(),
    metric=dict(),
)
def test_gene_product_annotation_overview(model, db):
    """
    Expect all genes to have annotations from common databases.
    Specific database cross-references are paramount to mapping information.
    To provide references to as many databases as possible helps to make the
    metabolic model more accessible to other researchers. This does not only
    facilitate the use of a model in a broad array of computational pipelines,
    it also promotes the metabolic model itself to become an organism-specific
    knowledge base.
    For this test to pass, each gene annotation should contain
    cross-references to a number of databases. The currently selection is
    listed in `annotation.py`, but an ongoing discussion can be found at
    https://github.com/opencobra/memote/issues/332. For each database this
    test checks for the presence of its corresponding namespace ID to comply
    with the MIRIAM guidelines i.e. they have to match those defined on
    https://identifiers.org/.
    Since each database is quite different and some potentially incomplete, it
    may not be feasible to achieve 100% coverage for each of them. Generally
    it should be possible, however, to obtain cross-references to at least
    one of the databases for all gene products consistently.
    Implementation:
    Check if the keys of the annotation attribute of each cobra.Gene of
    the model match with a selection of common genome databases. The
    annotation attribute of cobrapy components is a dictionary of
    key:value pairs.
    """
    ann = test_gene_product_annotation_overview.annotation
    lacking = get_ids(
        annotation.generate_component_annotation_overview(model.genes, db)
    )
    ann["data"][db] = lacking
    share = len(lacking) / len(model.genes)
    ann["metric"][db] = share
    template = """The following {} genes ({:.2%}) lack annotation for {}:
        {}"""
    ann["message"][db] = wrapper.fill(
        template.format(len(lacking), share, db, truncate(lacking))
    )
    assert len(lacking) == 0, ann["message"][db]
@pytest.mark.parametrize("db", list(annotation.METABOLITE_ANNOTATIONS))
@annotate(
    title="Metabolite Annotation Conformity Per Database",
    format_type="percent",
    message=dict(),
    data=dict(),
    metric=dict(),
)
def test_metabolite_annotation_wrong_ids(model, db):
    """
    Expect all annotations of metabolites to be in the correct format.
    To identify databases and the identifiers belonging to them, computational
    tools rely on the presence of specific patterns. Only when these patterns
    can be identified consistently is an ID truly machine-readable. This test
    checks if the database cross-references in metabolite annotations conform
    to patterns defined according to the MIRIAM guidelines, i.e. matching
    those that are defined at https://identifiers.org/.
    The required formats, i.e., regex patterns are further outlined in
    `annotation.py`. This test does not carry out a web query for the composed
    URI, it merely controls that the regex patterns match the identifiers.
    Implementation:
    For those metabolites whose annotation keys match any of the tested
    databases, check if the corresponding values match the identifier pattern
    of each database.
    """
    ann = test_metabolite_annotation_wrong_ids.annotation
    # Only metabolites that actually carry a cross-reference for ``db`` can
    # be checked for pattern conformity.
    total = get_ids(
        set(model.metabolites).difference(
            annotation.generate_component_annotation_overview(model.metabolites, db)
        )
    )
    ann["data"][db] = total
    ann["metric"][db] = 1.0
    ann["message"][db] = wrapper.fill(
        """There are no metabolite annotations for the {} database.
        """.format(
            db
        )
    )
    assert len(total) > 0, ann["message"][db]
    mismatched = get_ids(
        annotation.generate_component_annotation_miriam_match(
            model.metabolites, "metabolites", db
        )
    )
    ann["data"][db] = mismatched
    ann["metric"][db] = len(mismatched) / len(total)
    template = """A total of {} metabolite annotations ({:.2%}) do not match the
        regular expression patterns defined on identifiers.org for the {}
        database: {}"""
    ann["message"][db] = wrapper.fill(
        template.format(len(mismatched), ann["metric"][db], db, truncate(mismatched))
    )
    assert len(mismatched) == 0, ann["message"][db]
@pytest.mark.parametrize("db", annotation.REACTION_ANNOTATIONS)
@annotate(
    title="Reaction Annotation Conformity Per Database",
    format_type="percent",
    message=dict(),
    data=dict(),
    metric=dict(),
)
def test_reaction_annotation_wrong_ids(model, db):
    """
    Expect all annotations of reactions to be in the correct format.
    To identify databases and the identifiers belonging to them, computational
    tools rely on the presence of specific patterns. Only when these patterns
    can be identified consistently is an ID truly machine-readable. This test
    checks if the database cross-references in reaction annotations conform
    to patterns defined according to the MIRIAM guidelines, i.e. matching
    those that are defined at https://identifiers.org/.
    The required formats, i.e., regex patterns are further outlined in
    `annotation.py`. This test does not carry out a web query for the composed
    URI, it merely controls that the regex patterns match the identifiers.
    Implementation:
    For those reaction whose annotation keys match any of the tested
    databases, check if the corresponding values match the identifier pattern
    of each database.
    """
    ann = test_reaction_annotation_wrong_ids.annotation
    # ``total`` holds the reactions that carry an annotation for ``db``;
    # only those can be checked for MIRIAM pattern conformity.
    ann["data"][db] = total = get_ids(
        set(model.reactions).difference(
            annotation.generate_component_annotation_overview(model.reactions, db)
        )
    )
    ann["metric"][db] = 1.0
    ann["message"][db] = wrapper.fill(
        """There are no reaction annotations for the {} database.
        """.format(
            db
        )
    )
    assert len(total) > 0, ann["message"][db]
    ann["data"][db] = get_ids(
        annotation.generate_component_annotation_miriam_match(
            model.reactions, "reactions", db
        )
    )
    # Fix: normalize by the number of *annotated* reactions (``total``), not
    # by all reactions in the model. ``data`` is a subset of ``total``, the
    # message reports a percentage of reaction annotations, and the metabolite
    # conformity test uses the same denominator — dividing by
    # len(model.reactions) understated the metric whenever some reactions
    # lacked an annotation for ``db``.
    ann["metric"][db] = len(ann["data"][db]) / len(total)
    ann["message"][db] = wrapper.fill(
        """A total of {} reaction annotations ({:.2%}) do not match the
        regular expression patterns defined on identifiers.org for the {}
        database: {}""".format(
            len(ann["data"][db]), ann["metric"][db], db, truncate(ann["data"][db])
        )
    )
    assert len(ann["data"][db]) == 0, ann["message"][db]
@pytest.mark.parametrize("db", annotation.GENE_PRODUCT_ANNOTATIONS)
@annotate(
    title="Gene Annotation Conformity Per Database",
    format_type="percent",
    message=dict(),
    data=dict(),
    metric=dict(),
)
def test_gene_product_annotation_wrong_ids(model, db):
    """
    Expect all annotations of genes/gene-products to be in the correct format.
    To identify databases and the identifiers belonging to them, computational
    tools rely on the presence of specific patterns. Only when these patterns
    can be identified consistently is an ID truly machine-readable. This test
    checks if the database cross-references in reaction annotations conform
    to patterns defined according to the MIRIAM guidelines, i.e. matching
    those that are defined at https://identifiers.org/.
    The required formats, i.e., regex patterns are further outlined in
    `annotation.py`. This test does not carry out a web query for the composed
    URI, it merely controls that the regex patterns match the identifiers.
    Implementation:
    For those genes whose annotation keys match any of the tested
    databases, check if the corresponding values match the identifier pattern
    of each database.
    """
    ann = test_gene_product_annotation_wrong_ids.annotation
    # ``total`` holds the genes that carry an annotation for ``db``; only
    # those can be checked for MIRIAM pattern conformity.
    ann["data"][db] = total = get_ids(
        set(model.genes).difference(
            annotation.generate_component_annotation_overview(model.genes, db)
        )
    )
    ann["metric"][db] = 1.0
    ann["message"][db] = wrapper.fill(
        """There are no gene annotations for the {} database.
        """.format(
            db
        )
    )
    assert len(total) > 0, ann["message"][db]
    ann["data"][db] = get_ids(
        annotation.generate_component_annotation_miriam_match(model.genes, "genes", db)
    )
    # Fix: normalize by the number of *annotated* genes (``total``), not by
    # all genes in the model. ``data`` is a subset of ``total``, the message
    # reports a percentage of gene annotations, and the metabolite conformity
    # test uses the same denominator — dividing by len(model.genes)
    # understated the metric whenever some genes lacked an annotation for
    # ``db``.
    ann["metric"][db] = len(ann["data"][db]) / len(total)
    ann["message"][db] = wrapper.fill(
        """A total of {} gene annotations ({:.2%}) do not match the
        regular expression patterns defined on identifiers.org for the {}
        database: {}""".format(
            len(ann["data"][db]), ann["metric"][db], db, truncate(ann["data"][db])
        )
    )
    assert len(ann["data"][db]) == 0, ann["message"][db]
@annotate(title="Uniform Metabolite Identifier Namespace", format_type="count")
def test_metabolite_id_namespace_consistency(model):
    """
    Expect metabolite identifiers to be from the same namespace.
    In well-annotated models it is no problem if the pool of main identifiers
    for metabolites consists of identifiers from several databases. However,
    in models that lack appropriate annotations, it may hamper the ability of
    other researchers to use it. Running the model through a computational
    pipeline may be difficult without first consolidating the namespace.
    Hence, this test checks if the main metabolite identifiers can be
    attributed to one single namespace based on the regex patterns defined at
    https://identifiers.org/
    Implementation:
    Generate a table with each column corresponding to one
    database from the selection and each row to a metabolite identifier. A
    Boolean entry indicates whether the identifier matches the regular
    expression of the corresponding database. Since the Biocyc pattern matches
    broadly, we assume that any instance of an identifier matching to Biocyc
    AND any other database pattern is a false positive match for Biocyc and
    thus set it to ``false``. Sum the positive matches for each database and
    assume that the largest set is the 'main' identifier namespace.
    """
    ann = test_metabolite_id_namespace_consistency.annotation
    overview = annotation.generate_component_id_namespace_overview(model, "metabolites")
    distribution = overview.sum()
    # The namespace with the most positive matches is taken to be the main one.
    largest = distribution[list(distribution.index)].idxmax()
    conforming = overview[overview[largest]].index.tolist()
    deviating = set(get_ids(model.metabolites)) - set(conforming)
    ann["data"] = list(deviating)
    ann["metric"] = len(ann["data"]) / len(model.metabolites)
    template = """{} metabolite identifiers ({:.2%}) deviate from the largest found
        namespace ({}): {}"""
    ann["message"] = wrapper.fill(
        template.format(len(ann["data"]), ann["metric"], largest, truncate(ann["data"]))
    )
    assert len(ann["data"]) == 0, ann["message"]
@annotate(title="Uniform Reaction Identifier Namespace", format_type="count")
def test_reaction_id_namespace_consistency(model):
    """
    Expect reaction identifiers to be from the same namespace.
    In well-annotated models it is no problem if the pool of main identifiers
    for reactions consists of identifiers from several databases. However,
    in models that lack appropriate annotations, it may hamper the ability of
    other researchers to use it. Running the model through a computational
    pipeline may be difficult without first consolidating the namespace.
    Hence, this test checks if the main reaction identifiers can be
    attributed to one single namespace based on the regex patterns defined at
    https://identifiers.org/
    Implementation:
    Generate a pandas.DataFrame with each column corresponding to one
    database from the selection and each row to the reaction ID. A boolean
    entry indicates whether the metabolite ID matches the regex pattern
    of the corresponding database. Since the Biocyc pattern matches quite,
    assume that any instance of an identifier matching to Biocyc
    AND any other DB pattern is a false positive match for Biocyc and then set
    the boolean to ``false``. Sum the positive matches for each database and
    assume that the largest set is the 'main' identifier namespace.
    """
    ann = test_reaction_id_namespace_consistency.annotation
    overview = annotation.generate_component_id_namespace_overview(model, "reactions")
    distribution = overview.sum()
    # The namespace with the most positive matches is taken to be the main one.
    largest = distribution[list(distribution.index)].idxmax()
    conforming = overview[overview[largest]].index.tolist()
    deviating = set(get_ids(model.reactions)) - set(conforming)
    ann["data"] = list(deviating)
    ann["metric"] = len(ann["data"]) / len(model.reactions)
    template = """{} reaction identifiers ({:.2%}) deviate from the largest found
        namespace ({}): {}"""
    ann["message"] = wrapper.fill(
        template.format(len(ann["data"]), ann["metric"], largest, truncate(ann["data"]))
    )
    assert len(ann["data"]) == 0, ann["message"]
|
|
# Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.exception import SDBResponseError
class SequenceGenerator(object):
    """Generic Sequence Generator object, this takes a single
    string as the "sequence" and uses that to figure out
    what the next value in a string is. For example
    if you give "ABC" and pass in "A" it will give you "B",
    and if you give it "C" it will give you "AA".

    If you set "rollover" to True in the above example, passing
    in "C" would give you "A" again.

    The Sequence string can be a string or any iterable
    that has the "index" function and is indexable.
    """
    __name__ = "SequenceGenerator"

    def __init__(self, sequence_string, rollover=False):
        """Create a new SequenceGenerator using the sequence_string
        as how to generate the next item.

        :param sequence_string: The string or list that explains
            how to generate the next item in the sequence
        :type sequence_string: str,iterable
        :param rollover: Rollover instead of incrementing when
            we hit the end of the sequence
        :type rollover: bool
        """
        self.sequence_string = sequence_string
        # Width of a single sequence element (length of the first item).
        self.sequence_length = len(sequence_string[0])
        self.rollover = rollover
        self.last_item = sequence_string[-1]
        self.__name__ = "%s('%s')" % (self.__class__.__name__, sequence_string)

    def __call__(self, val, last=None):
        """Get the next value in the sequence"""
        # If they pass us in a string that's not at least
        # the length of our sequence, then return the
        # first element in our sequence
        # (fixed: identity comparison with None instead of ``== None``)
        if val is None or len(val) < self.sequence_length:
            return self.sequence_string[0]
        last_value = val[-self.sequence_length:]
        if (not self.rollover) and (last_value == self.last_item):
            # Carry: recursively increment the higher-order part and wrap
            # the last element back to the start (e.g. "C" -> "AA").
            val = "%s%s" % (self(val[:-self.sequence_length]), self._inc(last_value))
        else:
            # Simple in-place increment of the trailing element.
            val = "%s%s" % (val[:-self.sequence_length], self._inc(last_value))
        return val

    def _inc(self, val):
        """Increment a single value, wrapping around the sequence end."""
        assert len(val) == self.sequence_length
        return self.sequence_string[(self.sequence_string.index(val) + 1) % len(self.sequence_string)]
#
# Simple Sequence Functions
#
def increment_by_one(cv=None, lv=None):
    """Return the next integer: 0 for the initial call (``cv is None``),
    otherwise ``cv + 1``. ``lv`` (last value) is ignored."""
    # Fixed: identity comparison with None instead of ``== None``.
    if cv is None:
        return 0
    return cv + 1
def double(cv=None, lv=None):
    """Return the next power-of-two term: 1 for the initial call
    (``cv is None``), otherwise ``cv * 2``. ``lv`` is ignored."""
    # Fixed: identity comparison with None instead of ``== None``.
    if cv is None:
        return 1
    return cv * 2
def fib(cv=1, lv=0):
    """The fibonacci sequence, this incrementer uses the
    last value.

    :param cv: current value (defaults to 1; ``None`` is coerced to 1)
    :param lv: last value (defaults to 0; ``None`` is coerced to 0)
    :return: the next term, ``cv + lv``
    """
    # Fixed: identity comparisons with None instead of ``== None``.
    if cv is None:
        cv = 1
    if lv is None:
        lv = 0
    return cv + lv
increment_string = SequenceGenerator("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
class Sequence(object):
    """A simple Sequence using the new SDB "Consistent" features
    Based largly off of the "Counter" example from mitch garnaat:
    http://bitbucket.org/mitch/stupidbototricks/src/tip/counter.py

    NOTE: Python 2 only (``except X, e`` / ``raise E, msg`` / ``has_key``).
    The current value is persisted in SimpleDB via the ``val`` property.
    """

    def __init__(self, id=None, domain_name=None, fnc=increment_by_one, init_val=None):
        """Create a new Sequence, using an optional function to
        increment to the next number, by default we just increment by one.
        Every parameter here is optional, if you don't specify any options
        then you'll get a new SequenceGenerator with a random ID stored in the
        default domain that increments by one and uses the default botoweb
        environment

        :param id: Optional ID (name) for this counter
        :type id: str
        :param domain_name: Optional domain name to use, by default we get this out of the
            environment configuration
        :type domain_name:str
        :param fnc: Optional function to use for the incrementation, by default we just increment by one
            There are several functions defined in this module.
            Your function must accept "None" to get the initial value
        :type fnc: function, str
        :param init_val: Initial value, by default this is the first element in your sequence,
            but you can pass in any value, even a string if you pass in a function that uses
            strings instead of ints to increment
        """
        self._db = None
        self._value = None
        self.last_value = None
        self.domain_name = domain_name
        self.id = id
        if self.id == None:
            import uuid
            self.id = str(uuid.uuid4())
        if init_val == None:
            # Ask the incrementer for its initial value.
            init_val = fnc(init_val)
        # NOTE(review): this assignment goes through the ``val`` property and
        # immediately writes the initial value to SimpleDB; ``self.timestamp``
        # set there is then overwritten with None two lines below -- confirm
        # whether that ordering is intended.
        self.val = init_val
        self.item_type = type(fnc(None))
        self.timestamp = None
        # Allow us to pass in a full name to a function
        if type(fnc) == str:
            from boto.utils import find_class
            fnc = find_class(fnc)
        self.fnc = fnc

    def set(self, val):
        """Set the value.

        Uses a SimpleDB conditional put keyed on the previously seen value so
        that concurrent writers are detected (HTTP 409 -> ValueError).
        """
        import time
        now = time.time()
        expected_values = []
        new_val = {}
        new_val['timestamp'] = now
        if self._value != None:
            new_val['last_value'] = self._value
            # Conditional-put guard: only succeed if the stored current_value
            # is still the one we last read.
            expected_values = ['current_value', str(self._value)]
        new_val['current_value'] = val
        try:
            self.db.put_attributes(self.id, new_val, expected_values=expected_values)
            self.timestamp = new_val['timestamp']
        except SDBResponseError, e:
            if e.status == 409:
                # Someone else updated the sequence since our last read.
                raise ValueError, "Sequence out of sync"
            else:
                raise

    def get(self):
        """Get the value (consistent read from SimpleDB)."""
        val = self.db.get_attributes(self.id, consistent_read=True)
        if val and val.has_key('timestamp'):
            self.timestamp = val['timestamp']
        if val and val.has_key('current_value'):
            # Values come back as strings; coerce to the incrementer's type.
            self._value = self.item_type(val['current_value'])
        if val.has_key("last_value") and val['last_value'] != None:
            self.last_value = self.item_type(val['last_value'])
        return self._value

    # ``seq.val`` reads/writes the persisted value.
    val = property(get, set)

    def __repr__(self):
        return "%s('%s', '%s', '%s.%s', '%s')" % (
            self.__class__.__name__,
            self.id,
            self.domain_name,
            self.fnc.__module__, self.fnc.__name__,
            self.val)

    def _connect(self):
        """Connect to our domain.

        NOTE(review): ``sdb`` is only bound inside the ``if not
        self.domain_name`` branch, so passing an explicit domain_name leads to
        a NameError at ``sdb.get_domain`` -- confirm and hoist the
        ``boto.connect_sdb()`` call if so.
        """
        if not self._db:
            if not self.domain_name:
                import boto
                sdb = boto.connect_sdb()
                self.domain_name = boto.config.get("DB", "sequence_db", boto.config.get("DB", "db_name", "default"))
            try:
                self._db = sdb.get_domain(self.domain_name)
            except SDBResponseError, e:
                if e.status == 400:
                    # Domain does not exist yet; create it on first use.
                    self._db = sdb.create_domain(self.domain_name)
                else:
                    raise
        return self._db

    db = property(_connect)

    def next(self):
        # Advance the sequence: feed current and last value to the incrementer
        # and persist the result.
        self.val = self.fnc(self.val, self.last_value)
        return self.val

    def delete(self):
        """Remove this sequence"""
        self.db.delete_attributes(self.id)

    def __del__(self):
        # NOTE(review): deleting the remote record on garbage collection is
        # destructive -- any Sequence object going out of scope wipes the
        # stored counter. Confirm this is really intended.
        self.delete()
|
|
import argparse
import unittest
from unittest import mock
from mopidy import commands
class ConfigOverrideTypeTest(unittest.TestCase):
    """Tests for the ``section/key=value`` argparse override parser.

    Verifies surrounding-whitespace stripping in section/key/value and that
    malformed overrides raise ``argparse.ArgumentTypeError``.
    """

    def test_valid_override(self):
        expected = ("section", "key", "value")
        assert expected == commands.config_override_type("section/key=value")
        assert expected == commands.config_override_type("section/key=value ")
        assert expected == commands.config_override_type("section/key =value")
        assert expected == commands.config_override_type("section /key=value")

    def test_empty_override(self):
        # An empty value after '=' is legal and preserved as "".
        expected = ("section", "key", "")
        assert expected == commands.config_override_type("section/key=")
        assert expected == commands.config_override_type("section/key= ")

    def test_invalid_override(self):
        with self.assertRaises(argparse.ArgumentTypeError):
            commands.config_override_type("section/key")
        with self.assertRaises(argparse.ArgumentTypeError):
            commands.config_override_type("section=")
        with self.assertRaises(argparse.ArgumentTypeError):
            commands.config_override_type("section")
class CommandParsingTest(unittest.TestCase):
    """Tests for ``commands.Command.parse``: option/positional handling,
    sub-command dispatch, error reporting via ``Command.exit``, and
    ``set()`` defaults propagation.

    ``Command.exit`` is patched to raise ``SystemExit`` so that error paths
    can be asserted without terminating the test process.
    """

    def setUp(self):  # noqa: N802
        self.exit_patcher = mock.patch.object(commands.Command, "exit")
        self.exit_mock = self.exit_patcher.start()
        self.exit_mock.side_effect = SystemExit

    def tearDown(self):  # noqa: N802
        self.exit_patcher.stop()

    def test_command_parsing_returns_namespace(self):
        cmd = commands.Command()
        assert isinstance(cmd.parse([]), argparse.Namespace)

    def test_command_parsing_does_not_contain_args(self):
        # The internal "_args" accumulator must not leak into the result.
        cmd = commands.Command()
        result = cmd.parse([])
        assert not hasattr(result, "_args")

    def test_unknown_options_bails(self):
        cmd = commands.Command()
        with self.assertRaises(SystemExit):
            cmd.parse(["--foobar"])

    def test_invalid_sub_command_bails(self):
        cmd = commands.Command()
        with self.assertRaises(SystemExit):
            cmd.parse(["foo"])

    def test_command_arguments(self):
        cmd = commands.Command()
        cmd.add_argument("--bar")
        result = cmd.parse(["--bar", "baz"])
        assert result.bar == "baz"

    def test_command_arguments_and_sub_command(self):
        child = commands.Command()
        child.add_argument("--baz")
        cmd = commands.Command()
        cmd.add_argument("--bar")
        cmd.add_child("foo", child)
        result = cmd.parse(["--bar", "baz", "foo"])
        assert result.bar == "baz"
        assert result.baz is None

    def test_subcommand_may_have_positional(self):
        child = commands.Command()
        child.add_argument("bar")
        cmd = commands.Command()
        cmd.add_child("foo", child)
        result = cmd.parse(["foo", "baz"])
        assert result.bar == "baz"

    def test_subcommand_may_have_remainder(self):
        child = commands.Command()
        child.add_argument("bar", nargs=argparse.REMAINDER)
        cmd = commands.Command()
        cmd.add_child("foo", child)
        result = cmd.parse(["foo", "baz", "bep", "bop"])
        assert result.bar == ["baz", "bep", "bop"]

    def test_result_stores_choosen_command(self):
        # NOTE: "choosen" is a typo kept for history stability.
        child = commands.Command()
        cmd = commands.Command()
        cmd.add_child("foo", child)
        result = cmd.parse(["foo"])
        assert result.command == child
        result = cmd.parse([])
        assert result.command == cmd
        child2 = commands.Command()
        cmd.add_child("bar", child2)
        subchild = commands.Command()
        child.add_child("baz", subchild)
        result = cmd.parse(["bar"])
        assert result.command == child2
        result = cmd.parse(["foo", "baz"])
        assert result.command == subchild

    def test_invalid_type(self):
        cmd = commands.Command()
        cmd.add_argument("--bar", type=int)
        with self.assertRaises(SystemExit):
            cmd.parse(["--bar", "zero"], prog="foo")
        self.exit_mock.assert_called_once_with(
            1,
            "argument --bar: invalid int value: 'zero'",
            "usage: foo [--bar BAR]",
        )

    @mock.patch("sys.argv")
    def test_command_error_usage_prog(self, argv_mock):
        # The usage prog defaults to basename(sys.argv[0]) unless overridden.
        argv_mock.__getitem__.return_value = "/usr/bin/foo"
        cmd = commands.Command()
        cmd.add_argument("--bar", required=True)
        with self.assertRaises(SystemExit):
            cmd.parse([])
        self.exit_mock.assert_called_once_with(
            mock.ANY, mock.ANY, "usage: foo --bar BAR"
        )
        self.exit_mock.reset_mock()
        with self.assertRaises(SystemExit):
            cmd.parse([], prog="baz")
        self.exit_mock.assert_called_once_with(
            mock.ANY, mock.ANY, "usage: baz --bar BAR"
        )

    def test_missing_required(self):
        cmd = commands.Command()
        cmd.add_argument("--bar", required=True)
        with self.assertRaises(SystemExit):
            cmd.parse([], prog="foo")
        self.exit_mock.assert_called_once_with(
            1,
            "the following arguments are required: --bar",
            "usage: foo --bar BAR",
        )

    def test_missing_positionals(self):
        cmd = commands.Command()
        cmd.add_argument("bar")
        with self.assertRaises(SystemExit):
            cmd.parse([], prog="foo")
        self.exit_mock.assert_called_once_with(
            1,
            "the following arguments are required: bar, _args",
            "usage: foo bar",
        )

    def test_missing_positionals_subcommand(self):
        child = commands.Command()
        child.add_argument("baz")
        cmd = commands.Command()
        cmd.add_child("bar", child)
        with self.assertRaises(SystemExit):
            cmd.parse(["bar"], prog="foo")
        self.exit_mock.assert_called_once_with(
            1,
            "the following arguments are required: baz, _args",
            "usage: foo bar baz",
        )

    def test_unknown_command(self):
        cmd = commands.Command()
        with self.assertRaises(SystemExit):
            cmd.parse(["--help"], prog="foo")
        self.exit_mock.assert_called_once_with(
            1, "unrecognized arguments: --help", "usage: foo"
        )

    def test_invalid_subcommand(self):
        cmd = commands.Command()
        cmd.add_child("baz", commands.Command())
        with self.assertRaises(SystemExit):
            cmd.parse(["bar"], prog="foo")
        self.exit_mock.assert_called_once_with(
            1, "unrecognized command: bar", "usage: foo"
        )

    def test_set(self):
        cmd = commands.Command()
        cmd.set(foo="bar")
        result = cmd.parse([])
        assert result.foo == "bar"

    def test_set_propegate(self):
        # NOTE: "propegate" is a typo kept for history stability.
        child = commands.Command()
        cmd = commands.Command()
        cmd.set(foo="bar")
        cmd.add_child("command", child)
        result = cmd.parse(["command"])
        assert result.foo == "bar"

    def test_innermost_set_wins(self):
        child = commands.Command()
        child.set(foo="bar", baz=1)
        cmd = commands.Command()
        cmd.set(foo="baz", baz=None)
        cmd.add_child("command", child)
        result = cmd.parse(["command"])
        assert result.foo == "bar"
        assert result.baz == 1

    def test_help_action_works(self):
        cmd = commands.Command()
        cmd.add_argument("-h", action="help")
        cmd.format_help = mock.Mock()
        with self.assertRaises(SystemExit):
            cmd.parse(["-h"])
        cmd.format_help.assert_called_once_with(mock.ANY)
        self.exit_mock.assert_called_once_with(0, cmd.format_help.return_value)
class UsageTest(unittest.TestCase):
    """Tests for ``Command.format_usage``: prog-name defaulting from
    ``sys.argv[0]``, explicit override, and nested sub-command usage lines."""

    @mock.patch("sys.argv")
    def test_prog_name_default_and_override(self, argv_mock):
        argv_mock.__getitem__.return_value = "/usr/bin/foo"
        cmd = commands.Command()
        assert "usage: foo" == cmd.format_usage().strip()
        assert "usage: baz" == cmd.format_usage("baz").strip()

    def test_basic_usage(self):
        cmd = commands.Command()
        assert "usage: foo" == cmd.format_usage("foo").strip()
        cmd.add_argument("-h", "--help", action="store_true")
        assert "usage: foo [-h]" == cmd.format_usage("foo").strip()
        cmd.add_argument("bar")
        assert "usage: foo [-h] bar" == cmd.format_usage("foo").strip()

    def test_nested_usage(self):
        child = commands.Command()
        cmd = commands.Command()
        cmd.add_child("bar", child)
        assert "usage: foo" == cmd.format_usage("foo").strip()
        assert "usage: foo bar" == cmd.format_usage("foo bar").strip()
        # Parent options must not leak into the child's usage line.
        cmd.add_argument("-h", "--help", action="store_true")
        assert "usage: foo bar" == child.format_usage("foo bar").strip()
        child.add_argument("-h", "--help", action="store_true")
        assert "usage: foo bar [-h]" == child.format_usage("foo bar").strip()
class HelpTest(unittest.TestCase):
    """Tests for ``Command.format_help``: layout of usage line, free-form
    documentation, OPTIONS section, and recursive COMMANDS sections."""

    @mock.patch("sys.argv")
    def test_prog_name_default_and_override(self, argv_mock):
        argv_mock.__getitem__.return_value = "/usr/bin/foo"
        cmd = commands.Command()
        assert "usage: foo" == cmd.format_help().strip()
        assert "usage: bar" == cmd.format_help("bar").strip()

    def test_command_without_documenation_or_options(self):
        # NOTE: "documenation" is a typo kept for history stability.
        cmd = commands.Command()
        assert "usage: bar" == cmd.format_help("bar").strip()

    def test_command_with_option(self):
        cmd = commands.Command()
        cmd.add_argument(
            "-h", "--help", action="store_true", help="show this message"
        )
        expected = (
            "usage: foo [-h]\n\n"
            "OPTIONS:\n\n"
            "  -h, --help  show this message"
        )
        assert expected == cmd.format_help("foo").strip()

    def test_command_with_option_and_positional(self):
        cmd = commands.Command()
        cmd.add_argument(
            "-h", "--help", action="store_true", help="show this message"
        )
        cmd.add_argument("bar", help="some help text")
        expected = (
            "usage: foo [-h] bar\n\n"
            "OPTIONS:\n\n"
            "  -h, --help  show this message\n"
            "  bar         some help text"
        )
        assert expected == cmd.format_help("foo").strip()

    def test_command_with_documentation(self):
        cmd = commands.Command()
        cmd.help = "some text about everything this command does."
        expected = (
            "usage: foo\n\n" "some text about everything this command does."
        )
        assert expected == cmd.format_help("foo").strip()

    def test_command_with_documentation_and_option(self):
        cmd = commands.Command()
        cmd.help = "some text about everything this command does."
        cmd.add_argument(
            "-h", "--help", action="store_true", help="show this message"
        )
        expected = (
            "usage: foo [-h]\n\n"
            "some text about everything this command does.\n\n"
            "OPTIONS:\n\n"
            "  -h, --help  show this message"
        )
        assert expected == cmd.format_help("foo").strip()

    def test_subcommand_without_documentation_or_options(self):
        # An undocumented, option-less child does not produce a COMMANDS block.
        child = commands.Command()
        cmd = commands.Command()
        cmd.add_child("bar", child)
        assert "usage: foo" == cmd.format_help("foo").strip()

    def test_subcommand_with_documentation_shown(self):
        child = commands.Command()
        child.help = "some text about everything this command does."
        cmd = commands.Command()
        cmd.add_child("bar", child)
        expected = (
            "usage: foo\n\n"
            "COMMANDS:\n\n"
            "bar\n\n"
            "  some text about everything this command does."
        )
        assert expected == cmd.format_help("foo").strip()

    def test_subcommand_with_options_shown(self):
        child = commands.Command()
        child.add_argument(
            "-h", "--help", action="store_true", help="show this message"
        )
        cmd = commands.Command()
        cmd.add_child("bar", child)
        expected = (
            "usage: foo\n\n"
            "COMMANDS:\n\n"
            "bar [-h]\n\n"
            "  -h, --help  show this message"
        )
        assert expected == cmd.format_help("foo").strip()

    def test_subcommand_with_positional_shown(self):
        child = commands.Command()
        child.add_argument("baz", help="the great and wonderful")
        cmd = commands.Command()
        cmd.add_child("bar", child)
        expected = (
            "usage: foo\n\n"
            "COMMANDS:\n\n"
            "bar baz\n\n"
            "  baz  the great and wonderful"
        )
        assert expected == cmd.format_help("foo").strip()

    def test_subcommand_with_options_and_documentation(self):
        child = commands.Command()
        child.help = "  some text about everything this command does."
        child.add_argument(
            "-h", "--help", action="store_true", help="show this message"
        )
        cmd = commands.Command()
        cmd.add_child("bar", child)
        expected = (
            "usage: foo\n\n"
            "COMMANDS:\n\n"
            "bar [-h]\n\n"
            "  some text about everything this command does.\n\n"
            "  -h, --help  show this message"
        )
        assert expected == cmd.format_help("foo").strip()

    def test_nested_subcommands_with_options(self):
        subchild = commands.Command()
        subchild.add_argument("--test", help="the great and wonderful")
        child = commands.Command()
        child.add_child("baz", subchild)
        child.add_argument(
            "-h", "--help", action="store_true", help="show this message"
        )
        cmd = commands.Command()
        cmd.add_child("bar", child)
        expected = (
            "usage: foo\n\n"
            "COMMANDS:\n\n"
            "bar [-h]\n\n"
            "  -h, --help   show this message\n\n"
            "bar baz [--test TEST]\n\n"
            "  --test TEST  the great and wonderful"
        )
        assert expected == cmd.format_help("foo").strip()

    def test_nested_subcommands_skipped_intermediate(self):
        # An intermediate child without options/doc is omitted from COMMANDS.
        subchild = commands.Command()
        subchild.add_argument("--test", help="the great and wonderful")
        child = commands.Command()
        child.add_child("baz", subchild)
        cmd = commands.Command()
        cmd.add_child("bar", child)
        expected = (
            "usage: foo\n\n"
            "COMMANDS:\n\n"
            "bar baz [--test TEST]\n\n"
            "  --test TEST  the great and wonderful"
        )
        assert expected == cmd.format_help("foo").strip()

    def test_command_with_option_and_subcommand_with_option(self):
        child = commands.Command()
        child.add_argument("--test", help="the great and wonderful")
        cmd = commands.Command()
        cmd.add_argument(
            "-h", "--help", action="store_true", help="show this message"
        )
        cmd.add_child("bar", child)
        expected = (
            "usage: foo [-h]\n\n"
            "OPTIONS:\n\n"
            "  -h, --help  show this message\n\n"
            "COMMANDS:\n\n"
            "bar [--test TEST]\n\n"
            "  --test TEST  the great and wonderful"
        )
        assert expected == cmd.format_help("foo").strip()

    def test_command_with_options_doc_and_subcommand_with_option_and_doc(self):
        child = commands.Command()
        child.help = "some text about this sub-command."
        child.add_argument("--test", help="the great and wonderful")
        cmd = commands.Command()
        cmd.help = "some text about everything this command does."
        cmd.add_argument(
            "-h", "--help", action="store_true", help="show this message"
        )
        cmd.add_child("bar", child)
        expected = (
            "usage: foo [-h]\n\n"
            "some text about everything this command does.\n\n"
            "OPTIONS:\n\n"
            "  -h, --help  show this message\n\n"
            "COMMANDS:\n\n"
            "bar [--test TEST]\n\n"
            "  some text about this sub-command.\n\n"
            "  --test TEST  the great and wonderful"
        )
        assert expected == cmd.format_help("foo").strip()
class RunTest(unittest.TestCase):
    """``Command.run`` is abstract: the base implementation must raise."""

    def test_default_implmentation_raises_error(self):
        # NOTE: "implmentation" is a typo kept for history stability.
        with self.assertRaises(NotImplementedError):
            commands.Command().run()
class RootCommandTest(unittest.TestCase):
    """Smoke test for ``RootCommand``: ``--option`` parses into
    (section, key, value) config-override tuples."""

    def test_config_overrides(self):
        cmd = commands.RootCommand()
        result = cmd.parse(["--option", "foo/bar=baz"])
        assert result.config_overrides[0] == ("foo", "bar", "baz")
|
|
import logging
import os
import stat
import sys
_logger = logging.getLogger(__name__)
_root = None
_current_folder = None
class _Folder(object):
def __init__(self, folder_id, parent, name):
self.folder_id = folder_id
self.parent = parent
self.name = name
self.sub_folders = []
self.files = None
def cd(path):
    """Change the current remote folder, mimicking shell ``cd``.

    ``path`` may be absolute (leading '/') or relative and may contain
    '.' and '..' components. Returns True on success, False when a
    component cannot be found.

    NOTE(review): on failure the walk stops midway, so ``_current_folder``
    is left at the deepest folder reached rather than being restored --
    confirm whether that is intended.
    """
    global _current_folder
    global _root
    # Components successfully traversed, used for the error message.
    path_walked = []
    if path[0] == '/':
        # Absolute path: restart from the root.
        _current_folder = _root
        path = path[1:]
        path_walked.append('')
    for sub_path in path.split('/'):
        if len(sub_path) > 0:
            if sub_path == '..' and _current_folder.parent is not None:
                _current_folder = _current_folder.parent
            elif sub_path != '.':
                found = False
                sub_path_encoded = _to_unicode(sub_path)
                for sub_folder in _current_folder.sub_folders:
                    if sub_path_encoded == sub_folder.name:
                        _current_folder = sub_folder
                        found = True
                        break
                if not found:
                    path_walked.append(sub_path)
                    sys.stderr.write('cd: Folder not found: %s\n' % '/'.join(path_walked))
                    return False
                path_walked.append(sub_path)
    return True
def ls(client, name):
    """List the current remote folder, or one entry of it (Python 2).

    With ``name`` None, prints sub-folders (with a trailing '/') then files,
    each sorted by name. Otherwise prints the content of the named
    sub-folder, or "name - creationDate - size" for a file.
    """
    global _current_folder

    def print_folder(folder):
        # Folders first, then files, both alphabetically.
        for entity_name in sorted([entity.name for entity in folder.sub_folders]):
            print '%s/' % entity_name
        for entity_name in sorted([entity.name for entity in folder.files]):
            print '%s' % entity_name

    _load_files_if_necessary(client, _current_folder)
    if name is None:
        print_folder(_current_folder)
    else:
        name_encoded = _to_unicode(name)
        for sub_folder in _current_folder.sub_folders:
            if sub_folder.name == name_encoded:
                _load_files_if_necessary(client, sub_folder)
                print_folder(sub_folder)
                return
        for sub_file in _current_folder.files:
            if sub_file.name == name_encoded:
                print '%s - %s - %d' % (sub_file.name, sub_file.creationDate, sub_file.size)
                return
        sys.stderr.write('ls: File/Folder not found: %s\n' % name)
def mkdir(client, name):
    """Create remote folder *name* under the current folder and add it to
    the local cache so it is immediately visible to ``ls``/``cd``."""
    global _current_folder
    f = client.folders.create(name, _current_folder.folder_id)
    _current_folder.sub_folders.append(_Folder(f.id, _current_folder, f.name))
def rm(client, name):
    """Delete the file or (recursively, server-side) folder *name* from the
    current remote folder; files are checked before folders."""
    global _current_folder
    name_encoded = _to_unicode(name)
    _load_files_if_necessary(client, _current_folder)
    for sub_file in _current_folder.files:
        if sub_file.name == name_encoded:
            client.files.delete(sub_file.id)
            # Force-refresh the cached file list after the deletion.
            _load_files_if_necessary(client, _current_folder, True)
            return
    # Track the index so the cached entry can be removed on a match.
    idx = 0
    for sub_directory in _current_folder.sub_folders:
        if sub_directory.name == name_encoded:
            client.folders.delete(sub_directory.folder_id)
            _current_folder.sub_folders.pop(idx)
            return
        idx += 1
    sys.stderr.write('rm: File/Folder not found: %s\n' % name)
def upload(client, input_path, extensions=None):
    """Upload a local file or directory tree into the current remote folder.

    :param input_path: local file or directory
    :param extensions: optional comma-separated extension whitelist applied
        to files inside an uploaded directory (an empty entry matches files
        without an extension)
    """
    global _current_folder
    filter_extensions_splitted = [] if extensions is None else extensions.split(',')

    def keep_file(file_name):
        # Accept everything when no filter is set; otherwise match the
        # extension after the last dot (or '' for extension-less files).
        dot_position = file_name.rfind('.')
        return len(filter_extensions_splitted) == 0 \
            or dot_position >= 0 and file_name[dot_position + 1:] in filter_extensions_splitted \
            or dot_position == -1 and '' in filter_extensions_splitted

    if os.path.isfile(input_path):
        _load_files_if_necessary(client, _current_folder)
        _log_file_activity(input_path)
        if _is_file_present(_current_folder, os.path.basename(input_path)):
            # Skip re-uploading a file that already exists remotely.
            print 'ALREADY EXISTS'
        else:
            client.files.upload(input_path, _current_folder.folder_id)
            _load_files_if_necessary(client, _current_folder, True)
            print 'OK'
    elif os.path.isdir(input_path):
        _upload_directory(client, input_path, _current_folder, keep_file)
    else:
        sys.stderr.write('upload: Bad system file, must be either a directory or a file: %s\n' % input_path)
        return
def download(client, output_path, name):
global _current_folder
if not os.path.isdir(output_path):
sys.stderr.write('download: invalid directory: %s\n' % output_path)
return
elif name == '.':
_download_directory(client, _current_folder, output_path)
else:
_load_files_if_necessary(client, _current_folder)
name_encoded = _to_unicode(name)
for sub_file in _current_folder.files:
if sub_file.name == name_encoded:
file_output_path = os.path.join(output_path, sub_file.name)
_log_file_activity(file_output_path)
client.files.download(sub_file.downloadUrl, file_output_path)
print 'OK'
return
for sub_directory in _current_folder.sub_folders:
if sub_directory.name == name:
destination_path = os.path.join(output_path, sub_directory.name)
if not os.path.exists(destination_path):
_create_local_directory(destination_path)
if not os.path.isdir(destination_path) or not os.access(destination_path, os.W_OK):
sys.stderr.write('download: %s exist and is not a writable directory\n' % destination_path)
return
else:
_download_directory(client, sub_directory, destination_path)
return
sys.stderr.write('download: File/Folder not found: %s\n' % name)
def freespace(client):
freespace_in_octet = client.freespace.get().freespace
print freespace_in_octet
one_ko = 1024
one_mo = 1024 * one_ko
one_go = 1024 * one_mo
if freespace_in_octet < one_ko:
print '%d o' % freespace_in_octet
elif freespace_in_octet < one_mo:
print '%0.1f Ko' % (float(freespace_in_octet) / one_ko)
elif freespace_in_octet < one_go:
print '%0.1f Mo' % (float(one_go) / one_mo)
else:
print '%0.1f Go' % (float(freespace_in_octet) / one_go)
def reload_cache(client):
    """Rebuild the in-memory folder tree from a flat server listing and
    re-anchor ``_current_folder`` inside the new tree.

    If the previously current folder no longer exists, walk up through its
    ancestors until one survives (falling back to the root).
    """
    global _root
    global _current_folder
    flat_hierarchy = client.folders.get(flat=True, tree=True)
    root = _Folder(flat_hierarchy.id, None, flat_hierarchy.name)
    # Index every folder by id, then wire up parent/child links.
    folders_by_id = {f.id: _Folder(f.id, None, f.name) for f in flat_hierarchy.subfolders}
    folders_by_id[root.folder_id] = root
    for f in flat_hierarchy.subfolders:
        if f.parentId not in folders_by_id:
            # Diagnostic only: the KeyError below will still surface the issue.
            sys.stderr.write('%s not in list. Available: \n%s\n' % (f.parentId, '\n'.join(folders_by_id.keys())))
        parent = folders_by_id[f.parentId]
        folder = folders_by_id[f.id]
        folder.parent = parent
        parent.sub_folders.append(folder)
    _root = root
    if _current_folder is None:
        _current_folder = root
    else:
        # Climb ancestors until we find one that still exists in the new tree.
        while _current_folder is not None and _current_folder.folder_id not in folders_by_id:
            _current_folder = _current_folder.parent
        if _current_folder is None:
            _current_folder = root
        else:
            _current_folder = folders_by_id[_current_folder.folder_id]
def pwd():
    """Print the absolute path of the current remote folder (Python 2)."""
    print get_path()
def get_path():
    """Return the absolute path of the current remote folder, built by
    walking parent links up to the root (e.g. '/root/sub')."""
    global _current_folder
    parts = []
    node = _current_folder
    while node is not None:
        parts.append(node.name)
        node = node.parent
    # parts is leaf-to-root; reverse it to read root-to-leaf.
    return '/%s' % '/'.join(reversed(parts))
def _upload_directory(client, directory_path, current_directory, keep_file):
    """Recursively upload a local directory into ``current_directory``.

    Reuses the remote folder of the same name when present, otherwise
    creates it. Files already present remotely are skipped; ``keep_file``
    filters which local files are uploaded.
    """
    local_directory_name = os.path.basename(directory_path)
    if len(local_directory_name) == 0:
        # Occurs if directory_path ends with /
        local_directory_name = os.path.basename(os.path.dirname(directory_path))
    encoded_directory_name = _to_unicode(local_directory_name)
    remote_directory = None
    for sub_folder in current_directory.sub_folders:
        if sub_folder.name == encoded_directory_name:
            remote_directory = sub_folder
            _load_files_if_necessary(client, remote_directory)
            break
    if remote_directory is None:
        f = client.folders.create(local_directory_name, current_directory.folder_id)
        remote_directory = _Folder(f.id, current_directory, f.name)
        current_directory.sub_folders.append(remote_directory)
    sub_folders = []
    new_files = False
    for sub_entity in sorted(os.listdir(directory_path)):
        full_path = os.path.join(directory_path, sub_entity)
        if os.path.isfile(full_path) and keep_file(sub_entity):
            _log_file_activity(full_path)
            if remote_directory.files is not None and _is_file_present(remote_directory, sub_entity):
                print 'ALREADY EXISTS'
            else:
                client.files.upload(full_path, remote_directory.folder_id)
                print 'OK'
                new_files = True
        elif os.path.isdir(full_path):
            # Recurse into sub-directories after all files are handled.
            sub_folders.append(full_path)
    if new_files:
        # Invalidate the cached file list so it is re-fetched next time.
        remote_directory.files = None
    for sub_folder in sub_folders:
        _upload_directory(client, sub_folder, remote_directory, keep_file)
def _is_file_present(remote_directory, local_filename):
    """Return True when ``remote_directory`` already holds a file whose name
    matches ``local_filename`` (compared after UTF-8 decoding)."""
    target = _to_unicode(local_filename)
    for remote_file in remote_directory.files:
        if remote_file.name == target:
            return True
    return False
def _create_local_directory(destination_path):
    # Owner-only rwx (0700); group/others get no access at all --
    # NOTE(review): confirm this restrictive mode is intended.
    os.mkdir(destination_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
def _download_directory(client, folder, destination_path):
    """Recursively download remote ``folder`` into local ``destination_path``
    (Python 2): files first, then each sub-folder, creating local directories
    as needed. Aborts when a destination exists but is not writable."""
    _load_files_if_necessary(client, folder)
    for sub_file in folder.files:
        file_output_path = os.path.join(destination_path, sub_file.name)
        _log_file_activity(file_output_path)
        client.files.download(sub_file.downloadUrl, file_output_path)
        print 'OK'
    for sub_folder in folder.sub_folders:
        sub_folder_path = os.path.join(destination_path, sub_folder.name)
        if not os.path.exists(sub_folder_path):
            _create_local_directory(sub_folder_path)
        if not os.path.isdir(sub_folder_path) or not os.access(sub_folder_path, os.W_OK):
            # NOTE(review): this returns (aborting remaining siblings) rather
            # than skipping just this sub-folder -- confirm intended.
            sys.stderr.write('download: %s exist and is not a writable directory\n' % sub_folder_path)
            return
        else:
            _download_directory(client, sub_folder, sub_folder_path)
def _load_files_if_necessary(client, folder, force=False):
    """Populate ``folder.files`` from the API on first use, or refresh it
    when ``force`` is True."""
    if folder.files is None or force:
        folder.files = client.folders.get(folder.folder_id, showthumbnails=True).files
def _to_unicode(name):
    """Decode a byte string as UTF-8 into unicode, silently dropping
    undecodable bytes (Python 2 ``unicode`` builtin)."""
    return unicode(name, "utf-8", errors="ignore")
def _log_file_activity(file_path):
sys.stdout.write('%s ...' % file_path)
sys.stdout.flush()
|
|
def reconstruct_chain(bestfitloc='posteriorpdf.fits', outfile='chain_reconstructed.pkl'):
    """
    Reconstruct an unflattened chain from bestfitloc.

    If this works, it would be better than saving unflattened chain in an
    additional pickle file, since this occupies much less space.
    From posterior, mus' are also parameters, and have their own chains.

    Parameters
    ----------
    bestfitloc: str
        where the flattened chains are stored
    outfile: str
        output filename to save unflattened chains

    Returns
    -------
    chains: numpy.ndarray
        reconstructed chains, shape (nwalkers, nsteps, ndim); also pickled
        to ``outfile`` so the visualization programs can consume them.
    """
    import numpy
    from astropy.io import fits
    print("Reading burnin results from {0:s}".format(bestfitloc))
    pdf = fits.getdata(bestfitloc)
    from astropy.table import Table
    fitKeys = Table.read(bestfitloc).keys()
    import yaml
    configloc = 'config.yaml'
    # Close the config file deterministically (was left open before).
    with open(configloc) as configfile:
        config = yaml.load(configfile)
    nwalkers = config['Nwalkers']
    # BUG FIX: use an explicit divisibility check plus floor division so
    # nsteps is always an int (the old ``isinstance(nsteps, int)`` assert
    # never verified divisibility and breaks under true division).
    assert len(pdf) % nwalkers == 0, 'the total number of samples should be nsteps x nwalkers'
    nsteps = len(pdf) // nwalkers
    ndim = len(fitKeys)
    chains = numpy.empty([nwalkers, nsteps, ndim])
    for ii, param in enumerate(fitKeys):
        # Samples are interleaved walker-by-walker in the flattened table.
        these_chains = pdf[param]
        for i in range(nwalkers):
            chains[i, :, ii] = these_chains[i::nwalkers]
    import cPickle as pickle
    with open(outfile, 'wb') as f:
        pickle.dump(chains, f, -1)
    print("Saved reconstructed unflattened chains to {}.".format(outfile))
    # BUG FIX: the docstring promises the chains and test_reconstruct_chain
    # consumes the return value, but nothing was returned.
    return chains
def test_reconstruct_chain(bestfitloc='posteriorpdf.fits', chainFile='chain.pkl'):
    """
    Test that the chain reconstructed from the flattened posterior matches
    the unflattened chain stored on disk.

    Parameters
    ----------
    bestfitloc: str
        the fits file from which we will reconstruct the chain
    chainFile: str
        the pickle file with the unflattened chain

    Raises AssertionError when the shapes or values disagree.
    """
    # cPickle only exists on Python 2; fall back to pickle elsewhere.
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    # Bug fix: pickles must be read in binary mode ('rb'); text mode breaks
    # on Python 3 and on Windows.
    with open(chainFile, 'rb') as f:
        chain = pickle.load(f)
    reconstructed = reconstruct_chain(bestfitloc)
    # (the leftover `import pdb; pdb.set_trace()` debug stop was removed)
    # number of walkers and iterations should be the same
    assert reconstructed.shape[0] == chain.shape[0]
    assert reconstructed.shape[1] == chain.shape[1]
    # Bug fix: a bare `assert array == array` raises ValueError ("truth value
    # of an array ... is ambiguous"); compare element-wise and reduce.
    # Column 0 of the posterior is lnprob, so parameter 0 lives in column 1.
    assert (reconstructed[:, :, 1] == chain[:, :, 0]).all()
def get_autocor(chainFile='chain.pkl'):
    '''
    Get the integrated autocorrelation length for each parameter, averaging
    the chain over walkers, and return the maximum across parameters.

    Parameters
    ----------
    chainFile: str
        pickle file holding an unflattened chain of shape
        (nwalkers, nsteps, nparams)

    Returns
    -------
    idx: int
        max. ac length among all parameters; falls back to 150 when emcee
        cannot produce an estimate (e.g. the chain is too short).
    '''
    # cPickle only exists on Python 2; fall back to pickle elsewhere.
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    # Bug fix: pickles must be read in binary mode ('rb').
    with open(chainFile, 'rb') as f:
        chain = pickle.load(f)
    from emcee import autocorr
    import numpy as np
    ac = []
    for i in range(chain.shape[-1]):
        # Average parameter i over walkers, then estimate its integrated
        # autocorrelation time along the step axis.
        walker_mean = np.mean(chain[:, :, i], axis=0)
        ac.append(autocorr.integrated_time(walker_mean, axis=0, fast=False))
    try:
        idx = int(np.max(ac))
    except ValueError:
        # emcee raises when the chain is shorter than ~50x the AC time;
        # fall back to a conservative default length.
        idx = 150
    return idx
def plotPDF(fitresults, tag, limits='', Ngood=5000, axes='auto'):
    """
    Plot the PDF (histogram) of each parameter of the model.

    Parameters
    ----------
    fitresults: record array / table
        flattened posterior samples; only the last Ngood rows are used
    tag: str
        prefix for the output image (tag + 'PDFs.png')
    limits: sequence
        (lower, upper) parameter limit arrays; used only when axes == 'initial'
    Ngood: int
        number of trailing samples to keep before pruning
    axes: str
        'auto' for automatic x-tick placement, 'initial' to clamp the x-axis
        to the prior limits

    Returns
    -------
    avg_dic: dict
        key = names of the model parameters, value = average value from the
        last Ngood (pruned) samples
    """
    import numpy
    import matplotlib.pyplot as plt
    from pylab import savefig
    from matplotlib import rc
    import modifypdf
    # plotting parameters
    rc('font', **{'family': 'sans-serif', 'size': '12'})
    # grab the last Ngood fits
    fitresults = fitresults[-Ngood:]
    # identify the good fits
    fitresultsgood = modifypdf.prune(fitresults)
    # determine dimensions of PDF plots; `//` keeps the Python 2 integer
    # division behaviour under Python 3 as well
    nparams = len(fitresultsgood[0])
    ncol = 4
    nrow = (nparams // ncol + 1) if nparams % ncol != 0 else nparams // ncol
    plt.figure(figsize=(18.0, 2.0 * nrow))
    # set up the plotting window
    plt.subplots_adjust(left=0.08, bottom=0.15, right=0.95, top=0.95,
                        wspace=0.4, hspace=0.65)
    pnames = fitresultsgood.names
    # distributions narrower than this are treated as fixed parameters
    width = 1e-6
    # initialize a dictionary with keywords = pnames to hold the average
    # value of each parameter in the chain
    avg_dic = dict.fromkeys(pnames)
    for i, pname in enumerate(pnames):
        ax = plt.subplot(nrow, ncol, i + 1)
        frg = fitresultsgood[pname]
        rmsval = numpy.std(frg)
        if rmsval > width:
            avgval = numpy.mean(frg)
            # Bug fix: .format() must be inside print(); the old
            # `print(...).format(...)` crashes under Python 3.
            print("{:s} = {:.4f} +/- {:.4f}".format(pname, avgval, rmsval))
            avg_dic[pname] = avgval
            # ~5 bins per rms of spread
            totalwidth = frg.max() - frg.min()
            nbins = totalwidth / rmsval * 5
            plt.hist(frg, int(nbins), edgecolor='blue')
            plt.ylabel('N')
            plt.xlabel(pname)
            if axes == 'auto':
                start, end = ax.get_xlim()
                nticks = 5
                stepsize = (end - start) / nticks
                ax.xaxis.set_ticks(numpy.arange(start, end + 0.99 * stepsize,
                                                stepsize))
            elif axes == 'initial':
                oldaxis = plt.axis()
                if pname[0:6] == 'lnprob':
                    xmin = frg.min()
                    xmax = frg.max()
                elif pname[0:2] == 'mu':
                    xmin = 0
                    xmax = 30
                else:
                    p_l = limits[0]
                    p_u = limits[1]
                    xmin = p_l[i]
                    xmax = p_u[i]
                ymin = oldaxis[2]
                ymax = oldaxis[3]
                plt.axis([xmin, xmax, ymin, ymax])
        else:
            # Same Python 3 print/format fix as above, plus the
            # "narrorwer" typo.
            print('*** Distribution narrower than {} ***'.format(width))
            print('*** This is likely a fixed parameter: {}. ***'.format(pname))
            print('*** Check config.yaml *** ')
    savefile = tag + 'PDFs.png'
    savefig(savefile)
    return avg_dic
def makeSBmap(config, fitresult):
    """
    Make a surface brightness map of the lensed image for a given set of model
    parameters.

    Parameters
    ----------
    config: dict
        parsed config.yaml with one 'RegionN' sub-dict per modelled region
    fitresult: sequence
        one posterior sample; element 0 is lnprob and -- judging by the
        slicing below -- the trailing 2*(nlensedsource + nlensedregions)
        elements are magnification parameters (TODO confirm)

    Side effects: overwrites 'LensedSBmap.fits' and 'SBmap_Region.fits' in
    the working directory.  Returns None.
    """
    import lensutil
    from astropy.io import fits
    import os
    import setuputil
    import re
    import numpy
    # Loop over each region
    # read the input parameters
    paramData = setuputil.loadParams(config)
    nlensedsource = paramData['nlensedsource']
    nlensedregions = paramData['nlensedregions']
    npar_previous = 0
    # Regions are discovered by scanning the config keys for 'RegionN'.
    configkeys = config.keys()
    configkeystring = " ".join(configkeys)
    regionlist = re.findall('Region.', configkeystring)
    # Accumulators start as scalar 0 and become arrays on first `+=`.
    SBmap_all = 0
    LensedSBmap_all = 0
    nregion = len(regionlist)
    for regioni in range(nregion):
        regstring = 'Region' + str(regioni)
        #indx = paramData['regionlist'].index(regstring)
        cr = config[regstring]
        # Strip lnprob (index 0) and the trailing magnification entries so
        # only the structural model parameters remain.
        nmu = 2 * (numpy.array(nlensedsource).sum() + nlensedregions)
        if nmu > 0:
            allparameters0 = list(fitresult)[1:-nmu]
        else:
            allparameters0 = list(fitresult)[1:]
        # search poff_models for parameters fixed relative to other parameters
        fixindx = setuputil.fixParams(paramData)
        poff = paramData['poff']
        ndim_total = len(poff)
        fixed = (numpy.where(fixindx >= 0))[0]
        nfixed = fixindx[fixed].size
        parameters_offset = numpy.zeros(ndim_total)
        for ifix in range(nfixed):
            # A fixed parameter takes its value from the parameter it is
            # tied to (offset by +1 to skip lnprob), possibly chained once.
            ifixed = fixed[ifix]
            subindx = int(fixindx[ifixed])
            par0 = 0
            if fixindx[subindx] > 0:
                par0 = fitresult[fixindx[subindx] + 1]
            parameters_offset[ifixed] = fitresult[subindx + 1] + par0
        allparameters = allparameters0 + parameters_offset
        # count the number of lenses
        configkeys = cr.keys()
        configkeystring = " ".join(configkeys)
        lenslist = re.findall('Lens.', configkeystring)
        nlens = len(lenslist)
        # count the number of sources
        sourcelist = re.findall('Source.', configkeystring)
        nsource = len(sourcelist)
        # 5 parameters per lens, 6 per source; this region's parameters are
        # the next contiguous slice of allparameters.
        nparperlens = 5
        nparpersource = 6
        nparlens = nparperlens * nlens
        nparsource = nparpersource * nsource
        npar = nparlens + nparsource + npar_previous
        parameters = allparameters[npar_previous:npar]
        npar_previous = npar
        #nlens = paramData['nlens_regions'][indx]
        #nsource = paramData['nsource_regions'][indx]
        x = paramData['x'][regioni]
        y = paramData['y'][regioni]
        modelheader = paramData['modelheader'][regioni]
        model_types = paramData['model_types'][regioni]
        SBmap, LensedSBmap, Aperture, LensedAperture, mu_tot, mu_mask = \
            lensutil.sbmap(x, y, nlens, nsource, parameters, model_types, \
            computeamp=True)
        # NOTE(review): debugging branch, hard-coded off.  When enabled it
        # computes a magnification map on a refined grid and stops in pdb.
        caustics = False
        if caustics:
            deltapar = parameters[0:nparlens + nparpersource]
            refine = 2
            nx = x[:, 0].size * refine
            ny = y[:, 0].size * refine
            x1 = x[0, :].min()
            x2 = x[0, :].max()
            linspacex = numpy.linspace(x1, x2, nx)
            y1 = y[:, 0].min()
            y2 = y[:, 0].max()
            linspacey = numpy.linspace(y1, y2, ny)
            onex = numpy.ones(nx)
            oney = numpy.ones(ny)
            finex = numpy.outer(oney, linspacex)
            finey = numpy.outer(linspacey, onex)
            mumap = numpy.zeros([ny, nx])
            for ix in range(nx):
                for iy in range(ny):
                    # Probe the magnification with a delta-function source
                    # placed at each fine-grid position.
                    deltapar[-nparpersource + 0] = finex[ix, iy]
                    deltapar[-nparpersource + 1] = finey[ix, iy]
                    xcell = paramData['celldata']
                    deltapar[-nparpersource + 2] = xcell
                    deltaunlensed, deltalensed, A1, A2, mu_xy, mu_xymask = \
                        lensutil.sbmap(finex, finey, nlens, 1, deltapar, ['Delta'])
                    mumap[ix, iy] = mu_xy[0]
            import matplotlib.pyplot as plt
            plt.imshow(mumap, origin='lower')
            plt.contour(mumap, levels=[mumap.max()/1.1])
            import pdb; pdb.set_trace()
        SBmap_all += SBmap
        LensedSBmap_all += LensedSBmap
    # Write the co-added maps; `modelheader` is whatever the LAST region
    # provided -- assumes all regions share a header (TODO confirm).
    LensedSBmapLoc = 'LensedSBmap.fits'
    SBmapLoc = 'SBmap_Region.fits'
    cmd = 'rm -rf ' + LensedSBmapLoc + ' ' + SBmapLoc
    os.system(cmd)
    fits.writeto(LensedSBmapLoc, LensedSBmap_all, modelheader)
    fits.writeto(SBmapLoc, SBmap_all, modelheader)
    return
def makeVis(config, miriad=False, idtag=''):
    """
    Make simulated visibilities given a model image and observed visibilities.
    Writes the visibilities to uvfits files.

    Parameters
    ----------
    config: dict
        parsed config.yaml; config['UVData'] is a path or list of paths
    miriad: bool
        True: drive miriad's `uvmodel` via os.system; False: use the Python
        uvmodel module (and CASA for ms conversion)
    idtag: str
        tag inserted into the output visibility file names

    Python 2 only: uses print statements.  Side effects only (shell commands
    and file creation); returns None.
    """
    import uvmodel
    import os
    # get the uvfits files
    visfile = config['UVData']
    #----------------------------------------------------------------------
    # Python version of UVMODEL
    # "Observe" the lensed emission with the SMA and write to a new file
    #----------------------------------------------------------------------
    # Python version of UVMODEL's "replace" subroutine:
    # is visfile a list of visibilities files?
    if not type(visfile) is list:
        visname, visext = os.path.splitext(visfile)
        if miriad:
            # We use miriad to do the imaging
            tag = '.miriad'
            DataMiriad = visname + tag
            if not os.path.exists(DataMiriad):
                print("Creating new miriad data file: " + DataMiriad)
                os.system('rm -rf ' + DataMiriad)
                command = 'fits op=uvin in=' + visfile + ' out=' + DataMiriad
                os.system(command)
        else:
            # We use CASA to do the imaging
            tag = '.ms'
            # check to see if the CASA ms exists
            # NOTE(review): only RuntimeError is caught -- if the `taskinit`
            # import itself fails (CASA absent) the ImportError propagates.
            try:
                from taskinit import tb
                tb.open(visname + tag)
                tb.close()
                print "Found an existing CASA ms file."
            except RuntimeError:
                print "No CASA ms file found, creating one from " + visname \
                        + ".uvfits file."
                from casa import importuvfits
                infile = visname + '.uvfits'
                outfile = visname + '.ms'
                importuvfits(fitsfile=infile, vis=outfile)
        visfile = visname + tag
        SBmapLoc = 'LensedSBmap.fits'
        modelvisfile = visname + '_model_' + idtag + tag
        os.system('rm -rf ' + modelvisfile)
        if miriad:
            # Convert the model map to miriad format, then "replace" the
            # observed visibilities with the model's.
            SBmapMiriad = 'LensedSBmap' + tag
            os.system('rm -rf ' + SBmapMiriad)
            command = 'fits op=xyin in=' + SBmapLoc + ' out=' \
                    + SBmapMiriad
            os.system(command)
            command = 'uvmodel options=replace vis=' + visfile + \
                    ' model=' + SBmapMiriad + ' out=' + modelvisfile
            os.system(command + ' > uvmodeloutput.txt')
            #command = 'cp ' + visfile + '/wflags ' + modelvisfile
            #os.system(command)
        else:
            #print(visfile, modelvisfile)
            uvmodel.replace(SBmapLoc, visfile, modelvisfile,
                    miriad=miriad)
            #print(visfile, modelvisfile)
        # Python version of UVMODEL's "subtract" subroutine:
        modelvisfile = visname + '_residual_' + idtag + tag
        os.system('rm -rf ' + modelvisfile)
        if miriad:
            SBmapMiriad = 'LensedSBmap' + tag
            os.system('rm -rf ' + SBmapMiriad)
            command = 'fits op=xyin in=' + SBmapLoc + ' out=' \
                    + SBmapMiriad
            os.system(command)
            os.system('rm -rf ' + modelvisfile)
            command = 'uvmodel options=subtract vis=' + visfile + \
                    ' model=' + SBmapMiriad + ' out=' + modelvisfile
            os.system(command)
            #command = 'cp ' + visfile + '/wflags ' + modelvisfile
            #os.system(command)
        else:
            #print(visfile, modelvisfile)
            uvmodel.subtract(SBmapLoc, visfile, modelvisfile,
                    miriad=miriad)
    else:
        # Same pipeline, once per visibility file in the list.
        for i, ivisfile in enumerate(visfile):
            visname, visext = os.path.splitext(ivisfile)
            print(visname)
            if miriad:
                tag = '.uvfits'
            else:
                tag = '.ms'
            # check to see if the CASA ms exists
            try:
                from taskinit import tb
                tb.open(visname + tag)
                tb.close()
                print "Found an existing CASA ms file."
            except RuntimeError:
                print "No CASA ms file found, creating one from " + visname \
                        + ".uvfits file."
                from casa import importuvfits
                infile = visname + '.uvfits'
                outfile = visname + '.ms'
                importuvfits(fitsfile=infile, vis=outfile)
            SBmapLoc = 'LensedSBmap.fits'
            if miriad:
                SBmapMiriad = 'LensedSBmap.miriad'
                os.system('rm -rf ' + SBmapMiriad)
                command = 'fits op=xyin in=' + SBmapLoc + ' out=' \
                        + SBmapMiriad
                os.system(command)
                ivisfile = visname + '.miriad'
                modelivisfile = visname + '_model_' + idtag + '.miriad'
                os.system('rm -rf ' + modelivisfile)
                command = 'uvmodel options=replace vis=' + ivisfile + \
                        ' model=' + SBmapMiriad + ' out=' + modelivisfile
                os.system(command)
                #command = 'cp ' + ivisfile + '/wflags ' + modelivisfile
                #os.system(command)
            else:
                ivisfile = visname + tag
                modelivisfile = visname + '_model_' + idtag + tag
                os.system('rm -rf ' + modelivisfile)
                uvmodel.replace(SBmapLoc, ivisfile, modelivisfile,
                        miriad=miriad)
            # Python version of UVMODEL's "subtract" subroutine:
            if miriad:
                SBmapMiriad = 'LensedSBmap.miriad'
                os.system('rm -rf ' + SBmapMiriad)
                command = 'fits op=xyin in=' + SBmapLoc + ' out=' \
                        + SBmapMiriad
                os.system(command)
                modelivisfile = visname + '_residual_' + idtag + '.miriad'
                os.system('rm -rf ' + modelivisfile)
                command = 'uvmodel options=subtract vis=' + ivisfile + \
                        ' model=' + SBmapMiriad + ' out=' + modelivisfile
                os.system(command)
                #command = 'cp ' + ivisfile + '/wflags ' + modelivisfile
                #os.system(command)
            else:
                modelivisfile = visname + '_residual_' + idtag + tag
                os.system('rm -rf ' + modelivisfile)
                uvmodel.subtract(SBmapLoc, ivisfile, modelivisfile,
                        miriad=miriad)
    #except:
    #    msg = "Visibility datasets must be specified as either a string " \
    #        + "or a list of strings."
    #    print(msg)
    #    raise TypeError
def makeImage(config, threshold, interactive=True, miriad=False, idtag=''):
    """
    Make an image of the model and the residual from simulated model
    visibilities.  Requires CASA or miriad.

    Parameters
    ----------
    config: dict
        parsed config.yaml; supplies 'UVData', 'ObjectName', 'ImageName'
    threshold: float
        in mJy, cleaning threshold (CASA path only)
    interactive: bool
        passed to CASA's clean task
    miriad: bool
        True: image with miriad via generated csh scripts; False: use CASA
    idtag: str
        tag used in the simulated visibility file names

    Side effects only (shell commands, image/fits files); returns None.
    """
    import os
    from astropy.io import fits
    import miriadutil
    from rmtables import rmtables
    visfile = config['UVData']
    target = config['ObjectName']
    fitsim = config['ImageName']
    fitshead = fits.getheader(fitsim)
    imsize = [fitshead['NAXIS1'], fitshead['NAXIS2']]
    cell = str(fitshead['CDELT2'] * 3600) + 'arcsec'
    # invert and clean the simulated model visibilities
    if miriad:
        try:
            # use miriad for imaging
            imsize = str(fitshead['NAXIS1'])
            index = visfile.find('.uvfits')
            name = visfile[0:index]
            uvfitsloc = name + '_model_' + idtag + '.uvfits'
            uvmirloc = name + '_model_' + idtag + '.miriad'
            command = 'rm -rf ' + uvmirloc
            #os.system(command)
            # NOTE(review): the fits-conversion commands below are built but
            # the os.system calls are commented out -- the .miriad files are
            # assumed to exist already.
            command = 'fits op=uvin options=varwt in=' + uvfitsloc \
                    + ' out=' + uvmirloc
            #os.system(command + ' > fitsoutput.txt')
            imloc = target + '_clean_model'
            niter = '10000'
            cell = str(fitshead['CDELT2'] * 3600)
            cutoff = '2e-3'
            cutoff2 = '4e-3'
            robust = '+0.5'
            region = 'quarter'
            region2 = 'quarter'
            gain = '0.1'
            fwhm = '0'
            sup = '0'
            parameters = [uvmirloc, imloc, imsize, cell, niter, cutoff, \
                    cutoff2, robust, region, region2, gain, fwhm, sup]
            miriadutil.makeScript(parameters)
            command = 'csh image.csh'
            os.system(command + ' > imageoutput.txt')
            # the simulated residual visibilities
            uvfitsloc = name + '_residual_' + idtag + '.uvfits'
            uvmirloc = name + '_residual_' + idtag + '.miriad'
            command = 'rm -rf ' + uvmirloc
            #os.system(command)
            command = 'fits op=uvin options=varwt in=' + uvfitsloc \
                    + ' out=' + uvmirloc
            #os.system(command + ' >> fitsoutput.txt')
            imloc = target + '_clean_residual'
            parameters = [uvmirloc, imloc, imsize, cell, niter, cutoff, \
                    cutoff2, robust, region, region2, gain, fwhm, sup]
            miriadutil.makeScript(parameters)
            command = 'csh image.csh'
            os.system(command)# + ' >> imageoutput.txt')
        # NOTE(review): bare except -- ANY failure above (including a typo /
        # NameError) falls through to the "visfile is a list" path, masking
        # real errors.
        except:
            try:
                modellist = []
                residlist = []
                for ivisfile in visfile:
                    # use miriad for imaging
                    imsize = str(fitshead['NAXIS1'])
                    index = ivisfile.find('.uvfits')
                    name = ivisfile[0:index]
                    uvfitsloc = name + '_model_' + idtag + '.uvfits'
                    uvmirloc = name + '_model_' + idtag + '.miriad'
                    modellist.append(uvmirloc)
                    command = 'rm -rf ' + uvmirloc
                    #os.system(command)
                    command = 'fits op=uvin in=' + uvfitsloc + ' out=' \
                            + uvmirloc
                    #os.system(command + ' > fitsoutput.txt')
                    # the simulated residual visibilities
                    uvfitsloc = name + '_residual_' + idtag + '.uvfits'
                    uvmirloc = name + '_residual_' + idtag + '.miriad'
                    residlist.append(uvmirloc)
                    command = 'rm -rf ' + uvmirloc
                    #os.system(command)
                    command = 'fits op=uvin in=' + uvfitsloc + ' out=' \
                            + uvmirloc
                    #os.system(command + ' >> fitsoutput.txt')
                invisloc = ','.join(modellist)
                imloc = target + '_model'
                # NOTE(review): niter/cutoff/... are only defined in the
                # try-branch above; if that branch failed before defining
                # them, this raises NameError (caught by the except below).
                parameters = [invisloc, imloc, imsize, cell, niter, cutoff, \
                        cutoff2, robust, region, region2, gain, fwhm, sup]
                miriadutil.makeScript(parameters)
                command = 'csh image.csh'
                os.system(command + ' > imageoutput.txt')
                invisloc = ','.join(residlist)
                imloc = target + '_residual'
                parameters = [invisloc, imloc, imsize, cell, niter, cutoff, \
                        cutoff2, robust, region, region2, gain, fwhm, sup]
                miriadutil.makeScript(parameters)
                command = 'csh image.csh'
                os.system(command)# + ' >> imageoutput.txt')
            except:
                msg = "Visibility datasets must be specified as either a "\
                        "string or a list of strings."
                print(msg)
                raise TypeError
    else:
        # use CASA for imaging
        from clean import clean
        from casa import exportfits
        # ---------------------------------------
        # invert and clean the model visibilities
        # remove any existing clean products, except for .mask
        # mask should be re-usable
        imloc = target + '_clean_model'
        # os.system('rm -rf ' + imloc + '*')
        for ext in ['.flux', '.image', '.model', '.psf', '.residual']:
            rmtables(imloc + ext)
        # handle lists of visibility files
        if type(visfile) is list:
            modelvisloc = []
            for i, ivisfile in enumerate(visfile):
                visname, visext = os.path.splitext(ivisfile)
                modelvisloc.append(visname + '_model_' + idtag + '.ms')
        # handle single visibility files
        else:
            visname, visext = os.path.splitext(visfile)
            modelvisloc = visname + '_model_' + idtag + '.ms'
        # search for an existing mask
        maskname = imloc + '.mask'
        try:
            maskcheck = os.path.exists(maskname)
        except:
            maskcheck = False
        if maskcheck:
            mask = maskname
        else:
            mask = ''
        threshold = str(threshold)
        # use CASA's clean task to make the images
        # NOTE(review): Python 2 idiom -- `print(...).format(...)` prints the
        # formatted string under Py2 but crashes under Py3.
        print("")
        print("*** CLEANING with the following options: *** \n")
        print("vis={:s}, imagename={:s}, mode='mfs', niters=10000, threshold={:s} mJy, interactive={:}, mask={:s}, imsize={:s},cell={:s},weighting='briggs',robust=0.5").format(modelvisloc, imloc+'.image', threshold, interactive, mask, imsize, cell)
        clean(vis=modelvisloc, imagename=imloc, mode='mfs', niter=10000,
                threshold=threshold+'mJy', interactive=interactive, mask=mask,
                imsize=imsize, cell=cell, weighting='briggs', robust=0.5)
        # export the cleaned image to a fits file
        os.system('rm -rf ' + imloc + '.fits')
        exportfits(imagename=imloc + '.image', fitsimage=imloc + '.fits')
        # ---------------------------------------
        # invert and clean the residual visibilities
        # remove any existing clean products, except for .mask
        imloc = target + '_clean_residual'
        # os.system('rm -rf ' + imloc + '*')
        for ext in ['.flux', '.image', '.model', '.psf', '.residual']:
            rmtables(imloc + ext)
        # handle lists of visibility files
        if type(visfile) is list:
            modelvisloc = []
            for i, ivisfile in enumerate(visfile):
                visname, visext = os.path.splitext(ivisfile)
                modelvisloc.append(visname + '_residual_' + idtag + '.ms')
        # handle single visibility files
        else:
            visname, visext = os.path.splitext(visfile)
            modelvisloc = visname + '_residual_' + idtag + '.ms'
        # use CASA's clean task to make the images (re-uses the mask found
        # for the model image)
        clean(vis=modelvisloc, imagename=imloc, mode='mfs', niter=10000,
                threshold=threshold+'mJy', interactive=interactive, mask=mask,
                imsize=imsize, cell=cell, weighting='briggs', robust=0.5)
        # export the cleaned image to a fits file
        os.system('rm -rf ' + imloc + '.fits')
        exportfits(imagename=imloc + '.image', fitsimage=imloc + '.fits')
    return
def plotImage(model, data, config, modeltype, fitresult, tag=''):
    """
    Make a surface brightness map of a given model image.  Overlay with red
    contours a surface brightness map of the data image to which the model was
    fit.

    Parameters
    ----------
    model: fits HDU list
        image drawn as the grayscale
    data: fits HDU list
        image drawn as contours
    config: dict
        parsed config.yaml
    modeltype: str
        'model', 'residual', or anything else for an optical grayscale
    fitresult: sequence
        one posterior sample (lnprob first), used to mark lens/source shapes
    tag: str
        appended to the output file name 'LensedSBmap.<modeltype>.<tag>.png'
    """
    import numpy
    from astropy import wcs
    import matplotlib
    import matplotlib.pyplot as plt
    from matplotlib.patches import Ellipse
    from pylab import savefig
    import setuputil
    import re
    # set font properties
    font = {'family' : 'sans-serif',
            'weight' : 'bold',
            'size'   : 13}
    matplotlib.rc('font', **font)
    matplotlib.rcParams['axes.linewidth'] = 1.5
    matplotlib.rcParams['axes.labelsize'] = 'xx-large'
    fig = plt.figure(figsize=(5.0, 5.0))
    ax = fig.add_subplot(1, 1, 1)
    plt.subplots_adjust(left=0.17, right=0.93, top=0.97,
            bottom=0.11, wspace=0.35)
    paramData = setuputil.loadParams(config)
    nlensedsource = paramData['nlensedsource']
    nlensedregions = paramData['nlensedregions']
    npar_previous = 0
    configkeys = config.keys()
    configkeystring = " ".join(configkeys)
    regionlist = re.findall('Region.', configkeystring)
    nregion = len(regionlist)
    from copy import deepcopy
    # NOTE(review): fitresultDict is never used below.
    fitresultDict = deepcopy(fitresult)
    # Per-region parameter unpacking; mirrors the logic in makeSBmap.
    for iregion in range(nregion):
        region = 'Region' + str(iregion)
        cr = config[region]
        ra_centroid = cr['RACentroid']
        dec_centroid = cr['DecCentroid']
        radialextent = cr['RadialExtent']
        # Strip lnprob (index 0) and trailing magnification entries.
        nmu = 2 * (numpy.array(nlensedsource).sum() + nlensedregions)
        if nmu > 0:
            allparameters0 = list(fitresult)[1:-nmu]
        else:
            allparameters0 = list(fitresult)[1:]
        # search poff_models for parameters fixed relative to other parameters
        fixindx = setuputil.fixParams(paramData)
        poff = paramData['poff']
        ndim_total = len(poff)
        fixed = (numpy.where(fixindx >= 0))[0]
        nfixed = fixindx[fixed].size
        parameters_offset = numpy.zeros(ndim_total)
        for ifix in range(nfixed):
            ifixed = fixed[ifix]
            subindx = fixindx[ifixed]
            par0 = 0
            if fixindx[subindx] > 0:
                par0 = fitresult[fixindx[subindx] + 1]
            parameters_offset[ifixed] = fitresult[subindx + 1] + par0
        allparameters = allparameters0 + parameters_offset
        # count the number of lenses
        configkeys = cr.keys()
        configkeystring = " ".join(configkeys)
        lenslist = re.findall('Lens.', configkeystring)
        nlens = len(lenslist)
        # count the number of sources
        sourcelist = re.findall('Source.', configkeystring)
        nsource = len(sourcelist)
        nparlens = 5 * nlens
        nparsource = 6 * nsource
        npar = nparlens + nparsource + npar_previous
        parameters = allparameters[npar_previous:npar]
        npar_previous = npar
        # Mark each source as a filled magenta ellipse.
        for i in range(nsource):
            i6 = i * 6
            xxx = parameters[i6 + 2 + nparlens]
            yyy = parameters[i6 + 3 + nparlens]
            source_pa = 90 - parameters[i6 + 5 + nparlens]
            #model_type = model_types[i]
            #if model_type == 'gaussian':
            # 2.35 converts the gaussian width to FWHM (2*sqrt(2*ln 2))
            norm = 2.35
            #if model_type == 'cylinder':
            #    norm = numpy.sqrt(2)
            meansize = norm * parameters[i6 + 1 + nparlens]
            source_bmaj = meansize / numpy.sqrt(parameters[i6 + 4 + nparlens])
            source_bmin = meansize * numpy.sqrt(parameters[i6 + 4 + nparlens])
            e = Ellipse((xxx, yyy), source_bmaj, source_bmin, \
                    angle=source_pa, ec='white', lw=0.5, fc='magenta', \
                    zorder=2, fill=True, alpha=0.5)
            ax.add_artist(e)
        # Mark each lens position and critical ellipse in orange.
        for i in range(nlens):
            i5 = i * 5
            xxx = numpy.array([parameters[i5 + 1]])
            yyy = numpy.array([parameters[i5 + 2]])
            plt.plot(xxx, yyy, 'o', ms=5., mfc='black', mec='white', mew=0.5, \
                    label='Lens Position', zorder=20)
            lens_pa = 90 - parameters[i5 + 4]
            meansize = 2 * parameters[i5]
            lens_bmaj = meansize / numpy.sqrt(parameters[i5 + 3])
            lens_bmin = meansize * numpy.sqrt(parameters[i5 + 3])
            elens = Ellipse((xxx, yyy), lens_bmaj, lens_bmin, \
                    angle=lens_pa, ec='orange', lw=2.0, \
                    zorder=20, fill=False)
            ax.add_artist(elens)
    # NOTE(review): everything below reuses radialextent / parameters / nlens
    # from the LAST loop iteration -- implicitly assumes a single region.
    # get the image centroid in model pixel coordinates
    headim = data[0].header  # red contours
    headmod = model[0].header  # grayscale
    im = data[0].data
    im = im[0, 0, :, :]
    # good region is where mask is zero
    mask = setuputil.makeMask(config)
    goodregion = mask == 0
    # compute sigma image from cutout of SMA flux image
    # dv = 1.
    # bunit = headim['BUNIT']
    # if bunit == 'JY/BEAM.KM/S':
    #     dv = 500.
    rms = im[goodregion].std()# * dv
    # Obtain measurements of beamsize and image min/max
    bmaj = headim['BMAJ'] * 3600
    bmin = headim['BMIN'] * 3600
    bpa = headim['BPA']
    cdelt1 = headim['CDELT1'] * 3600
    cdelt2 = headim['CDELT2'] * 3600
    cell = numpy.sqrt( abs(cdelt1) * abs(cdelt2) )
    im_model = model[0].data
    # Hack to read in optical data in extension 1 instead of 0
    if im_model is None:
        im_model = model[1].data
        headmod = model[1].header
    if im_model.ndim == 4:
        im_model = im_model[0, 0, :, :]
    #nx_model = im_model[0, :].size
    pixextent = radialextent / cell
    datawcs = wcs.WCS(headim, naxis=2)
    pix = datawcs.wcs_world2pix(ra_centroid, dec_centroid, 1)
    x0 = numpy.round(pix[0])
    y0 = numpy.round(pix[1])
    imrady = numpy.round(radialextent / cell)# nymod / 2.
    imradx = numpy.round(radialextent / cell)# nxmod / 2.
    # make data cutout
    # NOTE(review): numpy.round returns floats; float slice indices were
    # tolerated by old numpy/Python 2 but raise under modern versions.
    totdx1 = x0 - imradx
    totdx2 = x0 + imradx
    totdy1 = y0 - imrady
    totdy2 = y0 + imrady
    datacut = im[totdy1:totdy2,totdx1:totdx2]
    # make cleaned model cutout
    # The model pixel scale comes from CDELT* when present, else from the
    # CD matrix.
    headerkeys = headmod.keys()
    cd1_1 = headerkeys.count('CD1_1')
    cd1_2 = headerkeys.count('CD1_2')
    if cd1_1 == 0:
        cdelt1_model = numpy.abs(headmod['CDELT1'] * 3600)
        cdelt2_model = numpy.abs(headmod['CDELT2'] * 3600)
    else:
        cdelt1_model = numpy.abs(headmod['CD1_1'] * 3600)
        cdelt2_model = numpy.abs(headmod['CD2_2'] * 3600)
        cd11 = headmod['CD1_1']
        if cd1_2 == 0:
            cd12 = 0
            cd21 = 0
        else:
            cd12 = headmod['CD1_2']
            cd21 = headmod['CD2_1']
        cd22 = headmod['CD2_2']
        cdelt1_model = numpy.sqrt(cd11 ** 2 + cd12 ** 2) * 3600
        cdelt2_model = numpy.sqrt(cd21 ** 2 + cd22 ** 2) * 3600
        if cd12 == 0:
            cd12 = cd11 / 1e8
        cdratio = numpy.abs(cd11 / cd12)
        if cdratio < 1:
            cdratio = 1 / cdratio
    cellmod = numpy.sqrt( abs(cdelt1_model) * abs(cdelt2_model) )
    modelwcs = wcs.WCS(headmod, naxis=2)
    pix = modelwcs.wcs_world2pix(ra_centroid, dec_centroid, 1)
    x0 = numpy.round(pix[0])
    y0 = numpy.round(pix[1])
    modrady = numpy.round(radialextent / cellmod)
    modradx = numpy.round(radialextent / cellmod)
    totdx1 = x0 - modradx
    totdx2 = x0 + modradx
    totdy1 = y0 - modrady
    totdy2 = y0 + modrady
    modelcut = im_model[totdy1:totdy2,totdx1:totdx2]
    #cellp = cell * (2 * pixextent + 1.1) / (2 * pixextent)
    xlo = -radialextent
    xhi = radialextent
    ylo = -radialextent
    yhi = radialextent
    # RA axis is mirrored (increases to the left).
    ncell = modelcut[:, 0].size#(xhi - xlo) / cell
    modx = -numpy.linspace(xlo, xhi, ncell)
    mody = numpy.linspace(ylo, yhi, ncell)
    #modx = -(numpy.arange(2 * pixextent) - pixextent) * cellp - cell/2.
    #mody = (numpy.arange(2 * pixextent) - pixextent) * cellp + cell/2.
    cornerextent = [modx[0], modx[-1], mody[0], mody[-1] ]
    # Pick grayscale stretch and contour colors by image type.
    if modeltype == 'residual':
        grayscalename = 'Residual'
        pcolor = 'white'
        ncolor = 'black'
        vmax = 5 * rms
        vmin = -5 * rms
    elif modeltype == 'model':
        grayscalename = 'Model'
        pcolor = 'red'
        ncolor = 'red'
        vmax = modelcut.max()
        vmin = modelcut.min()
    else:
        grayscalename = config['OpticalTag']
        filtindx = grayscalename.find(' ')
        filtname = grayscalename[filtindx + 1:]
        if filtname == 'F110W':
            # log stretch for this filter
            modelcut = numpy.log10(modelcut - modelcut.min() + 1)
        pcolor = 'red'
        ncolor = 'red'
        vmax = modelcut.max()
        vmin = modelcut.min()
    plt.imshow(modelcut, cmap='gray_r', interpolation='nearest', \
            extent=cornerextent, origin='lower', vmax=vmax, vmin=vmin)
    # Positive contours at 3 rms x powers of 2; negative at -3 rms x 2^n.
    plevs = 3*rms * 2**(numpy.arange(15))
    nlevs = sorted(-3 * rms * 2**(numpy.arange(4)))
    # plevs = 2*rms * numpy.sqrt(2)**(numpy.arange(10))
    # nlevs = sorted(-2 * rms * numpy.sqrt(2)**(numpy.arange(4)))
    pcline = 'solid'
    ncline = 'dashed'
    #nx_contour = datacut[0, :].size
    #ny_contour = datacut[:, 0].size
    #cmodx = -(numpy.arange(nx_contour) - pixextent) * cellp - cell/2.
    #cmody = (numpy.arange(ny_contour) - pixextent) * cellp + cell/2.
    ncell = datacut[:, 0].size#(xhi - xlo) / cell
    cmodx = -numpy.linspace(xlo, xhi, ncell)
    cmody = numpy.linspace(ylo, yhi, ncell)
    plt.contour(cmodx, cmody, datacut, colors=pcolor, levels=plevs, \
            linestyles=pcline, linewidths=1.5)
    plt.contour(cmodx, cmody, datacut, colors=ncolor, levels=nlevs, \
            linestyles=ncline, linewidths=1.5)
    # plot the critical curve
    #plt.contour(cmodx, cmody, dmu, colors='orange', levels=[100])
    caustics = True
    if caustics:
        # Caustic curves for a singular isothermal ellipsoid:
        # Keeton 00, Kormann+94
        phi = numpy.linspace(0.0, 2*numpy.pi, 2000)
        def cart2pol(x, y):
            """ Cartesian to Polar
            """
            x, y = numpy.asarray(x), numpy.asarray(y)
            r = numpy.sqrt(x**2 + y**2)
            theta = numpy.arctan2(y, x)
            return r, theta
        def pol2cart(r, theta):
            """ Polar to Cartesian
            """
            r, theta = numpy.asarray(r), numpy.asarray(theta)
            x, y = r * numpy.cos(theta), r * numpy.sin(theta)
            return x, y
        # no shear
        # loop through each region
        for ireg in range(nregion):
            region = 'Region' + str(ireg)
            # loop through each lens
            # NOTE(review): nlens and parameters here are leftovers from the
            # LAST region of the earlier loop, regardless of ireg.
            for jlens in range(nlens):
                i5 = jlens * 5
                xxLens = parameters[i5 + 1]
                yyLens = parameters[i5 + 2]
                PA = parameters[i5 + 4]
                PA = 180 - PA # RA increasing to the left (E is to the left of N)
                q = parameters[i5 + 3]
                qq = numpy.sqrt(1 - q**2)
                b = parameters[i5] # in arcsec
                Delta = numpy.sqrt(numpy.cos(phi)**2 + q**2 * numpy.sin(phi)**2)
                # if not circular
                if q != 1.0:
                    # radial caustic
                    x = (-b * numpy.sqrt(q)/qq) * numpy.arcsinh(numpy.cos(phi)*qq/q)
                    y = (b * numpy.sqrt(q)/qq) * numpy.arcsin(numpy.sin(phi)*qq)
                    # tangential, Kormann+94 eqn 31
                    xt = b * (((numpy.sqrt(q)/Delta) * numpy.cos(phi)) - ((numpy.sqrt(q)/qq)*numpy.arcsinh(qq/q * numpy.cos(phi))))
                    yt = -b * (((numpy.sqrt(q)/Delta) * numpy.sin(phi)) - ((numpy.sqrt(q)/qq) * numpy.arcsin(qq * numpy.sin(phi))))
                    # rotate to match image using PA
                    rr, thetar = cart2pol(x, y)
                    x, y = pol2cart(rr, thetar + PA * numpy.pi/180.)
                    x += xxLens
                    y += yyLens
                    rt, thetat = cart2pol(xt, yt)
                    xt, yt = pol2cart(rt, thetat + PA * numpy.pi/180.)
                    xt += xxLens
                    yt += yyLens
                    drawCaustic = numpy.atleast_3d([[x, y], [xt, yt]])
                # if circular
                else:
                    x = -b * numpy.cos(phi) + xxLens
                    y = -b * numpy.sin(phi) + yyLens
                    # NOTE(review): BUG -- xr and yr are undefined here
                    # (NameError when q == 1.0); presumably x, y were meant.
                    drawCaustic = numpy.atleast_3d([xr, yr])
                drawCaustic = drawCaustic.reshape(drawCaustic.shape[2], drawCaustic.shape[0], drawCaustic.shape[1])
                for i in range(drawCaustic.shape[0]):
                    # NOTE(review): lw='2' passes a string where matplotlib
                    # expects a number -- may fail on newer versions.
                    plt.plot(drawCaustic[i, 0, :], drawCaustic[i, 1, :], color='cyan', lw='2', zorder=20)  # alpha=(1.-(ireg+1)/5.-(jlens+1)/3.)
    # axisrange = plt.axis()
    axisrange = numpy.array([xhi,xlo,ylo,yhi]).astype(float)
    plt.axis(axisrange)
    plt.minorticks_on()
    plt.tick_params(width=1.5, which='both')
    plt.tick_params(length=4, which='minor')
    plt.tick_params(length=8, which='major')
    plt.xlabel(r'$\Delta$RA (arcsec)', fontsize='x-large')
    plt.ylabel(r'$\Delta$Dec (arcsec)', fontsize='x-large')
    # Draw the synthesized beam in the lower-left corner.
    bparad = bpa / 180 * numpy.pi
    beamx = numpy.abs(numpy.sin(bparad) * bmaj) + \
            numpy.abs(numpy.cos(bparad) * bmin)
    beamy = numpy.abs(numpy.cos(bparad) * bmaj) + \
            numpy.abs(numpy.sin(bparad) * bmin)
    beamxhi = 2 * pixextent / cell
    beamxlo = -2 * pixextent / cell
    beamyhi = 2 * pixextent / cell
    beamylo = -2 * pixextent / cell
    # NOTE(review): numpy.float was removed in NumPy 1.24; float() would be
    # the portable spelling.
    beamdx = numpy.float(beamxhi) - numpy.float(beamxlo)
    beamdy = numpy.float(beamyhi) - numpy.float(beamylo)
    bufferx = 0.03 * beamdx / 6.0
    buffery = 0.03 * beamdx / 6.0
    xpos = 1 - beamx/beamdx/2 - bufferx
    ypos = beamy/beamdy/2 + buffery
    #beamx = bmaj * numpy.abs(numpy.cos(bparad))
    #beamy = bmaj * numpy.abs(numpy.sin(bparad))
    # (xpos/ypos above are immediately overwritten here)
    xpos = 0.95 * axisrange[1] + 0.95 * beamx / 2.
    ypos = 0.95 * axisrange[2] + 0.95 * beamy / 2.
    e = Ellipse((xpos,ypos), bmaj, bmin, angle=90 - bpa, ec='black', \
            hatch='//////', lw=1.0, fc='None', zorder=10, fill=True)
    ax.add_artist(e)
    plt.text(0.92, 0.88, grayscalename, transform=ax.transAxes,
            fontsize='xx-large', ha='right')
    # Prefer the short name from a (hard-coded, local) target list; fall
    # back to the config object name when the table is unavailable.
    try:
        from astropy.table import Table
        tloc = '../../../Papers/Bussmann_2015a/Bussmann2015/Data/targetlist.dat'
        hackstep = Table.read(tloc, format='ascii')
        objname = config['ObjectName']
        match = hackstep['dataname'] == objname
        shortname = hackstep['shortname'][match][0]
        plt.text(0.08, 0.88, shortname, transform=ax.transAxes,
                fontsize='xx-large')
    except:
        objname = config['ObjectName']
        plt.text(0.08, 0.88, objname, transform=ax.transAxes,
                fontsize='xx-large')
    bigtag = '.' + modeltype + '.' + tag
    savefig('LensedSBmap' + bigtag + '.png')
    #plt.clf()
def removeTempFiles():
    """
    Delete the intermediate products left behind by the visualutil routines:
    surface-brightness maps, model/residual visibility files, and captured
    tool output logs in the working directory.
    """
    import os
    os.system('rm -rf *SBmap*fits *_model* *_residual* *output.txt')
def plotFit(config, fitresult, threshold, tag='', cleanup=True, showOptical=False,
        interactive=True):
    """
    Plot a particular model fit: build the lensed surface-brightness map for
    *fitresult*, simulate and image the visibilities, then plot the model
    and residual images (and optionally the optical image).

    Parameters
    ----------
    config: dict
        parsed config.yaml
    fitresult: sequence
        one set of model parameter values (lnprob first)
    threshold: float
        in mJy, cleaning threshold
    tag: str
        id tag used in intermediate and output file names
    cleanup: bool
        True: remove the intermediate files when done
    showOptical: Bool
        True: will plot data as contour, optical as grayscale
    interactive: bool
        passed to CASA's clean; forced to False when
        config['UseMiriad'] == 'Visualize'
    """
    from astropy.io import fits
    # make the lensed image
    makeSBmap(config, fitresult)
    # are we using miriad to image the best-fit model?
    # Bug fix: `config.keys().count('UseMiriad')` only works on Python 2
    # (Py3 dict views have no .count); membership test is equivalent on both.
    if 'UseMiriad' in config:
        miriad = config['UseMiriad']
        if miriad == 'Visualize':
            miriad = True
            interactive = False
    else:
        miriad = False
    # make the simulated visibilities
    makeVis(config, miriad=miriad, idtag=tag)
    # image the simulated visibilities
    makeImage(config, threshold, miriad=miriad, interactive=interactive, idtag=tag)
    # read in the images of the simulated visibilities
    objectname = config['ObjectName']
    simimloc = objectname + '_clean_model.fits'
    model = fits.open(simimloc)
    simimloc = objectname + '_clean_residual.fits'
    residual = fits.open(simimloc)
    # read in the data
    data = fits.open(config['ImageName'])
    if showOptical:
        # read in the optical image
        optical = fits.open(config['OpticalImage'])
        # plot the optical grayscale with data contours
        plotImage(optical, data, config, 'optical', fitresult, tag=tag)
    # plot the model image with data contours
    plotImage(model, data, config, 'model', fitresult, tag=tag)
    # plot the residual with residual contours
    plotImage(residual, residual, config, 'residual', fitresult, tag=tag)
    # remove the intermediate files
    if cleanup:
        removeTempFiles()
def preProcess(config, paramData, fitresult, tag='', cleanup=True,
        showOptical=False, interactive=True):
    """
    Cycle through each region and run plotFit, selecting parameters
    appropriately.

    For every 'Region*' key in config, the slice of the flattened fit
    vector belonging to that region (5 parameters per lens, 6 per source)
    is extracted, parameters that are fixed relative to other parameters
    are resolved via their offsets, and plotFit is invoked.

    Parameters
    ----------
    threshold: float (need to implement)
        in mJy, cleaning threshold

    NOTE(review): `threshold` is referenced in the plotFit call below but is
    never defined in this scope -- reaching that call raises NameError.
    The call also passes (config, paramData, threshold, parameters, regioni)
    while plotFit's signature is (config, fitresult, threshold, ...);
    confirm the intended argument list before relying on this function.
    """
    import setuputil
    import numpy
    import re
    # Loop over each region
    nlensedsource = paramData['nlensedsource']
    nlensedregions = paramData['nlensedregions']
    npar_previous = 0
    configkeys = config.keys()
    configkeystring = " ".join(configkeys)
    regionlist = re.findall('Region.', configkeystring)
    for regioni, region in enumerate(regionlist):
        cr = config[region]
        # each lensed source/region contributes two magnification entries
        # at the tail of the fit vector; strip them off first
        nmu = 2 * (numpy.array(nlensedsource).sum() + nlensedregions)
        if nmu > 0:
            allparameters0 = list(fitresult)[1:-nmu]
        else:
            allparameters0 = list(fitresult)[1:]
        # search poff_models for parameters fixed relative to other parameters
        fixindx = setuputil.fixParams(paramData)
        poff = paramData['poff']
        ndim_total = len(poff)
        fixed = (numpy.where(fixindx >= 0))[0]
        nfixed = fixindx[fixed].size
        parameters_offset = numpy.zeros(ndim_total)
        for ifix in range(nfixed):
            ifixed = fixed[ifix]
            subindx = fixindx[ifixed]
            par0 = 0
            # chains of fixed parameters: follow one extra level of indirection
            if fixindx[subindx] > 0:
                par0 = fitresult[fixindx[subindx] + 1]
            parameters_offset[ifixed] = fitresult[subindx + 1] + par0
        allparameters = allparameters0 + parameters_offset
        # count the number of lenses
        configkeys = cr.keys()
        configkeystring = " ".join(configkeys)
        lenslist = re.findall('Lens.', configkeystring)
        nlens = len(lenslist)
        # count the number of sources
        sourcelist = re.findall('Source.', configkeystring)
        nsource = len(sourcelist)
        # 5 parameters per lens, 6 per source; slice this region's share
        nparlens = 5 * nlens
        nparsource = 6 * nsource
        npar = nparlens + nparsource + npar_previous
        parameters = allparameters[npar_previous:npar]
        npar_previous = npar
        plotFit(config, paramData, threshold, parameters, regioni, tag=tag,
                cleanup=cleanup, showOptical=showOptical,
                interactive=interactive)
|
|
#!/usr/bin/env python3
# This file contains a command line client that can be used to access the API.
import sys
import os
import json
from optparse import OptionParser
import re
import importlib
sys.path.append(os.path.realpath('modules'))
client_module = importlib.import_module('RestApiClient')
# This is to modify the behaviour of the OptParser to make it check arguments
# more strictly
class NonCorrectingOptionParser(OptionParser):
    """OptionParser subclass that refuses abbreviated long options.

    Stock optparse silently expands unambiguous prefixes of long options;
    this variant accepts only exact matches and exits with an error for
    anything else, making command line checking stricter.
    """
    def _match_long_opt(self, opt):
        """Return *opt* if it is a known long option; error out otherwise."""
        if opt not in self._long_opt:
            self.error('"{0}" is not a valid command line option.'.format(opt))
        return opt
# get_parser() below returns the parser used to parse our command line arguments.
# One-line hint printed whenever arguments are missing or malformed.
USAGE_MESSAGE = "Type 'python apiclient.py --help' for usage."
def get_params(option, opt_str, value, parser):
    """optparse callback: gather raw argv tokens for a multi-valued option.

    Consumes tokens from parser.rargs until the next '--'-prefixed option
    (or the end of argv) and stores the collected list on the option's
    destination attribute.
    """
    collected = []
    while parser.rargs:
        if parser.rargs[0].startswith('--'):
            break
        collected.append(parser.rargs.pop(0))
    setattr(parser.values, option.dest, collected)
def get_parser():
    """Build the command line parser for apiclient.py.

    Uses NonCorrectingOptionParser so unknown or abbreviated long options
    are rejected outright. add_help_option is disabled because a custom
    -h/--help flag is declared explicitly below.
    """
    parser = NonCorrectingOptionParser(add_help_option=False)
    parser.add_option('-h', '--help', help='Show help message',
                      action='store_true')
    parser.add_option('--print_api',
                      help='Print all available endpoints of the API',
                      action='store_true')
    parser.add_option('-a', '--api', help='Path of the API to call. Required',
                      action='store')
    parser.add_option('-m', '--method',
                      help='HTTP method to call the API using, either GET, ' +
                      'POST, DELETE',
                      action='store', choices=['GET', 'POST', 'DELETE'])
    parser.add_option('--response_format',
                      help='Content-type of response, either ' +
                      'application/json, application/xml, ' +
                      'application/csv, or text/table. Defaults to json.',
                      action='store', default='application/json')
    parser.add_option('--request_format', '--content_type',
                      help='Content-type of body parameter. Required only ' +
                      'for endpoints that have \'body\' parameters.')
    parser.add_option('-v', '--version',
                      help='The version of the endpoint you would like to ' +
                      'use. Default is most recent version',
                      action='store')
    parser.add_option('--add_headers',
                      help='For any headers you would like to pass. This is ' +
                      'not required to use any endpoint. Must be in ' +
                      'format "<name1>=<value1>+<name2>=<value2>"')
    # -p consumes a variable number of argv tokens via the get_params callback.
    parser.add_option('-p', '--params',
                      help='For any parameters you would like to pass. ' +
                      'Individual parameters are separated by spaces.' +
                      '\nExample: --params <name1>="<value1>" ' +
                      '<name2>="<value2>"',
                      action='callback', callback=get_params, dest="params")
    parser.add_option('-r', '--range',
                      help='Allows you to construct a Range header to ' +
                      'perform paging (v3_0 endpoints and above only). ' +
                      'Range is 0 based inclusive, and must be in ' +
                      'formation \'x-y\'', action='store', default='')
    return parser
# This method takes the output of the /help/capabilities endpoint and prints it
# into a user readable format.
def print_api():
    """Fetch /help/capabilities and pretty-print every API endpoint.

    For each endpoint the versions, HTTP methods, descriptions, supported
    output content types, and parameter details are written to stdout.
    """
    api_client = client_module.RestApiClient()
    response = api_client.call_api('help/capabilities', 'GET')
    response_json = json.loads(response.read().decode('utf-8'))
    for category in response_json['categories']:
        for api in category['apis']:
            print("API: " + category['path'] + api['path'])
            print("Operations:")
            for operation in api['operations']:
                print("\tVersion: " + str(operation['version']))
                print("\tMethod: " + operation['httpMethod'])
                # collapse embedded whitespace runs into single spaces
                desc = re.sub("[\\t\\n\\r]+", " ", operation['description'])
                print("\tDescription: " + desc)
                sys.stdout.write("\tOutput Type(s): ")
                response_types = []
                for response_type in operation['supportedContentTypes']:
                    response_types.append(response_type['mimeType'])
                print(", ".join(response_types))
                print("\tParameters: ")
                for params in operation['parameters']:
                    print("\t\tName: " + params['name'])
                    if params['description']:
                        desc = re.sub("[\\t\\n\\r]+", " ",
                                      params['description'])
                        print("\t\tDescription: " + desc)
                    print("\t\tSource: " + params['source'])
                    print("\t\tRequired: " + str(params['required']))
                    for contentTypes in params['supportedContentTypes']:
                        # fall back to the parameter-level dataType when a
                        # content-type entry lacks its own
                        try:
                            if contentTypes['dataType']:
                                print("\t\tType: " + contentTypes['dataType'])
                        except KeyError:
                            print("\t\tType: " + params['dataType'])
                        print("\t\tMimeType: " + contentTypes['mimeType'])
                    print("")
# This is the output when "apiclient.py -h" is called on the command line.
def print_help(parser):
    """Print the formatted option summary followed by a worked example query."""
    usage_text = parser.format_help().strip()
    print(usage_text)
    print("\n\nExample query: python apiclient.py --api /help/versions " +
          "--method GET --params filter=\"version=6.0\"")
def parse_params(args):
    """Convert ['name=value', ...] strings into a {name: value} dict.

    Splits each entry on the first '=' only, so values may themselves
    contain '=' characters. An entry with no '=' raises IndexError, which
    the caller reports as a parameter-syntax error.
    """
    params = {}
    for entry in (args or []):
        name_value = entry.split('=', 1)
        params[name_value[0]] = name_value[1]
    return params
# This method calls the api for the user.
def make_request(args):
# Create an API for the version specified by the user. If args.version is
# None the latest version will be used.
api_client = client_module.RestApiClient(version=args.version)
# Make a copy of the headers so we are able to set some custom headers.
headers = api_client.get_headers()
# Gets endpoint from --api ENDPOINT argument
endpoint = args.api
# Strips endpoint of first forward slash, if it has one. Allows user to
# supply or omit forward slash from beginning of endpoint.
if str.startswith(endpoint, '/'):
endpoint = endpoint[1:]
# Changes 'Accept' header to --response_format RESPONSE_FORMAT argument.
headers['Accept'] = args.response_format
# This code snippet adds any extra headers you wish to send with your api
# call. Must be in name1=value1+name2=value2 form.
if args.add_headers:
try:
header_pairs = args.add_headers.split("+")
for header_pair in header_pairs:
header_pair = header_pair.split("=", 1)
headers[header_pair[0]] = header_pair[1]
except IndexError as ex:
raise ParseError("Error: Parsing headers failed. Make sure " +
"headers are in format \"<name1>=<value1>+" +
"<name2>=<value2>\"", ex)
if args.range:
headers['Range'] = 'items='+args.range
# This adds any query/body params to the list of query/body params.
params = parse_params(args.params)
# Checks content_type to see if it should send params as body param, or
# query param.
content_type = None
# Gets Content-type from --request_format REQUEST_FORMAT argument.
if args.request_format:
headers['Content-type'] = args.request_format
content_type = args.request_format
try:
# If content_type is application/json, then it is sending a JSON object
# as a body parameter.
if content_type == 'application/json':
data = params['data'].encode('utf-8')
return api_client.call_api(endpoint, 'POST', data=data,
headers=headers)
# Else it sends all params as query parameters.
else:
for key, value in params.items():
params[key] = value
return api_client.call_api(endpoint, args.method, params=params,
headers=headers)
except IndexError:
raise ParseError('Error: Parameter parsing failed. Make sure any ' +
'parameters follow the syntax ' +
'<paramname>="<paramvalue>"')
def handle_response_error(response, body):
    """Report an HTTP error response.

    Returns [api_error_code, pretty_json] when *body* parses as JSON, or
    [None, body] (after printing a warning) when it does not. A 401 triggers
    the failed-auth explanation; a 422 whose API code is 36 means the Range
    header could not be parsed.
    """
    try:
        parsed = json.loads(body)
        if response.code == 401:
            failed_auth()
        elif response.code == 422 and parsed['code'] == 36:
            print("\nFailed to parse Range header. The syntax of the " +
                  "--range parameter must follow 'x-y'.")
            print("Example: --range 0-1\n")
        return [parsed['code'],
                json.dumps(parsed, indent=2, separators=(',', ':'))]
    except ValueError:
        # The body was not JSON; dump it verbatim instead.
        print(f"Failed to parse JSON of {response.code} error response body")
        print("Text of Response Body: \n")
        return [None, body]
def failed_auth():
    """Tell the user the API call was rejected for bad credentials."""
    messages = ("AuthorizationError:",
                "\nToken, or user credentials failed to authorize api call. "
                "Please verify your token, or user credentials are correct.\n",
                "Body returned by failed request:\n")
    for message in messages:
        print(message)
def main(args):
    """Dispatch on the parsed options: print the API catalog, make a call,
    or complain about missing arguments.

    *args* is the (values, leftover) tuple returned by
    OptionParser.parse_args(); only args[0] is consulted.
    """
    # Then if --print_api is true, then apiclient prints output of
    # /help/capabilities endpoint.
    if args[0].print_api:
        print_api()
    # Then if --api and --method both have values, apiclient will attempt an
    # api request.
    elif args[0].api and args[0].method:
        # Gets response object from making api call.
        response = make_request(args[0])
        # Determines content type of response object (for printing).
        content_type = response.headers.get('Content-type')
        # Gleans body from response object.
        # NOTE(review): printing the raw headers looks like leftover debug
        # output -- confirm it is intentional.
        print(response.headers)
        body = response.read().decode('utf-8')
        output = body
        if response.code >= 300:
            # ERROR OCCURED, HANDLE ERROR
            [error_code, output] = handle_response_error(response, body)
        # SUCCESSFUL CALL
        # If JSON object, it pretty prints JSON
        # Else it merely prints the body of the response object.
        elif content_type == 'application/json':
            if body:
                try:
                    response_json = json.loads(body)
                    output = json.dumps(response_json, indent=2,
                                        separators=(',', ':'))
                except ValueError:
                    print("Failed to parse JSON, unparsed JSON below: ")
            else:
                print("\nResponse body was empty.\n")
        print(response.code)
        print("")
        print(output)
    # If either only api, or method args are sent, then then this error
    # message is printed.
    else:
        message = ""
        if args[0].api:
            message += "httpMethod must be specified by --method argument\n"
        if args[0].method:
            message += "api endpoint must be specified by --api argument\n"
        if message:
            print("ArgumentError: " + message)
        print(USAGE_MESSAGE+"\n")
if __name__ == "__main__":
    # Build the parser, then dispatch: --help wins, a bare invocation just
    # prints the usage hint, anything else is handled by main().
    parser = get_parser()
    args = parser.parse_args()
    if args[0].help:
        print_help(parser)
    elif not sys.argv[1:]:  # no command line arguments were supplied
        print(USAGE_MESSAGE+"\n")
    else:
        main(args)
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Filter Scheduler.
"""
import mox
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova import exception
from nova.pci import pci_request
from nova.scheduler import driver
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
from nova.scheduler import utils as scheduler_utils
from nova.scheduler import weights
from nova.tests.scheduler import fakes
from nova.tests.scheduler import test_scheduler
def fake_get_filtered_hosts(hosts, filter_properties, index):
    """Stub filter pass: accept every host unchanged, as a fresh list."""
    return [host for host in hosts]
def fake_get_group_filtered_hosts(hosts, filter_properties, index):
    """Stub anti-affinity filter: drop the first host when the request
    already has group_hosts; otherwise accept every host.
    """
    filtered = list(hosts)
    if filter_properties.get('group_hosts'):
        filtered.pop(0)
    return filtered
class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Filter Scheduler."""
driver_cls = filter_scheduler.FilterScheduler
def test_run_instance_no_hosts(self):
def _fake_empty_call_zone_method(*args, **kwargs):
return []
sched = fakes.FakeFilterScheduler()
uuid = 'fake-uuid1'
fake_context = context.RequestContext('user', 'project')
instance_properties = {'project_id': 1, 'os_type': 'Linux'}
request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1,
'ephemeral_gb': 0},
'instance_properties': instance_properties,
'instance_uuids': [uuid]}
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
old_ref, new_ref = db.instance_update_and_get_original(fake_context,
uuid, {'vm_state': vm_states.ERROR, 'task_state':
None}).AndReturn(({}, {}))
compute_utils.add_instance_fault_from_exc(fake_context,
mox.IsA(conductor_api.LocalAPI), new_ref,
mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
sched.schedule_run_instance(
fake_context, request_spec, None, None,
None, None, {}, False)
def test_run_instance_non_admin(self):
self.was_admin = False
def fake_get(context, *args, **kwargs):
# make sure this is called with admin context, even though
# we're using user context below
self.was_admin = context.is_admin
return {}
sched = fakes.FakeFilterScheduler()
self.stubs.Set(sched.host_manager, 'get_all_host_states', fake_get)
fake_context = context.RequestContext('user', 'project')
uuid = 'fake-uuid1'
instance_properties = {'project_id': 1, 'os_type': 'Linux'}
request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
'instance_properties': instance_properties,
'instance_uuids': [uuid]}
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
old_ref, new_ref = db.instance_update_and_get_original(fake_context,
uuid, {'vm_state': vm_states.ERROR, 'task_state':
None}).AndReturn(({}, {}))
compute_utils.add_instance_fault_from_exc(fake_context,
mox.IsA(conductor_api.LocalAPI), new_ref,
mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.ReplayAll()
sched.schedule_run_instance(
fake_context, request_spec, None, None, None, None, {}, False)
self.assertTrue(self.was_admin)
def test_scheduler_includes_launch_index(self):
fake_context = context.RequestContext('user', 'project')
instance_opts = {'fake_opt1': 'meow'}
request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
'instance_properties': instance_opts}
instance1 = {'uuid': 'fake-uuid1'}
instance2 = {'uuid': 'fake-uuid2'}
def _has_launch_index(expected_index):
"""Return a function that verifies the expected index."""
def _check_launch_index(value):
if 'instance_properties' in value:
if 'launch_index' in value['instance_properties']:
index = value['instance_properties']['launch_index']
if index == expected_index:
return True
return False
return _check_launch_index
self.mox.StubOutWithMock(self.driver, '_schedule')
self.mox.StubOutWithMock(self.driver, '_provision_resource')
self.driver._schedule(fake_context, request_spec, {},
['fake-uuid1', 'fake-uuid2']).AndReturn(['host1', 'host2'])
# instance 1
self.driver._provision_resource(
fake_context, 'host1',
mox.Func(_has_launch_index(0)), {},
None, None, None, None,
instance_uuid='fake-uuid1',
legacy_bdm_in_spec=False).AndReturn(instance1)
# instance 2
self.driver._provision_resource(
fake_context, 'host2',
mox.Func(_has_launch_index(1)), {},
None, None, None, None,
instance_uuid='fake-uuid2',
legacy_bdm_in_spec=False).AndReturn(instance2)
self.mox.ReplayAll()
self.driver.schedule_run_instance(fake_context, request_spec,
None, None, None, None, {}, False)
def test_schedule_happy_day(self):
"""Make sure there's nothing glaringly wrong with _schedule()
by doing a happy day pass through.
"""
self.next_weight = 1.0
def _fake_weigh_objects(_self, functions, hosts, options):
self.next_weight += 2.0
host_state = hosts[0]
return [weights.WeighedHost(host_state, self.next_weight)]
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_filtered_hosts)
self.stubs.Set(weights.HostWeightHandler,
'get_weighed_objects', _fake_weigh_objects)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
request_spec = {'num_instances': 10,
'instance_type': {'memory_mb': 512, 'root_gb': 512,
'ephemeral_gb': 0,
'vcpus': 1},
'instance_properties': {'project_id': 1,
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'}}
self.mox.ReplayAll()
weighed_hosts = sched._schedule(fake_context, request_spec, {})
self.assertEquals(len(weighed_hosts), 10)
for weighed_host in weighed_hosts:
self.assertTrue(weighed_host.obj is not None)
def test_max_attempts(self):
self.flags(scheduler_max_attempts=4)
sched = fakes.FakeFilterScheduler()
self.assertEqual(4, sched._max_attempts())
def test_invalid_max_attempts(self):
self.flags(scheduler_max_attempts=0)
sched = fakes.FakeFilterScheduler()
self.assertRaises(exception.NovaException, sched._max_attempts)
def test_retry_disabled(self):
# Retry info should not get populated when re-scheduling is off.
self.flags(scheduler_max_attempts=1)
sched = fakes.FakeFilterScheduler()
instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
filter_properties = {}
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# should not have retry info in the populated filter properties:
self.assertNotIn("retry", filter_properties)
def test_retry_force_hosts(self):
# Retry info should not get populated when re-scheduling is off.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties)
filter_properties = dict(force_hosts=['force_host'])
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# should not have retry info in the populated filter properties:
self.assertNotIn("retry", filter_properties)
def test_retry_force_nodes(self):
# Retry info should not get populated when re-scheduling is off.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties)
filter_properties = dict(force_nodes=['force_node'])
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# should not have retry info in the populated filter properties:
self.assertNotIn("retry", filter_properties)
def test_retry_attempt_one(self):
# Test retry logic on initial scheduling attempt.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
filter_properties = {}
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
self.assertEqual(1, num_attempts)
def test_retry_attempt_two(self):
# Test retry logic when re-scheduling.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
self.assertEqual(2, num_attempts)
def test_retry_exceeded_max_attempts(self):
# Test for necessary explosion when max retries is exceeded and that
# the information needed in request_spec is still present for error
# handling
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
instance_uuids = ['fake-id']
request_spec = dict(instance_properties=instance_properties,
instance_uuids=instance_uuids)
retry = dict(num_attempts=2)
filter_properties = dict(retry=retry)
self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
self.context, request_spec, admin_password=None,
injected_files=None, requested_networks=None,
is_first_time=False,
filter_properties=filter_properties,
legacy_bdm_in_spec=False)
uuids = request_spec.get('instance_uuids')
self.assertEqual(uuids, instance_uuids)
def test_add_retry_host(self):
retry = dict(num_attempts=1, hosts=[])
filter_properties = dict(retry=retry)
host = "fakehost"
node = "fakenode"
scheduler_utils._add_retry_host(filter_properties, host, node)
hosts = filter_properties['retry']['hosts']
self.assertEqual(1, len(hosts))
self.assertEqual([host, node], hosts[0])
def test_post_select_populate(self):
# Test addition of certain filter props after a node is selected.
retry = {'hosts': [], 'num_attempts': 1}
filter_properties = {'retry': retry}
host_state = host_manager.HostState('host', 'node')
host_state.limits['vcpus'] = 5
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
self.assertEqual(['host', 'node'],
filter_properties['retry']['hosts'][0])
self.assertEqual({'vcpus': 5}, host_state.limits)
def test_basic_schedule_run_instances_anti_affinity(self):
filter_properties = {'scheduler_hints':
{'group': 'cats'}}
# Request spec 1
instance_opts1 = {'project_id': 1, 'os_type': 'Linux',
'memory_mb': 512, 'root_gb': 512,
'ephemeral_gb': 0, 'vcpus': 1,
'system_metadata': {'system': 'metadata'}}
request_spec1 = {'instance_uuids': ['fake-uuid1-1', 'fake-uuid1-2'],
'instance_properties': instance_opts1,
'instance_type': {'memory_mb': 512, 'root_gb': 512,
'ephemeral_gb': 0, 'vcpus': 1}}
self.next_weight = 1.0
def _fake_weigh_objects(_self, functions, hosts, options):
self.next_weight += 2.0
host_state = hosts[0]
return [weights.WeighedHost(host_state, self.next_weight)]
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_group_filtered_hosts)
self.stubs.Set(weights.HostWeightHandler,
'get_weighed_objects', _fake_weigh_objects)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
self.mox.StubOutWithMock(driver, 'instance_update_db')
self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
self.mox.StubOutWithMock(sched, 'group_hosts')
instance1_1 = {'uuid': 'fake-uuid1-1'}
instance1_2 = {'uuid': 'fake-uuid1-2'}
sched.group_hosts(mox.IgnoreArg(), 'cats').AndReturn([])
def inc_launch_index1(*args, **kwargs):
request_spec1['instance_properties']['launch_index'] = (
request_spec1['instance_properties']['launch_index'] + 1)
expected_metadata = {'system_metadata':
{'system': 'metadata', 'group': 'cats'}}
driver.instance_update_db(fake_context, instance1_1['uuid'],
extra_values=expected_metadata).WithSideEffects(
inc_launch_index1).AndReturn(instance1_1)
compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host3',
instance=instance1_1, requested_networks=None,
injected_files=None, admin_password=None, is_first_time=None,
request_spec=request_spec1, filter_properties=mox.IgnoreArg(),
node='node3', legacy_bdm_in_spec=False)
driver.instance_update_db(fake_context, instance1_2['uuid'],
extra_values=expected_metadata).WithSideEffects(
inc_launch_index1).AndReturn(instance1_2)
compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host4',
instance=instance1_2, requested_networks=None,
injected_files=None, admin_password=None, is_first_time=None,
request_spec=request_spec1, filter_properties=mox.IgnoreArg(),
node='node4', legacy_bdm_in_spec=False)
self.mox.ReplayAll()
sched.schedule_run_instance(fake_context, request_spec1,
None, None, None, None, filter_properties, False)
def test_schedule_host_pool(self):
"""Make sure the scheduler_host_subset_size property works properly."""
self.flags(scheduler_host_subset_size=2)
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_filtered_hosts)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
instance_properties = {'project_id': 1,
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
filter_properties = {}
self.mox.ReplayAll()
hosts = sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# one host should be chosen
self.assertEqual(len(hosts), 1)
def test_schedule_large_host_pool(self):
"""Hosts should still be chosen if pool size
is larger than number of filtered hosts.
"""
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.flags(scheduler_host_subset_size=20)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_filtered_hosts)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
instance_properties = {'project_id': 1,
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
filter_properties = {}
self.mox.ReplayAll()
hosts = sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# one host should be chose
self.assertEqual(len(hosts), 1)
def test_schedule_chooses_best_host(self):
"""If scheduler_host_subset_size is 1, the largest host with greatest
weight should be returned.
"""
self.flags(scheduler_host_subset_size=1)
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_filtered_hosts)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
self.next_weight = 50
def _fake_weigh_objects(_self, functions, hosts, options):
this_weight = self.next_weight
self.next_weight = 0
host_state = hosts[0]
return [weights.WeighedHost(host_state, this_weight)]
instance_properties = {'project_id': 1,
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
self.stubs.Set(weights.HostWeightHandler,
'get_weighed_objects', _fake_weigh_objects)
filter_properties = {}
self.mox.ReplayAll()
hosts = sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# one host should be chosen
self.assertEquals(1, len(hosts))
self.assertEquals(50, hosts[0].weight)
def test_select_hosts_happy_day(self):
"""select_hosts is basically a wrapper around the _select() method.
Similar to the _select tests, this just does a happy path test to
ensure there is nothing glaringly wrong.
"""
self.next_weight = 1.0
selected_hosts = []
def _fake_weigh_objects(_self, functions, hosts, options):
self.next_weight += 2.0
host_state = hosts[0]
selected_hosts.append(host_state.host)
return [weights.WeighedHost(host_state, self.next_weight)]
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_filtered_hosts)
self.stubs.Set(weights.HostWeightHandler,
'get_weighed_objects', _fake_weigh_objects)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
request_spec = {'num_instances': 10,
'instance_type': {'memory_mb': 512, 'root_gb': 512,
'ephemeral_gb': 0,
'vcpus': 1},
'instance_properties': {'project_id': 1,
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'}}
self.mox.ReplayAll()
hosts = sched.select_hosts(fake_context, request_spec, {})
self.assertEquals(len(hosts), 10)
self.assertEquals(hosts, selected_hosts)
def test_select_hosts_no_valid_host(self):
def _return_no_host(*args, **kwargs):
return []
self.stubs.Set(self.driver, '_schedule', _return_no_host)
self.assertRaises(exception.NoValidHost,
self.driver.select_hosts, self.context, {}, {})
def test_select_destinations(self):
"""select_destinations is basically a wrapper around _schedule().
Similar to the _schedule tests, this just does a happy path test to
ensure there is nothing glaringly wrong.
"""
self.next_weight = 1.0
selected_hosts = []
selected_nodes = []
def _fake_weigh_objects(_self, functions, hosts, options):
self.next_weight += 2.0
host_state = hosts[0]
selected_hosts.append(host_state.host)
selected_nodes.append(host_state.nodename)
return [weights.WeighedHost(host_state, self.next_weight)]
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_filtered_hosts)
self.stubs.Set(weights.HostWeightHandler,
'get_weighed_objects', _fake_weigh_objects)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
request_spec = {'instance_type': {'memory_mb': 512, 'root_gb': 512,
'ephemeral_gb': 0,
'vcpus': 1},
'instance_properties': {'project_id': 1,
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'},
'num_instances': 1}
self.mox.ReplayAll()
dests = sched.select_destinations(fake_context, request_spec, {})
(host, node) = (dests[0]['host'], dests[0]['nodename'])
self.assertEquals(host, selected_hosts[0])
self.assertEquals(node, selected_nodes[0])
def test_select_destinations_no_valid_host(self):
def _return_no_host(*args, **kwargs):
return []
self.stubs.Set(self.driver, '_schedule', _return_no_host)
self.assertRaises(exception.NoValidHost,
self.driver.select_destinations, self.context,
{'num_instances': 1}, {})
def test_handles_deleted_instance(self):
"""Test instance deletion while being scheduled."""
def _raise_instance_not_found(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='123')
self.stubs.Set(driver, 'instance_update_db',
_raise_instance_not_found)
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project')
host_state = host_manager.HostState('host2', 'node2')
weighted_host = weights.WeighedHost(host_state, 1.42)
filter_properties = {}
uuid = 'fake-uuid1'
instance_properties = {'project_id': 1, 'os_type': 'Linux'}
request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
'instance_properties': instance_properties,
'instance_uuids': [uuid]}
sched._provision_resource(fake_context, weighted_host,
request_spec, filter_properties,
None, None, None, None)
    def test_pci_request_in_filter_properties(self):
        """populate_filter_properties must copy the flavor's PCI device
        requests into filter_properties['pci_requests']."""
        instance_type = {}
        request_spec = {'instance_type': instance_type,
                        'instance_properties': {'project_id': 1,
                                                'os_type': 'Linux'}}
        filter_properties = {}
        requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
        # Mock the flavor -> PCI-request extraction to return a known list.
        self.mox.StubOutWithMock(pci_request, 'get_pci_requests_from_flavor')
        pci_request.get_pci_requests_from_flavor(
            instance_type).AndReturn(requests)
        self.mox.ReplayAll()
        self.driver.populate_filter_properties(
            request_spec, filter_properties)
        self.assertEqual(filter_properties.get('pci_requests'),
                         requests)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import numpy as np
import tensorflow as tf
import time
from differential_privacy.multiple_teachers import utils
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('dropout_seed', 123, """seed for dropout.""")
tf.app.flags.DEFINE_integer('batch_size', 128, """Nb of images in a batch.""")
tf.app.flags.DEFINE_integer('epochs_per_decay', 350, """Nb epochs per decay""")
# learning_rate is stored as 100x the real rate (integer flags cannot hold
# fractions); train_op_fun divides it by 100 before use.
tf.app.flags.DEFINE_integer('learning_rate', 5, """100 * learning rate""")
tf.app.flags.DEFINE_boolean('log_device_placement', False, """see TF doc""")
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
def _variable_on_cpu(name, shape, initializer):
  """Create (or fetch) a variable pinned to host (CPU) memory.

  Args:
    name: name of the variable
    shape: list of ints giving the variable shape
    initializer: initializer used when the variable is first created

  Returns:
    The variable tensor, placed on /cpu:0
  """
  with tf.device('/cpu:0'):
    return tf.get_variable(name, shape, initializer=initializer)
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    # tf.mul was removed in TF 1.0; tf.multiply is the drop-in replacement
    # and matches the TF 1.x-style APIs already used elsewhere in this file.
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
def inference(images, dropout=False):
  """Build the CNN model.

  Architecture: conv1 -> pool1 -> norm1 -> conv2 -> norm2 -> pool2 ->
  local3 -> local4 -> softmax_linear.  No softmax is applied here; see
  loss_fun / softmax_preds.

  Args:
    images: Images returned from distorted_inputs() or inputs().
      NOTE(review): assumed NHWC with batch dimension FLAGS.batch_size,
      since the local3 reshape hard-codes that size - confirm with callers.
    dropout: Boolean controling whether to use dropout or not

  Returns:
    Logits
  """
  # MNIST inputs are greyscale (1 channel); other datasets are RGB (3).
  if FLAGS.dataset == 'mnist':
    first_conv_shape = [5, 5, 1, 64]
  else:
    first_conv_shape = [5, 5, 3, 64]
  # conv1
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=first_conv_shape,
                                         stddev=1e-4,
                                         wd=0.0)
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope.name)
    # Training-only dropout on conv activations (keep probability 0.3).
    if dropout:
      conv1 = tf.nn.dropout(conv1, 0.3, seed=FLAGS.dropout_seed)
  # pool1
  pool1 = tf.nn.max_pool(conv1,
                         ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1],
                         padding='SAME',
                         name='pool1')
  # norm1 (local response normalization)
  norm1 = tf.nn.lrn(pool1,
                    4,
                    bias=1.0,
                    alpha=0.001 / 9.0,
                    beta=0.75,
                    name='norm1')
  # conv2
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 64, 128],
                                         stddev=1e-4,
                                         wd=0.0)
    conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.1))
    bias = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(bias, name=scope.name)
    if dropout:
      conv2 = tf.nn.dropout(conv2, 0.3, seed=FLAGS.dropout_seed)
  # norm2 (applied before pooling here, unlike the conv1 branch)
  norm2 = tf.nn.lrn(conv2,
                    4,
                    bias=1.0,
                    alpha=0.001 / 9.0,
                    beta=0.75,
                    name='norm2')
  # pool2
  pool2 = tf.nn.max_pool(norm2,
                         ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1],
                         padding='SAME',
                         name='pool2')
  # local3 (first fully connected layer)
  with tf.variable_scope('local3') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
    dim = reshape.get_shape()[1].value
    weights = _variable_with_weight_decay('weights',
                                          shape=[dim, 384],
                                          stddev=0.04,
                                          wd=0.004)
    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
    local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
    if dropout:
      local3 = tf.nn.dropout(local3, 0.5, seed=FLAGS.dropout_seed)
  # local4 (second fully connected layer)
  with tf.variable_scope('local4') as scope:
    weights = _variable_with_weight_decay('weights',
                                          shape=[384, 192],
                                          stddev=0.04,
                                          wd=0.004)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
    if dropout:
      local4 = tf.nn.dropout(local4, 0.5, seed=FLAGS.dropout_seed)
  # compute logits (linear layer, no weight decay)
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights',
                                          [192, FLAGS.nb_labels],
                                          stddev=1/192.0,
                                          wd=0.0)
    biases = _variable_on_cpu('biases',
                              [FLAGS.nb_labels],
                              tf.constant_initializer(0.0))
    logits = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
  return logits
def inference_deeper(images, dropout=False):
  """Build a deeper CNN model.

  Architecture: seven 3x3/5x5 conv layers (conv3 and conv6 use stride 2 in
  place of pooling) followed by two fully connected layers and a linear
  softmax layer.  No softmax is applied here.

  Args:
    images: Images returned from distorted_inputs() or inputs().
      NOTE(review): assumed NHWC with batch dimension FLAGS.batch_size,
      since the local1 reshape hard-codes that size - confirm with callers.
    dropout: Boolean controling whether to use dropout or not

  Returns:
    Logits
  """
  # MNIST inputs are greyscale (1 channel); other datasets are RGB (3).
  if FLAGS.dataset == 'mnist':
    first_conv_shape = [3, 3, 1, 96]
  else:
    first_conv_shape = [3, 3, 3, 96]
  # conv1
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=first_conv_shape,
                                         stddev=0.05,
                                         wd=0.0)
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [96], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope.name)
  # conv2
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[3, 3, 96, 96],
                                         stddev=0.05,
                                         wd=0.0)
    conv = tf.nn.conv2d(conv1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [96], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(bias, name=scope.name)
  # conv3 (stride 2: halves the spatial resolution instead of pooling)
  with tf.variable_scope('conv3') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[3, 3, 96, 96],
                                         stddev=0.05,
                                         wd=0.0)
    conv = tf.nn.conv2d(conv2, kernel, [1, 2, 2, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [96], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv3 = tf.nn.relu(bias, name=scope.name)
    if dropout:
      conv3 = tf.nn.dropout(conv3, 0.5, seed=FLAGS.dropout_seed)
  # conv4
  with tf.variable_scope('conv4') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[3, 3, 96, 192],
                                         stddev=0.05,
                                         wd=0.0)
    conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv4 = tf.nn.relu(bias, name=scope.name)
  # conv5
  with tf.variable_scope('conv5') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[3, 3, 192, 192],
                                         stddev=0.05,
                                         wd=0.0)
    conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv5 = tf.nn.relu(bias, name=scope.name)
  # conv6 (stride 2: second spatial downsampling)
  with tf.variable_scope('conv6') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[3, 3, 192, 192],
                                         stddev=0.05,
                                         wd=0.0)
    conv = tf.nn.conv2d(conv5, kernel, [1, 2, 2, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv6 = tf.nn.relu(bias, name=scope.name)
    if dropout:
      conv6 = tf.nn.dropout(conv6, 0.5, seed=FLAGS.dropout_seed)
  # conv7
  with tf.variable_scope('conv7') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 192, 192],
                                         stddev=1e-4,
                                         wd=0.0)
    conv = tf.nn.conv2d(conv6, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    bias = tf.nn.bias_add(conv, biases)
    conv7 = tf.nn.relu(bias, name=scope.name)
  # local1 (first fully connected layer)
  with tf.variable_scope('local1') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    reshape = tf.reshape(conv7, [FLAGS.batch_size, -1])
    dim = reshape.get_shape()[1].value
    weights = _variable_with_weight_decay('weights',
                                          shape=[dim, 192],
                                          stddev=0.05,
                                          wd=0)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local1 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
  # local2 (second fully connected layer)
  with tf.variable_scope('local2') as scope:
    weights = _variable_with_weight_decay('weights',
                                          shape=[192, 192],
                                          stddev=0.05,
                                          wd=0)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local2 = tf.nn.relu(tf.matmul(local1, weights) + biases, name=scope.name)
    if dropout:
      local2 = tf.nn.dropout(local2, 0.5, seed=FLAGS.dropout_seed)
  # compute logits (linear layer, no weight decay)
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights',
                                          [192, FLAGS.nb_labels],
                                          stddev=0.05,
                                          wd=0.0)
    biases = _variable_on_cpu('biases',
                              [FLAGS.nb_labels],
                              tf.constant_initializer(0.0))
    logits = tf.add(tf.matmul(local2, weights), biases, name=scope.name)
  return logits
def loss_fun(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the cross entropy between labels and predictions
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='cross_entropy_per_example')
  # Calculate the average cross entropy loss across the batch.
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  # Add to TF collection for losses
  tf.add_to_collection('losses', cross_entropy_mean)
  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
def moving_av(total_loss):
  """Build an op that maintains exponential moving averages of all losses.

  Args:
    total_loss: Total loss from loss_fun().

  Returns:
    An op that, when run, updates the moving average of every loss in the
    'losses' collection plus total_loss.
  """
  ema = tf.train.ExponentialMovingAverage(0.9, name='avg')
  tracked_losses = tf.get_collection('losses') + [total_loss]
  return ema.apply(tracked_losses)
def train_op_fun(total_loss, global_step):
  """Train model.

  Create an optimizer and apply to all trainable variables. Add moving
  average for all trainable variables.

  Args:
    total_loss: Total loss from loss().
    global_step: Integer Variable counting the number of training steps
      processed.

  Returns:
    train_op: op for training.
  """
  # Variables that affect learning rate.
  nb_ex_per_train_epoch = int(60000 / FLAGS.nb_teachers)
  num_batches_per_epoch = nb_ex_per_train_epoch / FLAGS.batch_size
  decay_steps = int(num_batches_per_epoch * FLAGS.epochs_per_decay)
  # FLAGS.learning_rate stores 100x the real rate (integer flag).
  initial_learning_rate = float(FLAGS.learning_rate) / 100.0
  # Decay the learning rate exponentially based on the number of steps.
  lr = tf.train.exponential_decay(initial_learning_rate,
                                  global_step,
                                  decay_steps,
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)
  # tf.scalar_summary was removed in TF 1.0; tf.summary.scalar is the
  # replacement and matches the TF 1.x APIs used elsewhere in this file.
  tf.summary.scalar('learning_rate', lr)
  # Generate moving averages of all losses and associated summaries.
  loss_averages_op = moving_av(total_loss)
  # Compute gradients only after the loss moving averages are updated.
  with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)
  # Apply gradients.
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
  # Add histograms for trainable variables (tf.histogram_summary was also
  # removed in TF 1.0; tf.summary.histogram is the replacement).
  for var in tf.trainable_variables():
    tf.summary.histogram(var.op.name, var)
  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(
      MOVING_AVERAGE_DECAY, global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())
  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
    train_op = tf.no_op(name='train')
  return train_op
def _input_placeholder():
  """Declare the TF placeholder for a batch of input images.

  The spatial size and channel count depend on FLAGS.dataset: MNIST images
  are 28x28x1; any other dataset is treated as 32x32x3.

  :return: float32 placeholder of shape (batch_size, size, size, channels)
  """
  if FLAGS.dataset == 'mnist':
    image_size, num_channels = 28, 1
  else:
    image_size, num_channels = 32, 3
  # Declare data placeholder
  batch_shape = (FLAGS.batch_size, image_size, image_size, num_channels)
  return tf.placeholder(tf.float32, shape=batch_shape)
def train(images, labels, ckpt_path, dropout=False):
  """
  This function contains the loop that actually trains the model.
  :param images: a numpy array with the input data (float32)
  :param labels: a numpy array with the output labels (int32)
  :param ckpt_path: a path (including name) where model checkpoints are saved
  :param dropout: Boolean, whether to use dropout or not
  :return: True if everything went well
  """
  # Check training data
  assert len(images) == len(labels)
  assert images.dtype == np.float32
  assert labels.dtype == np.int32
  # Set default TF graph
  with tf.Graph().as_default():
    global_step = tf.Variable(0, trainable=False)
    # Declare data placeholder
    train_data_node = _input_placeholder()
    # Create a placeholder to hold labels
    train_labels_shape = (FLAGS.batch_size,)
    train_labels_node = tf.placeholder(tf.int32, shape=train_labels_shape)
    print("Done Initializing Training Placeholders")
    # Build a Graph that computes the logits predictions from the placeholder
    if FLAGS.deeper:
      logits = inference_deeper(train_data_node, dropout=dropout)
    else:
      logits = inference(train_data_node, dropout=dropout)
    # Calculate loss
    loss = loss_fun(logits, train_labels_node)
    # Build a Graph that trains the model with one batch of examples and
    # updates the model parameters.
    train_op = train_op_fun(loss, global_step)
    # Create a saver.
    saver = tf.train.Saver(tf.all_variables())
    print("Graph constructed and saver created")
    # Build an initialization operation to run below.
    init = tf.global_variables_initializer()
    # Create and init sessions
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)) #NOLINT(long-line)
    sess.run(init)
    print("Session ready, beginning training loop")
    # Initialize the number of batches
    data_length = len(images)
    # NOTE(review): with "from __future__ import division" this is float
    # division; on Python 2 math.ceil returns a float, which the modular
    # arithmetic below tolerates.  xrange below is Python 2 only.
    nb_batches = math.ceil(data_length / FLAGS.batch_size)
    for step in xrange(FLAGS.max_steps):
      # for debug, save start time
      start_time = time.time()
      # Current batch number (cycles through the dataset repeatedly)
      batch_nb = step % nb_batches
      # Current batch start and end indices
      start, end = utils.batch_indices(batch_nb, data_length, FLAGS.batch_size)
      # Prepare dictionnary to feed the session with
      feed_dict = {train_data_node: images[start:end],
                   train_labels_node: labels[start:end]}
      # Run training step
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
      # Compute duration of training step
      duration = time.time() - start_time
      # Sanity check
      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
      # Echo loss once in a while
      if step % 100 == 0:
        num_examples_per_step = FLAGS.batch_size
        examples_per_sec = num_examples_per_step / duration
        sec_per_batch = float(duration)
        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch)')
        print (format_str % (datetime.now(), step, loss_value,
                             examples_per_sec, sec_per_batch))
      # Save the model checkpoint periodically.
      if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        saver.save(sess, ckpt_path, global_step=step)
  return True
def softmax_preds(images, ckpt_path, return_logits=False):
  """
  Compute softmax activations (probabilities) with the model saved in the path
  specified as an argument
  :param images: a np array of images
  :param ckpt_path: a TF model checkpoint
  :param return_logits: if set to True, return logits instead of probabilities
  :return: probabilities (or logits if return_logits is set to True)
  """
  # Compute nb samples and deduce nb of batches
  data_length = len(images)
  nb_batches = math.ceil(len(images) / FLAGS.batch_size)
  # Declare data placeholder
  train_data_node = _input_placeholder()
  # Build a Graph that computes the logits predictions from the placeholder
  if FLAGS.deeper:
    logits = inference_deeper(train_data_node)
  else:
    logits = inference(train_data_node)
  if return_logits:
    # We are returning the logits directly (no need to apply softmax)
    output = logits
  else:
    # Add softmax predictions to graph: will return probabilities
    output = tf.nn.softmax(logits)
  # Restore the moving average version of the learned variables for eval.
  variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
  variables_to_restore = variable_averages.variables_to_restore()
  saver = tf.train.Saver(variables_to_restore)
  # Will hold the result
  preds = np.zeros((data_length, FLAGS.nb_labels), dtype=np.float32)
  # Create TF session
  with tf.Session() as sess:
    # Restore TF session from checkpoint file
    saver.restore(sess, ckpt_path)
    # Parse data by batch
    # NOTE(review): nb_batches already comes from math.ceil, so the +1
    # looks like one extra iteration; presumably utils.batch_indices clamps
    # the final slice - confirm.  xrange is Python 2 only.
    for batch_nb in xrange(0, int(nb_batches+1)):
      # Compute batch start and end indices
      start, end = utils.batch_indices(batch_nb, data_length, FLAGS.batch_size)
      # Prepare feed dictionary
      feed_dict = {train_data_node: images[start:end]}
      # Run session ([0] because run returns a batch with len 1st dim == 1)
      preds[start:end, :] = sess.run([output], feed_dict=feed_dict)[0]
  # Reset graph to allow multiple calls
  tf.reset_default_graph()
  return preds
|
|
# This module allows you to collect network stats. These values that are collected from
#
# /proc/net/netstat
import sys
import re
import time
import copy
import string
# Module parameters handed to metric_init by gmond (unused here).
PARAMS = {}
# Most recent snapshot of parsed counters, keyed by metric group.
METRICS = {
    'time': 0,
    'data': {}
}
# Kernel files that expose the network counters this module parses.
stats_files = ["/proc/net/netstat", "/proc/net/snmp"]
# Previous snapshot, kept so rates (deltas) can be computed.
LAST_METRICS = copy.deepcopy(METRICS)
# Re-read the /proc files at most every N seconds.
METRICS_CACHE_MAX = 5
# Metrics that are not counters but absolute values
ABSOLUTE_VALUES = ["currestab"]
# Maps group -> column index -> lowercased metric name (built in metric_init).
stats_pos = {}
def get_metrics():
    """Return [METRICS, LAST_METRICS], re-reading /proc at most every
    METRICS_CACHE_MAX seconds.

    Values are stored as strings; callers convert with float().
    NOTE(review): on IOError this returns 0, which every caller then fails
    to unpack ([a, b] = 0) - the error path looks untested.
    """
    global METRICS, LAST_METRICS
    if (time.time() - METRICS['time']) > METRICS_CACHE_MAX:
        new_metrics = {}
        # NOTE(review): "file" shadows the builtin and is reused for both
        # the path and the open handle.
        for file in stats_files:
            try:
                file = open(file, 'r')
            except IOError:
                return 0
            # convert to dict
            metrics = {}
            for line in file:
                # Value lines look like "Tcp: 1 200 ...": name, colon, digits.
                if re.match("(.*): [0-9]", line):
                    count = 0
                    metrics = re.split("\s+", line)
                    metric_group = metrics[0].replace(":", "").lower()
                    # Only groups whose header row was seen in metric_init.
                    if metric_group not in stats_pos:
                        continue
                    new_metrics[metric_group] = dict()
                    for value in metrics:
                        # Skip first
                        # NOTE(review): "value >= 0" compares a str with an
                        # int - on Python 2 this is always True, so it is
                        # effectively a no-op guard.
                        if count > 0 and value >= 0 and count in stats_pos[metric_group]:
                            metric_name = stats_pos[metric_group][count]
                            new_metrics[metric_group][metric_name] = value
                        count += 1
            file.close()
        # update cache
        LAST_METRICS = copy.deepcopy(METRICS)
        METRICS = {
            'time': time.time(),
            'data': new_metrics
        }
    return [METRICS, LAST_METRICS]
def get_value(name):
    """Return the current absolute value for the requested metric.

    The metric name is "<group>_<metric>" (e.g. "tcp_currestab"); group is
    the lowercased prefix from /proc/net/netstat or /proc/net/snmp.
    Returns 0 when the metric is missing or cannot be parsed.
    """
    # get metrics
    [curr_metrics, last_metrics] = get_metrics()
    parts = name.split("_")
    group = parts[0]
    metric = "_".join(parts[1:])
    try:
        result = float(curr_metrics['data'][group][metric])
    # StandardError does not exist on Python 3 and was needlessly broad;
    # only a missing key or an unparsable value is expected here.
    except (KeyError, TypeError, ValueError):
        result = 0
    return result
def get_delta(name):
    """Return change over time (per-second rate) for the requested metric.

    name is "<group>_<metric>"; the rate is computed from the two cached
    snapshots returned by get_metrics().
    """
    # get metrics
    [curr_metrics, last_metrics] = get_metrics()
    parts = name.split("_")
    group = parts[0]
    metric = "_".join(parts[1:])
    try:
        # NOTE(review): if both snapshots carry the same timestamp this
        # raises ZeroDivisionError, which is not caught here - confirm
        # gmond's polling interval always exceeds the cache window.
        delta = (float(curr_metrics['data'][group][metric]) - float(last_metrics['data'][group][metric])) / (curr_metrics['time'] - last_metrics['time'])
        # A negative delta means the kernel counter wrapped or reset.
        if delta < 0:
            print name + " is less 0"
            delta = 0
    except KeyError:
        delta = 0.0
    return delta
def get_tcploss_percentage(name):
    """Return TCP loss as a percentage of segments transferred since the
    previous poll: 100 * delta(tcploss) / delta(insegs + outsegs)."""
    # get metrics
    [curr_metrics, last_metrics] = get_metrics()
    try:
        pct = 100 * (float(curr_metrics['data']['tcpext']["tcploss"]) - float(last_metrics["data"]['tcpext']["tcploss"])) / (float(curr_metrics['data']['tcp']['outsegs']) + float(curr_metrics['data']['tcp']['insegs']) - float(last_metrics['data']['tcp']['insegs']) - float(last_metrics['data']['tcp']['outsegs']))
        # Negative percentages indicate a counter wrap/reset; clamp to 0.
        if pct < 0:
            print name + " is less 0"
            pct = 0
    except KeyError:
        pct = 0.0
    except ZeroDivisionError:
        pct = 0.0
    return pct
def get_tcpattemptfail_percentage(name):
# get metrics
[curr_metrics, last_metrics] = get_metrics()
try:
pct = 100 * (float(curr_metrics['data']['tcp']["attemptfails"]) - float(last_metrics["data"]['tcp']["attemptfails"])) / (float(curr_metrics['data']['tcp']['outsegs']) + float(curr_metrics['data']['tcp']['insegs']) - float(last_metrics['data']['tcp']['insegs']) - float(last_metrics['data']['tcp']['outsegs']))
if pct < 0:
print name + " is less 0"
pct = 0
except Exception:
pct = 0.0
return pct
def get_retrans_percentage(name):
    """Return TCP retransmissions as a percentage of segments transferred
    since the previous poll: 100 * delta(retranssegs) / delta(insegs + outsegs)."""
    # get metrics
    [curr_metrics, last_metrics] = get_metrics()
    try:
        pct = 100 * (float(curr_metrics['data']['tcp']["retranssegs"]) - float(last_metrics['data']['tcp']["retranssegs"])) / (float(curr_metrics['data']['tcp']['outsegs']) + float(curr_metrics['data']['tcp']['insegs']) - float(last_metrics['data']['tcp']['insegs']) - float(last_metrics['data']['tcp']['outsegs']))
        # Negative percentages indicate a counter wrap/reset; clamp to 0.
        if pct < 0:
            print name + " is less 0"
            pct = 0
    except KeyError:
        pct = 0.0
    except ZeroDivisionError:
        pct = 0.0
    return pct
def create_desc(skel, prop):
    """Return a copy of the skeleton descriptor dict overlaid with prop.

    Neither input is mutated.  The previous implementation iterated with
    dict.iteritems(), which only exists on Python 2; dict.update() behaves
    identically and works on both Python 2 and 3.
    """
    d = skel.copy()
    d.update(prop)
    return d
def metric_init(params):
    """Build and return the list of ganglia metric descriptors.

    Parses the header rows of /proc/net/netstat and /proc/net/snmp to learn
    which counters exist and at which column position (stored in stats_pos),
    then emits one descriptor per counter plus three derived percentages.
    """
    # NOTE(review): metric_map is declared global but never assigned here.
    global descriptors, metric_map, Desc_Skel
    descriptors = []
    # Template descriptor; per-metric fields are overlaid via create_desc.
    Desc_Skel = {
        'name' : 'XXX',
        'call_back' : get_delta,
        'time_max' : 60,
        'value_type' : 'float',
        'format' : '%.5f',
        'units' : 'count/s',
        'slope' : 'both', # zero|positive|negative|both
        'description' : 'XXX',
        'groups' : 'XXX',
        }
    ####################################################################################
    # Let's figure out what metrics are available
    #
    # Read /proc/net/netstat
    ####################################################################################
    # NOTE(review): "file" shadows the builtin and is reused for the handle.
    for file in stats_files:
        try:
            file = open(file, 'r')
        except IOError:
            return 0
        # Find mapping
        for line in file:
            # Lines with
            # Header rows ("TcpExt: SyncookiesSent ...") have no digit after
            # the colon, so they fail the value-line pattern used below.
            if not re.match("(.*): [0-9]", line):
                count = 0
                mapping = re.split("\s+", line)
                metric_group = mapping[0].replace(":", "").lower()
                stats_pos[metric_group] = dict()
                for metric in mapping:
                    # Skip first
                    if count > 0 and metric != "":
                        lowercase_metric = metric.lower()
                        stats_pos[metric_group][count] = lowercase_metric
                    count += 1
        file.close()
    # Emit one descriptor per discovered counter; absolute gauges use
    # get_value, everything else reports a per-second delta.
    for group in stats_pos:
        for item in stats_pos[group]:
            if stats_pos[group][item] in ABSOLUTE_VALUES:
                descriptors.append(create_desc(Desc_Skel, {
                    "name" : group + "_" + stats_pos[group][item],
                    "call_back" : get_value,
                    "groups" : group
                    }))
            else:
                descriptors.append(create_desc(Desc_Skel, {
                    "name" : group + "_" + stats_pos[group][item],
                    "groups" : group
                    }))
    # Derived ratio metrics computed from pairs of raw counters.
    descriptors.append(create_desc(Desc_Skel, {
        "name" : "tcpext_tcploss_percentage",
        "call_back" : get_tcploss_percentage,
        "description": "TCP percentage loss, tcploss / insegs + outsegs",
        "units" : "pct",
        'groups' : 'tcpext'
        }))
    descriptors.append(create_desc(Desc_Skel, {
        "name" : "tcp_attemptfails_percentage",
        "call_back" : get_tcpattemptfail_percentage,
        "description": "TCP attemptfail percentage, tcpattemptfail / insegs + outsegs",
        "units" : "pct",
        'groups' : 'tcp'
        }))
    descriptors.append(create_desc(Desc_Skel, {
        "name" : "tcp_retrans_percentage",
        "call_back" : get_retrans_percentage,
        "description": "TCP retrans percentage, retranssegs / insegs + outsegs",
        "units" : "pct",
        'groups' : 'tcp'
        }))
    return descriptors
def metric_cleanup():
    """Clean up the metric module; this module holds no resources."""
    return None
# This code is for debugging and unit testing
# (Python 2 print statements; run under gmond this block never executes.)
if __name__ == '__main__':
    descriptors = metric_init(PARAMS)
    while True:
        # Poll every registered callback and echo its current value.
        for d in descriptors:
            v = d['call_back'](d['name'])
            print '%s = %s' % (d['name'], v)
        print 'Sleeping 15 seconds'
        time.sleep(15)
|
|
"""
lsh.py
Algorithms based on 'Mining of Massive Datasets'
http://infolab.stanford.edu/~ullman/mmds/ch3.pdf - Section 3.4
"""
from collections import defaultdict
import multiprocessing as mp
import random
import pyhash
import Levenshtein
from jsonleveldb import JsonLevelDB
from .unionfind import UnionFind
class Signature(object):
    """Abstract base for signature schemes.

    Subclasses supply `hash_functions` (a family of `dim` hash functions)
    and `sign` (mapping an object to its signature).
    """

    def __init__(self, dim):
        # Number of hash functions, i.e. the signature length.
        self.dim = dim
        self.hashes = self.hash_functions()

    def hash_functions(self):
        """Return dim different hash functions (abstract; yields None here)."""
        return None

    def sign(self, object):
        """Return the signature for the given object (abstract; None here)."""
        return None
class MinHashSignature(Signature):
    """Creates signatures for sets/tuples using minhash."""
    def __init__(self, dim, seeds=None):
        # Deliberately does not call Signature.__init__: the hash family
        # here needs the seeds and hasher set up first.
        self.dim = dim
        self.seeds = self._set_seeds(seeds)
        self.hasher = pyhash.murmur3_32()
        self.hashes = self._hash_functions()
    def _set_seeds(self, seeds):
        """Returns random 32 bit seeds for hash functions.

        If seeds are supplied (e.g. to reproduce earlier signatures) their
        length must equal dim.
        """
        if seeds is not None:
            if len(seeds) != self.dim:
                raise Exception("Seeds length should match dim")
            return seeds
        # NOTE(review): xrange is Python 2 only.
        return [random.getrandbits(32) for i in xrange(self.dim)]
    def _hash_functions(self):
        """Return dim different hash functions"""
        # hash_factory binds n as a default-free closure argument so each
        # lambda keeps its own seed index (avoids late-binding capture).
        def hash_factory(n):
            return lambda x: self.hasher(x.encode('utf-8'), seed=self.seeds[n])
        return [ hash_factory(_) for _ in range(self.dim) ]
    def sign(self, s):
        """Returns minhash signature for set s"""
        # Start at +inf so the first hashed value always replaces the slot.
        sig = [ float("inf") ] * self.dim
        for hash_ix, hash_fn in enumerate(self.hashes):
            sig[hash_ix] = min(hash_fn(value) for value in s)
        return sig
class LSH(object):
    """Locality sensitive hashing. Uses a banding approach to hash
    similar signatures to the same buckets."""
    def __init__(self, dim, threshold):
        self.dim = dim
        self.threshold = threshold
        self.bandwidth = self.get_bandwidth(dim, threshold)
        self.hasher = pyhash.murmur3_32()

    def gen_hash(self, sig):
        """Generate one hash value per band of the signature.

        The signature is chopped into bands of `bandwidth` rows; within a
        band each item's hash is chained as the seed of the next hash.
        """
        for band in zip(*(iter(sig),) * self.bandwidth):
            seed = 0x3456789
            for item in band:
                hashval = self.hasher(str(item), seed=seed)
                seed = hashval
            yield hashval

    def get_bandwidth(self, n, t):
        """Approximates the bandwidth (number of rows in each band)
        needed to get threshold.

        Threshold t = (1/b) ** (1/r) where
        b = #bands
        r = #rows per band
        n = b * r = #elements in signature

        Returns:
            int: the best number of rows per band found.
        """
        # BUG FIX: `best` used to start as the tuple (n, 1), but every
        # later assignment and every caller treats it as an int row count,
        # so the early-exit path returned an inconsistent type.
        best = n
        minerr = float("inf")
        for r in range(1, n + 1):
            try:
                b = 1. / (t ** r)
            except (ZeroDivisionError, OverflowError):
                # t ** r reached 0 (or overflowed) - the signature is
                # huge; keep the best bandwidth found so far.
                return best
            err = abs(n - b * r)
            if err < minerr:
                best = r
                minerr = err
        return best

    def get_threshold(self):
        """Return the effective similarity threshold implied by bandwidth."""
        r = self.bandwidth
        b = self.dim / r
        return (1. / b) ** (1. / r)

    def get_n_bands(self):
        """Return the number of bands the signature is split into."""
        return int(self.dim / self.bandwidth)
class Cluster(object):
    """Clusters sets with Jaccard similarity above threshold with high
    probability.
    Algorithm based on Rajaraman, "Mining of Massive Datasets":
    1. Use LSH to highlight similar signatures as candidate pairs
    2. Verify similarity of candidate pairs with constraint function
    3. Use UnionFind to merge buckets containing same values
    """
    def __init__(self, dim=10, threshold=0.5, shingle_size = None, docs_db = None, state = None):
        # Per-process cache of document texts fetched from docs_db.
        self.doccache = dict()
        if state:
            # Restore a previously pickled clustering state.
            # NOTE(review): the hasher is rebuilt from the `dim` *argument*,
            # not state['dim'] - callers must pass a matching dim when
            # restoring, or the band count will disagree with hashmaps.
            self.dim = state['dim']
            self.threshold = state['threshold']
            self.shingle_size = state['shingle_size']
            self.hasher = LSH(dim, self.threshold)
            self.unionfind = state['unionfind']
            self.hashmaps = state['hashmaps']
            self.labellist = state['labellist']
        else:
            self.dim = dim
            self.threshold = threshold
            self.shingle_size = shingle_size
            self.unionfind = UnionFind()
            self.hasher = LSH(dim, self.threshold)
            # One bucket map per LSH band.
            self.hashmaps = [defaultdict(list) for _ in range(self.hasher.get_n_bands())]
            self.labellist = set()
        self.docs_db = docs_db #JsonLevelDB documents
        #Set up workers
        # Daemon worker processes verify candidate pairs in parallel; each
        # forked child gets its own copy of doccache.
        self.job_queue = mp.Queue()
        self.result_queue = mp.Queue()
        self.children = list()
        for i in range(mp.cpu_count()):
            p = mp.Process(target=self._worker, args=())
            p.daemon = True
            p.start()
            self.children.append(p)
    # Levenshtein similarity computed on candidate pairs
    def _worker(self):
        # Runs in a child process; consumes (label1, label2, band, hash)
        # jobs until the "STOP" sentinel arrives.
        for label1, label2, band_idx, hshval in iter(self.job_queue.get,"STOP"):
            try:
                text1 = self.doccache[label1]
            except KeyError:
                text1 = self.docs_db.Get(label1)
                self.doccache[label1] = text1
            try:
                text2 = self.doccache[label2]
            except KeyError:
                text2 = self.docs_db.Get(label2)
                self.doccache[label2] = text2
            # For efficiency - skip documents > 10,000 characters
            if len(text1) > 10000 or len(text2) > 10000:
                self.result_queue.put((label1, label2, band_idx, hshval, False))
                continue
            sim = Levenshtein.ratio(text1, text2)
            result = sim > self.threshold
            self.result_queue.put((label1, label2, band_idx, hshval, result))
    def _add_to_unionfind(self, label, sig):
        # Add label to unionfind
        self.unionfind[label]
        self.labellist.add(label)
        jobs = 0
        checked = set()
        clustered = dict()
        # Fan out a verification job for every bucket collision of `sig`.
        for band_idx, hshval in enumerate(self.hasher.gen_hash(sig)):
            clustered[(band_idx, hshval)] = False
            for map_label in self.hashmaps[band_idx][hshval]:
                if (label, map_label) not in checked:
                    checked.add((label, map_label))
                    self.job_queue.put((label, map_label, band_idx, hshval))
                    jobs += 1
        # Collect exactly as many results as jobs were queued (blocking).
        for i in range(jobs):
            l1, l2, band_idx, hshval, result = self.result_queue.get()
            if result:
                clustered[(band_idx, hshval)] = True
                self.unionfind.union(l1, l2)
        # Only insert the label into buckets where it did not merge with an
        # existing member.  NOTE(review): iterkeys is Python 2 only.
        for band_idx, hshval in clustered.iterkeys():
            if not clustered[(band_idx, hshval)]:
                self.hashmaps[band_idx][hshval].append(label)
    def add_signature(self, sig, label):
        """ sig should be a signature tuple/set """
        self._add_to_unionfind(label, sig)
    def add_set(self, s, label=None):
        """ s should be a set that defines the item to be clustered """
        if not label:
            label = s
        # NOTE(review): _sign is not defined on this class - this path
        # raises AttributeError; presumably a subclass (or a missing
        # signer attribute) is expected to provide it.
        sig = self._sign(s)
        self._add_to_unionfind(label, sig)
    def get_state(self):
        # Everything needed to reconstruct the clustering via __init__'s
        # `state` parameter (queues/workers are rebuilt, not saved).
        state = dict()
        state['hashmaps'] = self.hashmaps
        state['dim'] = self.dim
        state['unionfind'] = self.unionfind
        state['threshold'] = self.threshold
        state['labellist'] = self.labellist
        state['shingle_size'] = self.shingle_size
        return state
    def close_workers(self):
        # One sentinel per worker, then wait for all of them to exit.
        for i in range(mp.cpu_count()):
            self.job_queue.put("STOP")
        for child in self.children:
            child.join()
    def contains(self, label):
        return label in self.labellist
    def get_sets(self):
        return self.unionfind.sets()
    def get_sorted_clusters(self, reverse=True):
        # Returns a list of cluster members (no cluster names)
        clusters = self.get_sets().values()
        clusters.sort(key=len, reverse=reverse)
        return clusters
def shingle(s, k):
    """Yield every contiguous substring of length k from string s.

    If k exceeds len(s), it is clamped so the whole string is yielded once.
    """
    window = min(len(s), k)
    last_start = len(s) - window
    for start in range(last_start + 1):
        yield s[start:start + window]
def hshingle(s, k):
    """Yield the hash of each k-length shingle of string s.

    Equivalent to hashing every item produced by shingle(s, k); the
    shingling loop is inlined so this generator is self-contained.
    """
    width = min(len(s), k)
    for i in range(len(s) - width + 1):
        yield hash(s[i:i + width])
def jaccard_sim(X, Y):
    """Jaccard similarity between two sets: |X & Y| / |X | Y|.

    Inputs may be any iterables; both are converted to sets.  (The
    docstring was previously placed after the first statements, where it
    was a no-op string expression rather than the function's docstring.)

    Raises ZeroDivisionError if both inputs are empty.
    """
    x = set(X)
    y = set(Y)
    return float(len(x & y)) / len(x | y)
def jaccard_dist(X, Y):
    """Jaccard distance between two sets: one minus their similarity."""
    similarity = jaccard_sim(X, Y)
    return 1 - similarity
|
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.utils.fixes import norm
# Shared fixtures for every test in this module: the full iris dataset and
# a seeded RNG so results are deterministic across runs.
iris = datasets.load_iris()
data, y = iris.data, iris.target
rng = np.random.RandomState(0)
def test_transform_linear_model():
    """L1-penalized transform drops features; an l2 refit stays accurate."""
    classifiers = (LogisticRegression(C=0.1),
                   LinearSVC(C=0.01, dual=False),
                   SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
                                 random_state=0))
    for estimator in classifiers:
        for threshold in (None, ".09*mean", "1e-5 * median"):
            for wrap in (np.array, sp.csr_matrix):
                X = wrap(data)
                estimator.set_params(penalty="l1")
                estimator.fit(X, y)
                X_new = assert_warns(
                    DeprecationWarning, estimator.transform, X, threshold)
                # SGD may occasionally keep every feature; others must not.
                if isinstance(estimator, SGDClassifier):
                    assert_true(X_new.shape[1] <= X.shape[1])
                else:
                    assert_less(X_new.shape[1], X.shape[1])
                # The reduced feature set must still support a decent fit.
                estimator.set_params(penalty="l2")
                estimator.fit(X_new, y)
                predictions = estimator.predict(X_new)
                assert_greater(np.mean(predictions == y), 0.7)
def test_invalid_input():
    """transform must raise ValueError for unparseable threshold strings."""
    clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
    bad_thresholds = ["gobbledigook", ".5 * gobbledigook"]
    for bad in bad_thresholds:
        model = SelectFromModel(clf, threshold=bad)
        model.fit(data, y)
        assert_raises(ValueError, model.transform, data)
def test_input_estimator_unchanged():
    """SelectFromModel must fit a clone and keep the passed estimator as-is."""
    forest = RandomForestClassifier()
    selector = SelectFromModel(estimator=forest)
    selector.fit(data, y)
    # The stored (unfitted) estimator must still be the original object.
    assert_true(selector.estimator is forest)
@skip_if_32bit
def test_feature_importances():
    """Feature selection via forest importances: thresholding, sample-weight
    invariance under scaling, and the 1e-5 default for Lasso-like models."""
    X, y = datasets.make_classification(
        n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
        n_repeated=0, shuffle=False, random_state=0)
    est = RandomForestClassifier(n_estimators=50, random_state=0)
    for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
        transformer = SelectFromModel(estimator=est, threshold=threshold)
        transformer.fit(X, y)
        assert_true(hasattr(transformer.estimator_, 'feature_importances_'))
        X_new = transformer.transform(X)
        assert_less(X_new.shape[1], X.shape[1])
        # The kept columns must be exactly those above the threshold.
        importances = transformer.estimator_.feature_importances_
        feature_mask = np.abs(importances) > func(importances)
        assert_array_almost_equal(X_new, X[:, feature_mask])
    # Check with sample weights
    sample_weight = np.ones(y.shape)
    sample_weight[y == 1] *= 100
    est = RandomForestClassifier(n_estimators=50, random_state=0)
    transformer = SelectFromModel(estimator=est)
    transformer.fit(X, y, sample_weight=sample_weight)
    importances = transformer.estimator_.feature_importances_
    # Scaling all weights by a constant must not change the importances.
    transformer.fit(X, y, sample_weight=3 * sample_weight)
    importances_bis = transformer.estimator_.feature_importances_
    assert_almost_equal(importances, importances_bis)
    # For the Lasso and related models, the threshold defaults to 1e-5
    transformer = SelectFromModel(estimator=Lasso(alpha=0.1))
    transformer.fit(X, y)
    X_new = transformer.transform(X)
    mask = np.abs(transformer.estimator_.coef_) > 1e-5
    assert_array_equal(X_new, X[:, mask])
@skip_if_32bit
def test_feature_importances_2d_coef():
    """Multi-class coef_ is 2-D; SelectFromModel must reduce it per-feature
    using the requested vector norm (norm_order) before thresholding."""
    X, y = datasets.make_classification(
        n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
        n_repeated=0, shuffle=False, random_state=0, n_classes=4)
    est = LogisticRegression()
    for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
        for order in [1, 2, np.inf]:
            # Fit SelectFromModel a multi-class problem
            transformer = SelectFromModel(estimator=LogisticRegression(),
                                          threshold=threshold,
                                          norm_order=order)
            transformer.fit(X, y)
            assert_true(hasattr(transformer.estimator_, 'coef_'))
            X_new = transformer.transform(X)
            assert_less(X_new.shape[1], X.shape[1])
            # Manually check that the norm is correctly performed
            est.fit(X, y)
            importances = norm(est.coef_, axis=0, ord=order)
            feature_mask = importances > func(importances)
            assert_array_equal(X_new, X[:, feature_mask])
def test_partial_fit():
    """partial_fit must update the wrapped estimator in place, not rebuild it."""
    classes = np.unique(y)
    est = PassiveAggressiveClassifier(random_state=0, shuffle=False)
    transformer = SelectFromModel(estimator=est)
    transformer.partial_fit(data, y, classes=classes)
    first_model = transformer.estimator_
    transformer.partial_fit(data, y, classes=classes)
    # Same object identity: no re-instantiation on repeated partial_fit.
    assert_true(first_model is transformer.estimator_)
    X_transform = transformer.transform(data)
    # A full fit on the doubled dataset should select the same features.
    transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
    assert_array_equal(X_transform, transformer.transform(data))
def test_calling_fit_reinitializes():
    """set_params on the nested estimator must take effect at the next fit."""
    selector = SelectFromModel(estimator=LinearSVC(random_state=0))
    selector.fit(data, y)
    selector.set_params(estimator__C=100)
    selector.fit(data, y)
    assert_equal(selector.estimator_.C, 100)
def test_prefit():
    """
    Test all possible combinations of the prefit parameter.
    """
    # Passing a prefit parameter with the selected model
    # and fitting an unfit model with prefit=False should give same results.
    clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
    model = SelectFromModel(clf)
    model.fit(data, y)
    X_transform = model.transform(data)
    # clf is now fitted in place; wrapping it with prefit=True must select
    # the same columns without calling fit on the wrapper.
    clf.fit(data, y)
    model = SelectFromModel(clf, prefit=True)
    assert_array_equal(model.transform(data), X_transform)
    # Check that the model is rewritten if prefit=False and a fitted model is
    # passed
    model = SelectFromModel(clf, prefit=False)
    model.fit(data, y)
    assert_array_equal(model.transform(data), X_transform)
    # Check that prefit=True and calling fit raises a ValueError
    model = SelectFromModel(clf, prefit=True)
    assert_raises(ValueError, model.fit, data, y)
def test_threshold_string():
    """A '0.5*mean' threshold string must match the hand-computed cutoff."""
    forest = RandomForestClassifier(n_estimators=50, random_state=0)
    selector = SelectFromModel(forest, threshold="0.5*mean")
    selector.fit(data, y)
    reduced = selector.transform(data)
    # Recompute the cutoff directly from an identically-seeded fit.
    forest.fit(data, y)
    cutoff = 0.5 * np.mean(forest.feature_importances_)
    keep = forest.feature_importances_ > cutoff
    assert_array_equal(reduced, data[:, keep])
def test_threshold_without_refitting():
    """Raising the threshold after fit must prune features without a refit."""
    sgd = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
    selector = SelectFromModel(sgd, threshold=0.1)
    selector.fit(data, y)
    before = selector.transform(data)
    # Tighten the cutoff on the already-fitted selector.
    selector.threshold = 1.0
    assert_greater(before.shape[1], selector.transform(data).shape[1])
|
|
"""Support for DoorBird devices."""
import logging
from urllib.error import HTTPError
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
CONF_DEVICES,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_TOKEN,
CONF_USERNAME,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import dt as dt_util, slugify
_LOGGER = logging.getLogger(__name__)

# Integration identifiers; API_URL is the HTTP endpoint devices call back on.
DOMAIN = "doorbird"
API_URL = f"/api/{DOMAIN}"

# Config keys specific to this integration (the rest come from homeassistant.const).
CONF_CUSTOM_URL = "hass_url_override"  # overrides the HA base URL pushed to the device
CONF_EVENTS = "events"

# Event fired on the HA bus to clear a device's HTTP favorites.
RESET_DEVICE_FAVORITES = "doorbird_reset_favorites"

# Schema for one entry under the integration's `devices` list.
DEVICE_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Required(CONF_TOKEN): cv.string,
        vol.Optional(CONF_EVENTS, default=[]): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_CUSTOM_URL): cv.string,
        vol.Optional(CONF_NAME): cv.string,
    }
)

# Top-level schema: `doorbird: devices: [...]`; other domains are allowed through.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {vol.Required(CONF_DEVICES): vol.All(cv.ensure_list, [DEVICE_SCHEMA])}
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
    """Set up the DoorBird component.

    Connects to each configured device, registers the callback HTTP view,
    optionally registers device-side event webhooks, and installs a bus
    listener that clears a device's HTTP favorites on demand.

    Returns True on success, False if any device fails to connect or to
    register its events.
    """
    from doorbirdpy import DoorBird

    # Provide an endpoint for the doorstations to call to trigger events
    hass.http.register_view(DoorBirdRequestView)

    doorstations = []

    for index, doorstation_config in enumerate(config[DOMAIN][CONF_DEVICES]):
        device_ip = doorstation_config.get(CONF_HOST)
        username = doorstation_config.get(CONF_USERNAME)
        password = doorstation_config.get(CONF_PASSWORD)
        custom_url = doorstation_config.get(CONF_CUSTOM_URL)
        events = doorstation_config.get(CONF_EVENTS)
        token = doorstation_config.get(CONF_TOKEN)
        name = doorstation_config.get(CONF_NAME) or "DoorBird {}".format(index + 1)

        device = DoorBird(device_ip, username, password)
        # status is (ready: bool, http_status_code)
        status = device.ready()

        if status[0]:
            doorstation = ConfiguredDoorBird(device, name, events, custom_url, token)
            doorstations.append(doorstation)
            _LOGGER.info(
                'Connected to DoorBird "%s" as %s@%s',
                doorstation.name,
                username,
                device_ip,
            )
        elif status[1] == 401:
            _LOGGER.error(
                "Authorization rejected by DoorBird for %s@%s", username, device_ip
            )
            return False
        else:
            _LOGGER.error(
                "Could not connect to DoorBird as %s@%s: Error %s",
                username,
                device_ip,
                str(status[1]),
            )
            return False

        # Subscribe to doorbell or motion events
        if events:
            try:
                doorstation.register_events(hass)
            except HTTPError:
                hass.components.persistent_notification.create(
                    "Doorbird configuration failed. Please verify that API "
                    "Operator permission is enabled for the Doorbird user. "
                    "A restart will be required once permissions have been "
                    "verified.",
                    title="Doorbird Configuration Failure",
                    notification_id="doorbird_schedule_error",
                )
                return False

    hass.data[DOMAIN] = doorstations

    def _reset_device_favorites_handler(event):
        """Handle clearing favorites on device."""
        token = event.data.get("token")
        if token is None:
            return

        doorstation = get_doorstation_by_token(hass, token)
        if doorstation is None:
            _LOGGER.error("Device not found for provided token.")
            # Bug fix: previously execution fell through here and crashed
            # with AttributeError on the None doorstation below.
            return

        # Clear webhooks
        favorites = doorstation.device.favorites()
        for favorite_type in favorites:
            for favorite_id in favorites[favorite_type]:
                doorstation.device.delete_favorite(favorite_type, favorite_id)

    hass.bus.listen(RESET_DEVICE_FAVORITES, _reset_device_favorites_handler)

    return True
def get_doorstation_by_token(hass, token):
    """Return the configured doorstation matching *token*, or None."""
    return next(
        (station for station in hass.data[DOMAIN] if station.token == token),
        None,
    )
class ConfiguredDoorBird:
    """Wrap a DoorBird device object with its configured name, events,
    callback-URL override and auth token."""

    def __init__(self, device, name, events, custom_url, token):
        """Initialize configured device."""
        self._name = name
        self._device = device
        self._custom_url = custom_url
        self._events = events
        self._token = token

    @property
    def name(self):
        """Configured display name of this device."""
        return self._name

    @property
    def device(self):
        """Underlying DoorBird device object."""
        return self._device

    @property
    def custom_url(self):
        """Optional Home Assistant URL override for callbacks."""
        return self._custom_url

    @property
    def token(self):
        """Auth token the device sends back on callbacks."""
        return self._token

    def register_events(self, hass):
        """Register this station's configured events as device webhooks."""
        # Use the configured override when present, otherwise this Home
        # Assistant instance's own base URL.
        if self.custom_url is not None:
            hass_url = self.custom_url
        else:
            hass_url = hass.config.api.base_url

        for raw_event in self._events:
            event = self._get_event_name(raw_event)
            self._register_event(hass_url, event)
            _LOGGER.info("Successfully registered URL for %s on %s", event, self.name)

    @property
    def slug(self):
        """URL-safe slug derived from the device name."""
        return slugify(self._name)

    def _get_event_name(self, event):
        # Prefix events with the device slug so stations don't collide.
        return f"{self.slug}_{event}"

    def _register_event(self, hass_url, event):
        """Add a schedule entry in the device for a sensor."""
        url = f"{hass_url}{API_URL}/{event}?token={self._token}"

        # Register HA URL as webhook if not already, then get the ID
        if not self.webhook_is_registered(url):
            self.device.change_favorite("http", f"Home Assistant ({event})", url)

        if not self.get_webhook_id(url):
            _LOGGER.warning(
                'Could not find favorite for URL "%s". Skipping sensor "%s"',
                url,
                event,
            )
            return

    def webhook_is_registered(self, url, favs=None) -> bool:
        """Return whether the given URL is registered as a device favorite."""
        favs = favs or self.device.favorites()
        if "http" not in favs:
            return False
        return any(entry["value"] == url for entry in favs["http"].values())

    def get_webhook_id(self, url, favs=None) -> str or None:
        """
        Return the device favorite ID for the given URL.

        The favorite must exist or there will be problems.
        """
        favs = favs or self.device.favorites()
        if "http" not in favs:
            return None
        return next(
            (fav_id for fav_id, entry in favs["http"].items()
             if entry["value"] == url),
            None,
        )

    def get_event_data(self):
        """Build the payload attached to Home Assistant events."""
        device = self._device
        return {
            "timestamp": dt_util.utcnow().isoformat(),
            "live_video_url": device.live_video_url,
            "live_image_url": device.live_image_url,
            "rtsp_live_video_url": device.rtsp_live_video_url,
            "html5_viewer_url": device.html5_viewer_url,
        }
class DoorBirdRequestView(HomeAssistantView):
    """Provide a page for the device to call.

    Devices hit /api/doorbird/{event}?token=... ; the token identifies the
    configured station. The special event "clear" triggers favorite cleanup
    instead of firing a normal event.
    """

    # Auth is via the per-device token in the query string, not HA auth.
    requires_auth = False
    url = API_URL
    name = API_URL[1:].replace("/", ":")
    extra_urls = [API_URL + "/{event}"]

    async def get(self, request, event):
        """Respond to requests from the device."""
        from aiohttp import web

        hass = request.app["hass"]
        token = request.query.get("token")

        device = get_doorstation_by_token(hass, token)
        if device is None:
            return web.Response(status=401, text="Invalid token provided.")

        # device is guaranteed non-None past the guard above; the previous
        # `if device: ... else: event_data = {}` re-check was dead code.
        event_data = device.get_event_data()

        if event == "clear":
            hass.bus.async_fire(RESET_DEVICE_FAVORITES, {"token": token})
            message = f"HTTP Favorites cleared for {device.slug}"
            return web.Response(status=200, text=message)

        hass.bus.async_fire(f"{DOMAIN}_{event}", event_data)

        return web.Response(status=200, text="OK")
|
|
#!/usr/bin/python
# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test the archive_lib module."""
from __future__ import print_function
import logging
import os
import sys
sys.path.insert(0, os.path.abspath('%s/../..' % os.path.dirname(__file__)))
from chromite.cbuildbot import archive_lib
from chromite.cbuildbot import cbuildbot_config
from chromite.cbuildbot import cbuildbot_run
from chromite.lib import cros_test_lib
from chromite.lib import parallel_unittest
import mock
# Canonical fixture values shared by every test in this module.
DEFAULT_ARCHIVE_PREFIX = 'bogus_bucket/TheArchiveBase'
DEFAULT_ARCHIVE_BASE = 'gs://%s' % DEFAULT_ARCHIVE_PREFIX
DEFAULT_BUILDROOT = '/tmp/foo/bar/buildroot'
DEFAULT_BUILDNUMBER = 12345
DEFAULT_BRANCH = 'TheBranch'
DEFAULT_CHROME_BRANCH = 'TheChromeBranch'
DEFAULT_VERSION_STRING = 'TheVersionString'
DEFAULT_BOARD = 'TheBoard'
DEFAULT_BOT_NAME = 'TheCoolBot'

# Access to protected member.
# pylint: disable=W0212

# Default command-line-style options handed to BuilderRun in these tests.
DEFAULT_OPTIONS = cros_test_lib.EasyAttr(
    archive_base=DEFAULT_ARCHIVE_BASE,
    buildroot=DEFAULT_BUILDROOT,
    buildnumber=DEFAULT_BUILDNUMBER,
    buildbot=True,
    branch=DEFAULT_BRANCH,
    remote_trybot=False,
    debug=False,
)

# Default build config: a master config with two child configs.
DEFAULT_CONFIG = cbuildbot_config._config(
    name=DEFAULT_BOT_NAME,
    master=True,
    boards=[DEFAULT_BOARD],
    child_configs=[cbuildbot_config._config(name='foo'),
                   cbuildbot_config._config(name='bar'),
                  ],
)
def _ExtendDefaultOptions(**kwargs):
  """Return a copy of DEFAULT_OPTIONS with kwargs overrides applied."""
  merged = DEFAULT_OPTIONS.copy()
  merged.update(kwargs)
  return cros_test_lib.EasyAttr(**merged)
def _ExtendDefaultConfig(**kwargs):
  """Return a copy of DEFAULT_CONFIG with kwargs overrides applied."""
  merged = DEFAULT_CONFIG.copy()
  merged.update(kwargs)
  return cbuildbot_config._config(**merged)
def _NewBuilderRun(options=None, config=None):
  """Create a BuilderRun object from options and config values.

  Args:
    options: Specify options or default to DEFAULT_OPTIONS.
    config: Specify build config or default to DEFAULT_CONFIG.

  Returns:
    BuilderRun object.
  """
  manager = parallel_unittest.FakeMultiprocessManager()
  # Preserve truthiness fallback: any falsy argument selects the default.
  return cbuildbot_run.BuilderRun(options or DEFAULT_OPTIONS,
                                  config or DEFAULT_CONFIG,
                                  manager)
class GetBaseUploadURITest(cros_test_lib.TestCase):
  """Test the GetBaseUploadURI function."""

  ARCHIVE_BASE = '/tmp/the/archive/base'
  BOT_ID = 'TheNewBotId'

  def setUp(self):
    # Every test starts from the shared default config; individual tests
    # replace self.cfg to exercise gs_path variations.
    self.cfg = DEFAULT_CONFIG

  def _GetBaseUploadURI(self, *args, **kwargs):
    """Helper: call GetBaseUploadURI with self.cfg plus the given args."""
    return archive_lib.GetBaseUploadURI(self.cfg, *args, **kwargs)

  def testArchiveBaseRemoteTrybotFalse(self):
    # Explicit archive_base wins; bot name is appended directly.
    expected_result = '%s/%s' % (self.ARCHIVE_BASE, DEFAULT_BOT_NAME)
    result = self._GetBaseUploadURI(archive_base=self.ARCHIVE_BASE,
                                    remote_trybot=False)
    self.assertEqual(expected_result, result)

  def testArchiveBaseRemoteTrybotTrue(self):
    # remote_trybot adds the 'trybot-' prefix to the bot name.
    expected_result = '%s/trybot-%s' % (self.ARCHIVE_BASE, DEFAULT_BOT_NAME)
    result = self._GetBaseUploadURI(archive_base=self.ARCHIVE_BASE,
                                    remote_trybot=True)
    self.assertEqual(expected_result, result)

  def testArchiveBaseBotIdRemoteTrybotFalse(self):
    # An explicit bot_id overrides the config's bot name.
    expected_result = '%s/%s' % (self.ARCHIVE_BASE, self.BOT_ID)
    result = self._GetBaseUploadURI(archive_base=self.ARCHIVE_BASE,
                                    bot_id=self.BOT_ID, remote_trybot=False)
    self.assertEqual(expected_result, result)

  def testArchiveBaseBotIdRemoteTrybotTrue(self):
    # With an explicit bot_id, remote_trybot does NOT add the prefix.
    expected_result = '%s/%s' % (self.ARCHIVE_BASE, self.BOT_ID)
    result = self._GetBaseUploadURI(archive_base=self.ARCHIVE_BASE,
                                    bot_id=self.BOT_ID, remote_trybot=True)
    self.assertEqual(expected_result, result)

  def testRemoteTrybotTrue(self):
    """Test GetBaseUploadURI with no archive base but remote_trybot is True."""
    expected_result = ('%s/trybot-%s' %
                       (archive_lib.constants.DEFAULT_ARCHIVE_BUCKET,
                        DEFAULT_BOT_NAME))
    result = self._GetBaseUploadURI(remote_trybot=True)
    self.assertEqual(expected_result, result)

  def testBotIdRemoteTrybotTrue(self):
    # Default bucket plus explicit bot_id, no trybot prefix.
    expected_result = ('%s/%s' %
                       (archive_lib.constants.DEFAULT_ARCHIVE_BUCKET,
                        self.BOT_ID))
    result = self._GetBaseUploadURI(bot_id=self.BOT_ID, remote_trybot=True)
    self.assertEqual(expected_result, result)

  def testDefaultGSPathRemoteTrybotFalse(self):
    """Test GetBaseUploadURI with default gs_path value in config."""
    self.cfg = _ExtendDefaultConfig(gs_path=cbuildbot_config.GS_PATH_DEFAULT)

    # Test without bot_id.
    expected_result = ('%s/%s' %
                       (archive_lib.constants.DEFAULT_ARCHIVE_BUCKET,
                        DEFAULT_BOT_NAME))
    result = self._GetBaseUploadURI(remote_trybot=False)
    self.assertEqual(expected_result, result)

    # Test with bot_id.
    expected_result = ('%s/%s' %
                       (archive_lib.constants.DEFAULT_ARCHIVE_BUCKET,
                        self.BOT_ID))
    result = self._GetBaseUploadURI(bot_id=self.BOT_ID, remote_trybot=False)
    self.assertEqual(expected_result, result)

  def testOverrideGSPath(self):
    """Test GetBaseUploadURI with default gs_path value in config."""
    self.cfg = _ExtendDefaultConfig(gs_path='gs://funkytown/foo/bar')

    # Test without bot_id.
    expected_result = self.cfg.gs_path
    result = self._GetBaseUploadURI(remote_trybot=False)
    self.assertEqual(expected_result, result)

    # Test with bot_id.
    expected_result = self.cfg.gs_path
    result = self._GetBaseUploadURI(bot_id=self.BOT_ID, remote_trybot=False)
    self.assertEqual(expected_result, result)
class ArchiveTest(cros_test_lib.TestCase):
  """Test the Archive class."""

  _VERSION = '6543.2.1'

  def _GetAttributeValue(self, attr, options=None, config=None):
    """Helper: read an Archive attribute with GetVersion mocked out."""
    with mock.patch.object(cbuildbot_run._BuilderRunBase, 'GetVersion') as m:
      m.return_value = self._VERSION

      run = _NewBuilderRun(options, config)
      return getattr(run.GetArchive(), attr)

  def testVersion(self):
    # Archive.version should come straight from the (mocked) GetVersion.
    value = self._GetAttributeValue('version')
    self.assertEqual(self._VERSION, value)

  def testVersionNotReady(self):
    # Without the mock, version is not computable yet and must raise.
    run = _NewBuilderRun()
    self.assertRaises(AttributeError, getattr, run, 'version')

  def testArchivePathTrybot(self):
    # buildbot=False puts archives under the trybot archive directory.
    options = _ExtendDefaultOptions(buildbot=False)
    value = self._GetAttributeValue('archive_path', options=options)
    expected_value = ('%s/%s/%s/%s' %
                      (DEFAULT_BUILDROOT,
                       archive_lib.Archive._TRYBOT_ARCHIVE,
                       DEFAULT_BOT_NAME,
                       self._VERSION))
    self.assertEqual(expected_value, value)

  def testArchivePathBuildbot(self):
    # Default options (buildbot=True) use the buildbot archive directory.
    value = self._GetAttributeValue('archive_path')
    expected_value = ('%s/%s/%s/%s' %
                      (DEFAULT_BUILDROOT,
                       archive_lib.Archive._BUILDBOT_ARCHIVE,
                       DEFAULT_BOT_NAME,
                       self._VERSION))
    self.assertEqual(expected_value, value)

  def testUploadUri(self):
    # Upload URL is archive_base/bot_name/version.
    value = self._GetAttributeValue('upload_url')
    expected_value = '%s/%s/%s' % (DEFAULT_ARCHIVE_BASE,
                                   DEFAULT_BOT_NAME,
                                   self._VERSION)
    self.assertEqual(expected_value, value)

  def testDownloadURLBuildbot(self):
    # Download URL uses the private HTTPS base rather than the gs:// URI.
    value = self._GetAttributeValue('download_url')
    expected_value = ('%s%s/%s/%s' %
                      (archive_lib.gs.PRIVATE_BASE_HTTPS_URL,
                       DEFAULT_ARCHIVE_PREFIX,
                       DEFAULT_BOT_NAME,
                       self._VERSION))
    self.assertEqual(expected_value, value)
# Allow running this test module directly, with debug-level logging.
if __name__ == '__main__':
  cros_test_lib.main(level=logging.DEBUG)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A Jinja template loader which allows for:
- dotted-notation package loading
- search-path-based overriding of same
Dotted notation
---------------
- Allow a Tool implementer to use a dotted-notation module name
  (as occurring in the ``PYTHONPATH``), then the given path within the
module::
@expose('jinja:<module.name>:<path/within/module.html>')
e.g.::
@expose('jinja:allura:templates/repo/file.html')
Overriding dotted notation
--------------------------
Allow a Tool implementer to override the theme baseline (or any
other Tool's) templates. This can be lighter-weight than subclassing
:class:`allura.plugin.ThemeProvider`, plus will allow for more fine-grained
changes.
This will also override ``extends`` and ``import`` Jinja tags.
This approach uses a:
- ``setup.py`` entry point to a class with...
- *magic* files and...
- (optionally) a class property to specify ordering
File Structure for Overriding dotted notation
=============================================
For the examples, assume the following directory structure::
NewTool/
|- setup.py <- entry point specified here
|- newtool/
|- app.py <- entry point target here
|- templates/
| |- index.html <- Tool's regular templates
|- override <- override_root
|- allura/ <- magic directory named after module
|- templates/
|- repo/
|- file.html <- actual template
To override the above example, a Tool implementer would
add the following line to their Tool's ``setup.py`` (assuming usage in Allura,
with the default ``app_cfg``)::
[allura.theme.override]
newtool = newtool.app:NewToolApp
Then, in the neighbor path (see below) for the file containing the
Tool class, add the following path/file::
override/allura/templates/repo/file.html
The template will be overridden. Note that after changing
``setup.py``, it would be required to re-initialize with setuptools::
python setup.py develop
Specifying search path order with template_path_rules
=====================================================
If a highly specific ordering is required, such as if multiple Tools
are trying to override the same template, the entry point target
class can also contain a class property template_path_rules::
class NewToolApp(Application):
template_path_rules = [
['>', 'old-tool'],
]
Each rule specifies a positioner and an entry point or "signpost".
If no rule is provided, the default is ``['>', 'allura']``.
The "signposts" are:
- Any other app's override entry point name
- ``site-theme``
- ``allura`` (you probably shouldn't do this)
- ``project-theme`` **NOT IMPLEMENTED**
- ``tool-theme`` **NOT IMPLEMENTED**
The positioners are:
>
This overrider will be found BEFORE the specified entry point
<
This overrider will be found AFTER the specified entry point
=
This will replace one of the "signpost" entry points (if multiple apps
try to do this for the same signpost, the result is undefined)
**TODO:** Support multiple partial themes
"""
import pkg_resources
import os
import jinja2
from tg import config
from paste.deploy.converters import asbool
from ming.utils import LazyProperty
from allura.lib.helpers import topological_sort, iter_entry_points
class PackagePathLoader(jinja2.BaseLoader):
    """Jinja loader that resolves ``module:path`` template names and lets
    entry-point-registered tools override each other's templates (see the
    module docstring for the full override scheme).

    Fix: ``get_source`` previously split the template name with
    ``maxsplit=2``, which raises ValueError for names containing two or
    more colons; it now splits only on the first colon.
    """

    def __init__(self, override_entrypoint='allura.theme.override',
                 default_paths=None,
                 override_root='override',
                 ):
        '''
        Set up initial values... defaults are for Allura.
        '''
        # TODO: How does one handle project-theme?
        if default_paths is None:
            default_paths = [
                #['project-theme', None],
                ['site-theme', None],
                ['allura', '/'],
            ]
        self.override_entrypoint = override_entrypoint
        self.default_paths = default_paths
        self.override_root = override_root

    @LazyProperty
    def fs_loader(self):
        # Built lazily so entry points are enumerated only on first use.
        return jinja2.FileSystemLoader(self.init_paths())

    def _load_paths(self):
        """
        Load all the paths to be processed, including defaults, in the default order.
        """
        paths = self.default_paths[:]  # copy default_paths
        paths[-1:0] = [  # insert all eps just before last item, by default
            [ep.name, pkg_resources.resource_filename(ep.module_name, "")]
            for ep in iter_entry_points(self.override_entrypoint)
        ]
        return paths

    def _load_rules(self):
        """
        Load and pre-process the rules from the entry points.

        Rules are specified per-tool as a list of the form:

            template_path_rules = [
                ['>', 'tool1'],  # this tool must be resolved before tool1
                ['<', 'tool2'],  # this tool must be resolved after tool2
                ['=', 'tool3'],  # this tool replaces all of tool3's templates
            ]

        Returns two lists of rules, order_rules and replacement_rules.

        order_rules represents all of the '>' and '<' rules and are returned
        as a list of pairs of the form ('a', 'b') indicating that path 'a' must
        come before path 'b'.

        replacement_rules represent all of the '=' rules and are returned as
        a dictionary mapping the paths to replace to the paths to replace with.

        Raises jinja2.TemplateError for any unrecognized rule operator.
        """
        order_rules = []
        replacement_rules = {}
        for ep in iter_entry_points(self.override_entrypoint):
            for rule in getattr(ep.load(), 'template_path_rules', []):
                if rule[0] == '>':
                    order_rules.append((ep.name, rule[1]))
                elif rule[0] == '=':
                    replacement_rules[rule[1]] = ep.name
                elif rule[0] == '<':
                    order_rules.append((rule[1], ep.name))
                else:
                    raise jinja2.TemplateError(
                        'Unknown template path rule in {}: {}'.format(
                            ep.name, ' '.join(rule)))
        return order_rules, replacement_rules

    def _sort_paths(self, paths, rules):
        """
        Process all '>' and '<' rules, providing a partial ordering
        of the paths based on the given rules.

        The rules should already have been pre-processed by _load_rules
        to a list of partial ordering pairs ('a', 'b') indicating that
        path 'a' should come before path 'b'.

        Sorts *paths* in place.  (Previously this returned list.sort()'s
        None, which was misleading; callers rely on the mutation only.)
        """
        names = [p[0] for p in paths]
        # filter rules that reference non-existent paths to prevent "loops" in
        # the graph
        rules = [r for r in rules if r[0] in names and r[1] in names]
        ordered_paths = topological_sort(names, rules)
        if ordered_paths is None:
            raise jinja2.TemplateError(
                'Loop detected in ordering of overrides')
        paths.sort(key=lambda p: ordered_paths.index(p[0]))

    def _replace_signposts(self, paths, rules):
        """
        Process all '=' rules, replacing the rule target's path value with
        the rule's entry's path value.

        Multiple entries replacing the same signpost can cause indeterminate
        behavior, as the order of the entries is not entirely defined.
        However, if _sort_by_rules is called first, the partial ordering is
        respected.

        This mutates paths.
        """
        p_idx = lambda n: [e[0] for e in paths].index(n)
        for target, replacement in rules.items():
            try:
                removed = paths.pop(p_idx(replacement))
                paths[p_idx(target)][1] = removed[1]
            except ValueError:
                # target or replacement missing (may not be installed)
                pass

    def init_paths(self):
        '''
        Set up the setuptools entry point-based paths.
        '''
        paths = self._load_paths()
        order_rules, repl_rules = self._load_rules()

        self._sort_paths(paths, order_rules)
        self._replace_signposts(paths, repl_rules)

        # Signposts with no real directory (value None) are dropped.
        return [p[1] for p in paths if p[1] is not None]

    def get_source(self, environment, template):
        '''
        Returns the source for jinja2 rendered templates. Can understand...
        - path/to/template.html
        - module:path/to/template.html
        '''
        # look in all of the customized search locations...
        if not asbool(config.get('disable_template_overrides', False)):
            try:
                parts = [self.override_root] + template.split(':')
                if len(parts) > 2:
                    # dotted module name becomes nested directories
                    parts[1:2] = parts[1].split('.')
                return self.fs_loader.get_source(environment,
                                                 os.path.join(*parts))
            except jinja2.TemplateNotFound:
                # fall-back to attempt non-override loading
                pass

        if ':' in template:
            # Split only on the FIRST colon: maxsplit=2 would produce three
            # values (and a ValueError on unpacking) for names whose path
            # component itself contains a colon.
            package, path = template.split(':', 1)
            filename = pkg_resources.resource_filename(package, path)
            return self.fs_loader.get_source(environment, filename)
        else:
            return self.fs_loader.get_source(environment, template)
|
|
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.gui.DirectGui import *
from direct.showbase import PythonUtil
from direct.task import Task
from pandac.PandaModules import *
import DisplaySettingsDialog
import ShtikerPage
from otp.speedchat import SCColorScheme
from otp.speedchat import SCStaticTextTerminal
from otp.speedchat import SpeedChat
from toontown.shtiker.OptionsPageGUI import OptionTab, OptionButton, OptionLabel
from toontown.shtiker import ControlRemapDialog
from toontown.toonbase import TTLocalizer
from toontown.toontowngui import TTDialog
# SpeedChat name-color styles: each entry is (styleId, nameFgColor,
# nameBgColor, chatBgColor) with RGB components normalized from 0-255
# down to the 0-1 range Panda3D expects.
speedChatStyles = (
    (
        2000,
        (200 / 255.0, 60 / 255.0, 229 / 255.0),
        (200 / 255.0, 135 / 255.0, 255 / 255.0),
        (220 / 255.0, 195 / 255.0, 229 / 255.0)
    ),
    (
        2012,
        (142 / 255.0, 151 / 255.0, 230 / 255.0),
        (173 / 255.0, 180 / 255.0, 237 / 255.0),
        (220 / 255.0, 195 / 255.0, 229 / 255.0)
    ),
    (
        2001,
        (0 / 255.0, 0 / 255.0, 255 / 255.0),
        (140 / 255.0, 150 / 255.0, 235 / 255.0),
        (201 / 255.0, 215 / 255.0, 255 / 255.0)
    ),
    (
        2010,
        (0 / 255.0, 119 / 255.0, 190 / 255.0),
        (53 / 255.0, 180 / 255.0, 255 / 255.0),
        (201 / 255.0, 215 / 255.0, 255 / 255.0)
    ),
    (
        2014,
        (0 / 255.0, 64 / 255.0, 128 / 255.0),
        (0 / 255.0, 64 / 255.0, 128 / 255.0),
        (201 / 255.0, 215 / 255.0, 255 / 255.0)
    ),
    (
        2002,
        (90 / 255.0, 175 / 255.0, 225 / 255.0),
        (120 / 255.0, 215 / 255.0, 255 / 255.0),
        (208 / 255.0, 230 / 255.0, 250 / 255.0)
    ),
    (
        2003,
        (130 / 255.0, 235 / 255.0, 235 / 255.0),
        (120 / 255.0, 225 / 255.0, 225 / 255.0),
        (234 / 255.0, 255 / 255.0, 255 / 255.0)
    ),
    (
        2004,
        (0 / 255.0, 200 / 255.0, 70 / 255.0),
        (0 / 255.0, 200 / 255.0, 80 / 255.0),
        (204 / 255.0, 255 / 255.0, 204 / 255.0)
    ),
    (
        2015,
        (13 / 255.0, 255 / 255.0, 100 / 255.0),
        (64 / 255.0, 255 / 255.0, 131 / 255.0),
        (204 / 255.0, 255 / 255.0, 204 / 255.0)
    ),
    (
        2005,
        (235 / 255.0, 230 / 255.0, 0 / 255.0),
        (255 / 255.0, 250 / 255.0, 100 / 255.0),
        (255 / 255.0, 250 / 255.0, 204 / 255.0)
    ),
    (
        2006,
        (255 / 255.0, 153 / 255.0, 0 / 255.0),
        (229 / 255.0, 147 / 255.0, 0 / 255.0),
        (255 / 255.0, 234 / 255.0, 204 / 255.0)
    ),
    (
        2011,
        (255 / 255.0, 177 / 255.0, 62 / 255.0),
        (255 / 255.0, 200 / 255.0, 117 / 255.0),
        (255 / 255.0, 234 / 255.0, 204 / 255.0)
    ),
    (
        2007,
        (255 / 255.0, 0 / 255.0, 50 / 255.0),
        (229 / 255.0, 0 / 255.0, 50 / 255.0),
        (255 / 255.0, 204 / 255.0, 204 / 255.0)
    ),
    (
        2013,
        (130 / 255.0, 0 / 255.0, 26 / 255.0),
        (179 / 255.0, 0 / 255.0, 50 / 255.0),
        (255 / 255.0, 204 / 255.0, 204 / 255.0)
    ),
    (
        2016,
        (176 / 255.0, 35 / 255.0, 0 / 255.0),
        (240 / 255.0, 48 / 255.0, 0 / 255.0),
        (255 / 255.0, 204 / 255.0, 204 / 255.0)
    ),
    (
        2008,
        (255 / 255.0, 153 / 255.0, 193 / 255.0),
        (240 / 255.0, 157 / 255.0, 192 / 255.0),
        (255 / 255.0, 215 / 255.0, 238 / 255.0)
    ),
    (
        2009,
        (170 / 255.0, 120 / 255.0, 20 / 255.0),
        (165 / 255.0, 120 / 255.0, 50 / 255.0),
        (210 / 255.0, 200 / 255.0, 180 / 255.0)
    )
)
# Tab identifiers for the OptionsPage below.
PageMode = PythonUtil.Enum('Options, Codes, MoreOptions')
class OptionsPage(ShtikerPage.ShtikerPage):
    """Shtiker-book page hosting the Options, Codes and More Options tabs.

    Fixes over the previous revision: exit() now also exits the More
    Options sub-page, and unload() tears down the More Options page and
    tab button, which were previously leaked.
    """
    notify = directNotify.newCategory('OptionsPage')

    def __init__(self):
        ShtikerPage.ShtikerPage.__init__(self)
        # All widgets are created in load() and destroyed in unload().
        self.optionsTabPage = None
        self.codesTabPage = None
        self.moreOptionsTabPage = None
        self.title = None
        self.optionsTab = None
        self.codesTab = None
        self.moreOptionsTab = None

    def load(self):
        """Build the three sub-pages, the page title and the tab buttons."""
        ShtikerPage.ShtikerPage.load(self)
        self.optionsTabPage = OptionsTabPage(self)
        self.optionsTabPage.hide()
        self.codesTabPage = CodesTabPage(self)
        self.codesTabPage.hide()
        self.moreOptionsTabPage = MoreOptionsTabPage(self)
        self.moreOptionsTabPage.hide()
        self.title = DirectLabel(
            parent=self, relief=None, text=TTLocalizer.OptionsPageTitle,
            text_scale=0.12, pos=(0, 0, 0.61))
        self.optionsTab = OptionTab(
            parent=self, tabType=1, text=TTLocalizer.OptionsPageTitle, text_scale=TTLocalizer.OPoptionsTab,
            text_pos=(0.01, 0.0, 0.0), image_pos=(0.55, 1, -0.91), pos=(-0.64, 0, 0.77),
            command=self.setMode, extraArgs=[PageMode.Options])
        self.codesTab = OptionTab(
            parent=self, text=TTLocalizer.OptionsPageCodesTab, text_scale=TTLocalizer.OPoptionsTab,
            text_pos=(-0.035, 0.0, 0.0), image_pos=(0.12, 1, -0.91), pos=(-0.12, 0, 0.77),
            command=self.setMode, extraArgs=[PageMode.Codes])
        self.moreOptionsTab = OptionTab(
            parent=self, relief=None, text=TTLocalizer.MoreOptionsPageTitle, text_scale=TTLocalizer.OPmoreOptionsTab,
            text_pos=(-0.045, 0.0, 0.0), image_pos=(0.12, 1, -0.91), pos=(0.42, 0, 0.77),
            command=self.setMode, extraArgs=[PageMode.MoreOptions])

    def enter(self):
        """Show the page, defaulting to the Options tab."""
        self.setMode(PageMode.Options, updateAnyways=1)
        ShtikerPage.ShtikerPage.enter(self)

    def exit(self):
        """Leave the page, exiting every sub-page."""
        self.optionsTabPage.exit()
        self.codesTabPage.exit()
        # Bug fix: the More Options sub-page was previously left entered.
        self.moreOptionsTabPage.exit()
        ShtikerPage.ShtikerPage.exit(self)

    def unload(self):
        """Destroy everything created in load().

        Bug fix: moreOptionsTabPage and moreOptionsTab were previously
        never cleaned up here, leaking GUI elements.
        """
        if self.optionsTabPage is not None:
            self.optionsTabPage.unload()
            self.optionsTabPage = None
        if self.codesTabPage is not None:
            self.codesTabPage.unload()
            self.codesTabPage = None
        if self.moreOptionsTabPage is not None:
            # NOTE(review): assumes MoreOptionsTabPage implements unload()
            # like its sibling tab pages -- confirm against its definition.
            self.moreOptionsTabPage.unload()
            self.moreOptionsTabPage = None
        if self.title is not None:
            self.title.destroy()
            self.title = None
        if self.optionsTab is not None:
            self.optionsTab.destroy()
            self.optionsTab = None
        if self.codesTab is not None:
            self.codesTab.destroy()
            self.codesTab = None
        if self.moreOptionsTab is not None:
            self.moreOptionsTab.destroy()
            self.moreOptionsTab = None
        ShtikerPage.ShtikerPage.unload(self)

    def setMode(self, mode, updateAnyways=0):
        """Switch tabs: disable the active tab button, enter its page and
        exit the others."""
        messenger.send('wakeup')
        if not updateAnyways:
            if self.mode == mode:
                return
        self.mode = mode
        if mode == PageMode.Options:
            self.title['text'] = TTLocalizer.OptionsPageTitle
            self.optionsTab['state'] = DGG.DISABLED
            self.optionsTabPage.enter()
            self.codesTab['state'] = DGG.NORMAL
            self.codesTabPage.exit()
            self.moreOptionsTab['state'] = DGG.NORMAL
            self.moreOptionsTabPage.exit()
        elif mode == PageMode.Codes:
            self.title['text'] = TTLocalizer.CdrPageTitle
            self.optionsTab['state'] = DGG.NORMAL
            self.optionsTabPage.exit()
            self.moreOptionsTab['state'] = DGG.NORMAL
            self.moreOptionsTabPage.exit()
            self.codesTab['state'] = DGG.DISABLED
            self.codesTabPage.enter()
        elif mode == PageMode.MoreOptions:
            self.title['text'] = TTLocalizer.MoreOptionsPageTitle
            self.optionsTab['state'] = DGG.NORMAL
            self.optionsTabPage.exit()
            self.codesTab['state'] = DGG.NORMAL
            self.codesTabPage.exit()
            self.moreOptionsTab['state'] = DGG.DISABLED
            self.moreOptionsTabPage.enter()
class OptionsTabPage(DirectFrame):
    """The main Options tab: sound, friends, display and SpeedChat settings."""
    notify = directNotify.newCategory('OptionsTabPage')
    # Name and delay of the deferred task that persists display settings.
    DisplaySettingsTaskName = 'save-display-settings'
    DisplaySettingsDelay = 60
    ChangeDisplaySettings = base.config.GetBool('change-display-settings', 1)
    ChangeDisplayAPI = base.config.GetBool('change-display-api', 0)
    def __init__(self, parent=aspect2d):
        self.parent = parent
        self.currentSizeIndex = None
        DirectFrame.__init__(
            self, parent=self.parent, relief=None, pos=(
                0.0, 0.0, 0.0), scale=(
                1.0, 1.0, 1.0))
        self.load()
    def destroy(self):
        # Drop the back-reference to the owning page before destroying.
        self.parent = None
        DirectFrame.destroy(self)
    def load(self):
        """Create every label, toggle button and the SpeedChat style picker."""
        self.displaySettings = None
        self.displaySettingsChanged = 0
        self.displaySettingsSize = (None, None)
        self.displaySettingsFullscreen = None
        self.displaySettingsApi = None
        self.displaySettingsApiChanged = 0
        self.speed_chat_scale = 0.055
        # Layout constants for the option rows.
        buttonbase_ycoord = 0.45
        textRowHeight = 0.145
        textStartHeight = 0.45
        self.Music_Label = OptionLabel(parent=self)
        self.SoundFX_Label = OptionLabel(parent=self, z=textStartHeight - textRowHeight)
        self.Friends_Label = OptionLabel(parent=self, z=textStartHeight - 3 * textRowHeight)
        self.Whispers_Label = OptionLabel(parent=self, z=textStartHeight - 4 * textRowHeight)
        self.DisplaySettings_Label = OptionLabel(parent=self, text_wordwrap=10, z=textStartHeight - 5 * textRowHeight)
        self.SpeedChatStyle_Label = OptionLabel(parent=self, text=TTLocalizer.OptionsPageSpeedChatStyleLabel,
                                                text_wordwrap=10, z=textStartHeight - 6 * textRowHeight)
        self.ToonChatSounds_Label = OptionLabel(parent=self, z=textStartHeight - 2 * textRowHeight + 0.025)
        self.ToonChatSounds_Label.setScale(0.9)
        self.Music_toggleButton = OptionButton(parent=self, command=self.__doToggleMusic)
        self.SoundFX_toggleButton = OptionButton(parent=self, z=buttonbase_ycoord - textRowHeight,
                                                 command=self.__doToggleSfx)
        self.Friends_toggleButton = OptionButton(parent=self, z=buttonbase_ycoord - textRowHeight * 3,
                                                 command=self.__doToggleAcceptFriends)
        self.Whispers_toggleButton = OptionButton(parent=self, z=buttonbase_ycoord - textRowHeight * 4,
                                                  command=self.__doToggleAcceptWhispers)
        self.DisplaySettingsButton = OptionButton(parent=self, image3_color=Vec4(0.5, 0.5, 0.5, 0.5),
                                                  text=TTLocalizer.OptionsPageChange, z=buttonbase_ycoord - textRowHeight * 5,
                                                  command=self.__doDisplaySettings)
        gui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
        # Negative x-scale mirrors the shared arrow art for the left button.
        self.speedChatStyleLeftArrow = DirectButton(
            parent=self,
            relief=None,
            image=(
                gui.find('**/Horiz_Arrow_UP'),
                gui.find('**/Horiz_Arrow_DN'),
                gui.find('**/Horiz_Arrow_Rllvr'),
                gui.find('**/Horiz_Arrow_UP')),
            image3_color=Vec4(
                1,
                1,
                1,
                0.5),
            scale=(
                -1.0,
                1.0,
                1.0),
            pos=(
                0.25,
                0,
                buttonbase_ycoord - textRowHeight * 6),
            command=self.__doSpeedChatStyleLeft)
        self.speedChatStyleRightArrow = DirectButton(
            parent=self,
            relief=None,
            image=(
                gui.find('**/Horiz_Arrow_UP'),
                gui.find('**/Horiz_Arrow_DN'),
                gui.find('**/Horiz_Arrow_Rllvr'),
                gui.find('**/Horiz_Arrow_UP')),
            image3_color=Vec4(
                1,
                1,
                1,
                0.5),
            pos=(
                0.65,
                0,
                buttonbase_ycoord -
                textRowHeight *
                6),
            command=self.__doSpeedChatStyleRight)
        self.ToonChatSounds_toggleButton = OptionButton(parent=self, image3_color=Vec4(0.5, 0.5, 0.5, 0.5),
                                                        z=buttonbase_ycoord - textRowHeight * 2 + 0.025,
                                                        command=self.__doToggleToonChatSounds)
        self.ToonChatSounds_toggleButton.setScale(0.8)
        self.speedChatStyleText = SpeedChat.SpeedChat(name='OptionsPageStyleText',
                                                      structure=[2000],
                                                      backgroundModelName='phase_3/models/gui/ChatPanel',
                                                      guiModelName='phase_3.5/models/gui/speedChatGui')
        self.speedChatStyleText.setScale(self.speed_chat_scale)
        self.speedChatStyleText.setPos(0.37, 0, buttonbase_ycoord - textRowHeight * 6 + 0.03)
        self.speedChatStyleText.reparentTo(self, DGG.FOREGROUND_SORT_INDEX)
        self.exitButton = OptionButton(parent=self, image_scale=1.15, text=TTLocalizer.OptionsPageExitToontown,
                                       pos=(0.45, 0, -0.6), command=self.__handleExitShowWithConfirm)
        gui.removeNode()
    def enter(self):
        """Show the tab and refresh every control from the live game state."""
        self.show()
        taskMgr.remove(self.DisplaySettingsTaskName)
        self.settingsChanged = 0
        self.__setMusicButton()
        self.__setSoundFXButton()
        self.__setAcceptFriendsButton()
        self.__setAcceptWhispersButton()
        self.__setDisplaySettings()
        self.__setToonChatSoundsButton()
        self.speedChatStyleText.enter()
        self.speedChatStyleIndex = base.localAvatar.getSpeedChatStyleIndex()
        self.updateSpeedChatStyle()
        if self.parent.book.safeMode:
            self.exitButton.hide()
        else:
            self.exitButton.show()
    def exit(self):
        self.ignore('confirmDone')
        self.hide()
        self.speedChatStyleText.exit()
        # Persist display changes after a delay so rapid tab flips coalesce
        # into a single write.
        if self.displaySettingsChanged:
            taskMgr.doMethodLater(
                self.DisplaySettingsDelay,
                self.writeDisplaySettings,
                self.DisplaySettingsTaskName)
    def unload(self):
        """Flush pending display settings and destroy every widget."""
        self.writeDisplaySettings()
        taskMgr.remove(self.DisplaySettingsTaskName)
        if self.displaySettings is not None:
            self.ignore(self.displaySettings.doneEvent)
            self.displaySettings.unload()
            self.displaySettings = None
        self.exitButton.destroy()
        self.Music_toggleButton.destroy()
        self.SoundFX_toggleButton.destroy()
        self.Friends_toggleButton.destroy()
        self.Whispers_toggleButton.destroy()
        self.DisplaySettingsButton.destroy()
        # BUGFIX: this button was created in load() but never destroyed.
        self.ToonChatSounds_toggleButton.destroy()
        self.speedChatStyleLeftArrow.destroy()
        self.speedChatStyleRightArrow.destroy()
        del self.exitButton
        del self.SoundFX_Label
        del self.Music_Label
        del self.Friends_Label
        del self.Whispers_Label
        del self.SpeedChatStyle_Label
        # BUGFIX: these references were previously leaked by unload().
        del self.DisplaySettings_Label
        del self.ToonChatSounds_Label
        del self.SoundFX_toggleButton
        del self.Music_toggleButton
        del self.Friends_toggleButton
        del self.Whispers_toggleButton
        del self.DisplaySettingsButton
        del self.ToonChatSounds_toggleButton
        del self.speedChatStyleLeftArrow
        del self.speedChatStyleRightArrow
        self.speedChatStyleText.exit()
        self.speedChatStyleText.destroy()
        del self.speedChatStyleText
        self.currentSizeIndex = None
    def __doToggleMusic(self):
        messenger.send('wakeup')
        if base.musicActive:
            base.enableMusic(0)
            settings['music'] = False
        else:
            base.enableMusic(1)
            settings['music'] = True
        self.settingsChanged = 1
        self.__setMusicButton()
    def __setMusicButton(self):
        # Label describes the current state; button offers the opposite.
        if base.musicActive:
            self.Music_Label['text'] = TTLocalizer.OptionsPageMusicOnLabel
            self.Music_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
        else:
            self.Music_Label['text'] = TTLocalizer.OptionsPageMusicOffLabel
            self.Music_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
    def __doToggleSfx(self):
        messenger.send('wakeup')
        if base.sfxActive:
            base.enableSoundEffects(0)
            settings['sfx'] = False
        else:
            base.enableSoundEffects(1)
            settings['sfx'] = True
        self.settingsChanged = 1
        self.__setSoundFXButton()
    def __doToggleToonChatSounds(self):
        messenger.send('wakeup')
        if base.toonChatSounds:
            base.toonChatSounds = 0
            settings['toonChatSounds'] = False
        else:
            base.toonChatSounds = 1
            settings['toonChatSounds'] = True
        self.settingsChanged = 1
        self.__setToonChatSoundsButton()
    def __setSoundFXButton(self):
        if base.sfxActive:
            self.SoundFX_Label['text'] = TTLocalizer.OptionsPageSFXOnLabel
            self.SoundFX_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
        else:
            self.SoundFX_Label['text'] = TTLocalizer.OptionsPageSFXOffLabel
            self.SoundFX_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
        # Toon chat sounds depend on SFX being enabled; refresh them too.
        self.__setToonChatSoundsButton()
    def __setToonChatSoundsButton(self):
        if base.toonChatSounds:
            self.ToonChatSounds_Label['text'] = TTLocalizer.OptionsPageToonChatSoundsOnLabel
            self.ToonChatSounds_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
        else:
            self.ToonChatSounds_Label['text'] = TTLocalizer.OptionsPageToonChatSoundsOffLabel
            self.ToonChatSounds_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
        # Grey out the row entirely while sound effects are off.
        if base.sfxActive:
            self.ToonChatSounds_Label.setColorScale(1.0, 1.0, 1.0, 1.0)
            self.ToonChatSounds_toggleButton['state'] = DGG.NORMAL
        else:
            self.ToonChatSounds_Label.setColorScale(0.5, 0.5, 0.5, 0.5)
            self.ToonChatSounds_toggleButton['state'] = DGG.DISABLED
    def __doToggleAcceptFriends(self):
        messenger.send('wakeup')
        # Preference is stored per-avatar, keyed by the avatar's doId.
        acceptingNewFriends = settings.get('acceptingNewFriends', {})
        if base.localAvatar.acceptingNewFriends:
            base.localAvatar.acceptingNewFriends = 0
            acceptingNewFriends[str(base.localAvatar.doId)] = False
        else:
            base.localAvatar.acceptingNewFriends = 1
            acceptingNewFriends[str(base.localAvatar.doId)] = True
        settings['acceptingNewFriends'] = acceptingNewFriends
        self.settingsChanged = 1
        self.__setAcceptFriendsButton()
    def __doToggleAcceptWhispers(self):
        messenger.send('wakeup')
        # Preference is stored per-avatar, keyed by the avatar's doId.
        acceptingNonFriendWhispers = settings.get('acceptingNonFriendWhispers', {})
        if base.localAvatar.acceptingNonFriendWhispers:
            base.localAvatar.acceptingNonFriendWhispers = 0
            acceptingNonFriendWhispers[str(base.localAvatar.doId)] = False
        else:
            base.localAvatar.acceptingNonFriendWhispers = 1
            acceptingNonFriendWhispers[str(base.localAvatar.doId)] = True
        settings['acceptingNonFriendWhispers'] = acceptingNonFriendWhispers
        self.settingsChanged = 1
        self.__setAcceptWhispersButton()
    def __setAcceptFriendsButton(self):
        if base.localAvatar.acceptingNewFriends:
            self.Friends_Label['text'] = TTLocalizer.OptionsPageFriendsEnabledLabel
            self.Friends_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
        else:
            self.Friends_Label['text'] = TTLocalizer.OptionsPageFriendsDisabledLabel
            self.Friends_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
    def __setAcceptWhispersButton(self):
        if base.localAvatar.acceptingNonFriendWhispers:
            self.Whispers_Label['text'] = TTLocalizer.OptionsPageWhisperEnabledLabel
            self.Whispers_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
        else:
            self.Whispers_Label['text'] = TTLocalizer.OptionsPageWhisperDisabledLabel
            self.Whispers_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
    def __doDisplaySettings(self):
        # Lazily create the display-settings dialog on first use.
        if self.displaySettings is None:
            self.displaySettings = DisplaySettingsDialog.DisplaySettingsDialog()
            self.displaySettings.load()
            self.accept(self.displaySettings.doneEvent, self.__doneDisplaySettings)
        self.displaySettings.enter(self.ChangeDisplaySettings, self.ChangeDisplayAPI)
    def __doneDisplaySettings(self, anyChanged, apiChanged):
        """Dialog callback: record the new window properties for later saving."""
        if anyChanged:
            self.__setDisplaySettings()
            properties = base.win.getProperties()
            self.displaySettingsChanged = 1
            self.displaySettingsSize = (properties.getXSize(), properties.getYSize())
            self.displaySettingsFullscreen = properties.getFullscreen()
            self.displaySettingsApi = base.pipe.getInterfaceName()
            self.displaySettingsApiChanged = apiChanged
    def __setDisplaySettings(self):
        """Refresh the display-settings label from the current window state."""
        properties = base.win.getProperties()
        if properties.getFullscreen():
            screensize = '%s x %s' % (properties.getXSize(), properties.getYSize())
        else:
            screensize = TTLocalizer.OptionsPageDisplayWindowed
        api = base.pipe.getInterfaceName()
        settings = {'screensize': screensize,
                    'api': api}
        if self.ChangeDisplayAPI:
            OptionsPage.notify.debug('change display settings...')
            text = TTLocalizer.OptionsPageDisplaySettings % settings
        else:
            OptionsPage.notify.debug('no change display settings...')
            text = TTLocalizer.OptionsPageDisplaySettingsNoApi % settings
        self.DisplaySettings_Label['text'] = text
    def __doSpeedChatStyleLeft(self):
        if self.speedChatStyleIndex > 0:
            self.speedChatStyleIndex = self.speedChatStyleIndex - 1
            self.updateSpeedChatStyle()
    def __doSpeedChatStyleRight(self):
        if self.speedChatStyleIndex < len(speedChatStyles) - 1:
            self.speedChatStyleIndex = self.speedChatStyleIndex + 1
            self.updateSpeedChatStyle()
    def updateSpeedChatStyle(self):
        """Apply the selected style to the preview widget and save the index."""
        nameKey, arrowColor, rolloverColor, frameColor = speedChatStyles[self.speedChatStyleIndex]
        newSCColorScheme = SCColorScheme.SCColorScheme(
            arrowColor=arrowColor,
            rolloverColor=rolloverColor,
            frameColor=frameColor)
        self.speedChatStyleText.setColorScheme(newSCColorScheme)
        self.speedChatStyleText.clearMenu()
        colorName = SCStaticTextTerminal.SCStaticTextTerminal(nameKey)
        self.speedChatStyleText.append(colorName)
        self.speedChatStyleText.finalize()
        # Re-center the preview text between the two arrows.
        self.speedChatStyleText.setPos(
            0.445 -
            self.speedChatStyleText.getWidth() *
            self.speed_chat_scale /
            2,
            0,
            self.speedChatStyleText.getPos()[2])
        if self.speedChatStyleIndex > 0:
            self.speedChatStyleLeftArrow['state'] = DGG.NORMAL
        else:
            self.speedChatStyleLeftArrow['state'] = DGG.DISABLED
        if self.speedChatStyleIndex < len(speedChatStyles) - 1:
            self.speedChatStyleRightArrow['state'] = DGG.NORMAL
        else:
            self.speedChatStyleRightArrow['state'] = DGG.DISABLED
        base.localAvatar.b_setSpeedChatStyleIndex(self.speedChatStyleIndex)
    def writeDisplaySettings(self, task=None):
        """Persist the recorded display settings; safe to call repeatedly."""
        if not self.displaySettingsChanged:
            return
        taskMgr.remove(self.DisplaySettingsTaskName)
        settings['res'] = (self.displaySettingsSize[0], self.displaySettingsSize[1])
        settings['fullscreen'] = self.displaySettingsFullscreen
        # BUGFIX: clear the dirty flag so later exits do not rewrite the
        # same settings over and over.
        self.displaySettingsChanged = 0
        return Task.done
    def __handleExitShowWithConfirm(self):
        self.confirm = TTDialog.TTGlobalDialog(
            doneEvent='confirmDone',
            message=TTLocalizer.OptionsPageExitConfirm,
            style=TTDialog.TwoChoice)
        self.confirm.show()
        self.parent.doneStatus = {'mode': 'exit',
                                  'exitTo': 'closeShard'}
        self.accept('confirmDone', self.__handleConfirm)
    def __handleConfirm(self):
        """Confirm-dialog callback: log out if the player chose OK."""
        status = self.confirm.doneStatus
        self.ignore('confirmDone')
        self.confirm.cleanup()
        del self.confirm
        if status == 'ok':
            base.cr._userLoggingOut = True
            messenger.send(self.parent.doneEvent)
class CodesTabPage(DirectFrame):
    """Shtiker-book tab that lets the player redeem promotional codes."""
    notify = directNotify.newCategory('CodesTabPage')
    def __init__(self, parent=aspect2d):
        self.parent = parent
        DirectFrame.__init__(
            self, parent=self.parent, relief=None, pos=(
                0.0, 0.0, 0.0), scale=(
                1.0, 1.0, 1.0))
        self.load()
        return
    def destroy(self):
        # Drop the back-reference to the owning page before destroying.
        self.parent = None
        DirectFrame.destroy(self)
        return
    def load(self):
        """Build the instruction panel, code entry box and result panel."""
        cdrGui = loader.loadModel('phase_3.5/models/gui/tt_m_gui_sbk_codeRedemptionGui')
        instructionGui = cdrGui.find('**/tt_t_gui_sbk_cdrPresent')
        flippyGui = cdrGui.find('**/tt_t_gui_sbk_cdrFlippy')
        codeBoxGui = cdrGui.find('**/tt_t_gui_sbk_cdrCodeBox')
        # Three result backgrounds, picked based on the redemption outcome.
        self.resultPanelSuccessGui = cdrGui.find('**/tt_t_gui_sbk_cdrResultPanel_success')
        self.resultPanelFailureGui = cdrGui.find('**/tt_t_gui_sbk_cdrResultPanel_failure')
        self.resultPanelErrorGui = cdrGui.find('**/tt_t_gui_sbk_cdrResultPanel_error')
        self.successSfx = base.loadSfx('phase_3.5/audio/sfx/tt_s_gui_sbk_cdrSuccess.ogg')
        self.failureSfx = base.loadSfx('phase_3.5/audio/sfx/tt_s_gui_sbk_cdrFailure.ogg')
        self.instructionPanel = DirectFrame(parent=self,
                                            relief=None,
                                            image=instructionGui,
                                            image_scale=0.8,
                                            text=TTLocalizer.CdrInstructions,
                                            text_pos=TTLocalizer.OPCodesInstructionPanelTextPos,
                                            text_align=TextNode.ACenter,
                                            text_scale=TTLocalizer.OPCodesResultPanelTextScale,
                                            text_wordwrap=TTLocalizer.OPCodesInstructionPanelTextWordWrap,
                                            pos=(-0.429,
                                            0,
                                            -0.05))
        self.codeBox = DirectFrame(parent=self, relief=None, image=codeBoxGui, pos=(0.433, 0, 0.35))
        self.flippyFrame = DirectFrame(
            parent=self, relief=None, image=flippyGui, pos=(
                0.44, 0, -0.353))
        # Single-line entry; pressing Enter triggers __submitCode.
        self.codeInput = DirectEntry(parent=self.codeBox,
                                     relief=DGG.GROOVE,
                                     scale=0.08,
                                     pos=(-0.33,
                                     0,
                                     -0.006),
                                     borderWidth=(0.05,
                                     0.05),
                                     frameColor=((1,
                                     1,
                                     1,
                                     1),
                                     (1,
                                     1,
                                     1,
                                     1),
                                     (0.5,
                                     0.5,
                                     0.5,
                                     0.5)),
                                     state=DGG.NORMAL,
                                     text_align=TextNode.ALeft,
                                     text_scale=TTLocalizer.OPCodesInputTextScale,
                                     width=10.5,
                                     numLines=1,
                                     focus=1,
                                     backgroundFocus=0,
                                     cursorKeys=1,
                                     text_fg=(0,
                                     0,
                                     0,
                                     1),
                                     suppressMouse=1,
                                     autoCapitalize=0,
                                     command=self.__submitCode)
        submitButtonGui = loader.loadModel('phase_3/models/gui/quit_button')
        self.submitButton = DirectButton(
            parent=self,
            relief=None,
            image=(
                submitButtonGui.find('**/QuitBtn_UP'),
                submitButtonGui.find('**/QuitBtn_DN'),
                submitButtonGui.find('**/QuitBtn_RLVR'),
                submitButtonGui.find('**/QuitBtn_UP')),
            image3_color=Vec4(
                0.5,
                0.5,
                0.5,
                0.5),
            image_scale=1.15,
            state=DGG.NORMAL,
            text=TTLocalizer.NameShopSubmitButton,
            text_scale=TTLocalizer.OPCodesSubmitTextScale,
            text_align=TextNode.ACenter,
            text_pos=TTLocalizer.OPCodesSubmitTextPos,
            text3_fg=(
                0.5,
                0.5,
                0.5,
                0.75),
            textMayChange=0,
            pos=(
                0.45,
                0.0,
                0.0896),
            command=self.__submitCode)
        # Result panel is hidden until a redemption attempt completes.
        self.resultPanel = DirectFrame(parent=self,
                                       relief=None,
                                       image=self.resultPanelSuccessGui,
                                       text='',
                                       text_pos=TTLocalizer.OPCodesResultPanelTextPos,
                                       text_align=TextNode.ACenter,
                                       text_scale=TTLocalizer.OPCodesResultPanelTextScale,
                                       text_wordwrap=TTLocalizer.OPCodesResultPanelTextWordWrap,
                                       pos=(-0.42,
                                       0,
                                       -0.0567))
        self.resultPanel.hide()
        closeButtonGui = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
        self.closeButton = DirectButton(
            parent=self.resultPanel,
            pos=(
                0.296,
                0,
                -0.466),
            relief=None,
            state=DGG.NORMAL,
            image=(
                closeButtonGui.find('**/CloseBtn_UP'),
                closeButtonGui.find('**/CloseBtn_DN'),
                closeButtonGui.find('**/CloseBtn_Rllvr')),
            image_scale=(
                1,
                1,
                1),
            command=self.__hideResultPanel)
        closeButtonGui.removeNode()
        cdrGui.removeNode()
        submitButtonGui.removeNode()
        return
    def enter(self):
        """Show the tab and route keyboard input to the code entry."""
        self.show()
        # Grab the chat FSM so typing goes to the entry, not SpeedChat.
        localAvatar.chatMgr.fsm.request('otherDialog')
        self.codeInput['focus'] = 1
        self.codeInput.enterText('')
        self.__enableCodeEntry()
    def exit(self):
        self.resultPanel.hide()
        self.hide()
        # Give keyboard control back to the normal chat interface.
        localAvatar.chatMgr.fsm.request('mainMenu')
    def unload(self):
        """Destroy every widget created in load() and drop sound references."""
        self.instructionPanel.destroy()
        self.instructionPanel = None
        self.codeBox.destroy()
        self.codeBox = None
        self.flippyFrame.destroy()
        self.flippyFrame = None
        self.codeInput.destroy()
        self.codeInput = None
        self.submitButton.destroy()
        self.submitButton = None
        self.resultPanel.destroy()
        self.resultPanel = None
        self.closeButton.destroy()
        self.closeButton = None
        del self.successSfx
        del self.failureSfx
        return
    def __submitCode(self, input=None):
        # Called both by the entry's Enter key (with the text) and by the
        # submit button (with no argument).
        if input is None:
            input = self.codeInput.get()
            self.codeInput['focus'] = 1
        if input == '':
            return
        messenger.send('wakeup')
        if hasattr(base, 'codeRedemptionMgr'):
            base.codeRedemptionMgr.redeemCode(input, self.__getCodeResult)
        self.codeInput.enterText('')
        # Block further input until the server responds.
        self.__disableCodeEntry()
        return
    def __getCodeResult(self, result, awardMgrResult):
        """Server callback: show the proper result panel and play a sound."""
        self.notify.debug('result = %s' % result)
        self.notify.debug('awardMgrResult = %s' % awardMgrResult)
        self.__enableCodeEntry()
        if result == 0:
            self.resultPanel['image'] = self.resultPanelSuccessGui
            self.resultPanel['text'] = TTLocalizer.CdrResultSuccess
        elif result == 1 or result == 3:
            self.resultPanel['image'] = self.resultPanelFailureGui
            self.resultPanel['text'] = TTLocalizer.CdrResultInvalidCode
        elif result == 2:
            self.resultPanel['image'] = self.resultPanelFailureGui
            self.resultPanel['text'] = TTLocalizer.CdrResultExpiredCode
        elif result == 4:
            # Code was valid but the award manager reported a condition.
            self.resultPanel['image'] = self.resultPanelErrorGui
            if awardMgrResult == 0:
                self.resultPanel['text'] = TTLocalizer.CdrResultSuccess
            elif awardMgrResult == 1 or awardMgrResult == 2 or awardMgrResult == 15 or awardMgrResult == 16:
                self.resultPanel['text'] = TTLocalizer.CdrResultUnknownError
            elif awardMgrResult == 3 or awardMgrResult == 4:
                self.resultPanel['text'] = TTLocalizer.CdrResultMailboxFull
            elif awardMgrResult == 5 or awardMgrResult == 10:
                self.resultPanel['text'] = TTLocalizer.CdrResultAlreadyInMailbox
            elif awardMgrResult == 6 or awardMgrResult == 7 or awardMgrResult == 11:
                self.resultPanel['text'] = TTLocalizer.CdrResultAlreadyInQueue
            elif awardMgrResult == 8:
                self.resultPanel['text'] = TTLocalizer.CdrResultAlreadyInCloset
            elif awardMgrResult == 9:
                self.resultPanel['text'] = TTLocalizer.CdrResultAlreadyBeingWorn
            elif awardMgrResult == 12 or awardMgrResult == 13 or awardMgrResult == 14:
                self.resultPanel['text'] = TTLocalizer.CdrResultAlreadyReceived
        elif result == 5:
            # Too many failed attempts: entry stays disabled.
            self.resultPanel['text'] = TTLocalizer.CdrResultTooManyFails
            self.__disableCodeEntry()
        elif result == 6:
            self.resultPanel['text'] = TTLocalizer.CdrResultServiceUnavailable
            self.__disableCodeEntry()
        if result == 0:
            self.successSfx.play()
        else:
            self.failureSfx.play()
        self.resultPanel.show()
    def __hideResultPanel(self):
        self.resultPanel.hide()
    def __disableCodeEntry(self):
        self.codeInput['state'] = DGG.DISABLED
        self.submitButton['state'] = DGG.DISABLED
    def __enableCodeEntry(self):
        self.codeInput['state'] = DGG.NORMAL
        self.codeInput['focus'] = 1
        self.submitButton['state'] = DGG.NORMAL
class MoreOptionsTabPage(DirectFrame):
    """Extra options tab: custom WASD controls and key remapping."""
    notify = directNotify.newCategory('MoreOptionsTabPage')
    def __init__(self, parent=aspect2d):
        self.parent = parent
        self.currentSizeIndex = None
        DirectFrame.__init__(
            self, parent=self.parent, relief=None, pos=(
                0.0, 0.0, 0.0), scale=(
                1.0, 1.0, 1.0))
        self.load()
    def destroy(self):
        # Drop the back-reference to the owning page before destroying.
        self.parent = None
        DirectFrame.destroy(self)
    def load(self):
        """Create the WASD toggle row and the keymap-dialog button."""
        guiButton = loader.loadModel('phase_3/models/gui/quit_button')
        gui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
        # Layout constants shared with the main options tab.
        titleHeight = 0.61
        textStartHeight = 0.45
        textRowHeight = 0.145
        leftMargin = -0.72
        buttonbase_xcoord = 0.16
        buttonbase_ycoord = 0.45
        button_image_scale = (0.7, 1, 1)
        button_textpos = (0, -0.02)
        options_text_scale = 0.052
        disabled_arrow_color = Vec4(0.6, 0.6, 0.6, 1.0)
        self.speed_chat_scale = 0.055
        # Text is filled in later by __setWASDButton().
        self.WASD_Label = DirectLabel(
            parent=self,
            relief=None,
            text='',
            text_align=TextNode.ALeft,
            text_scale=options_text_scale,
            text_wordwrap=16,
            pos=(
                leftMargin,
                0,
                textStartHeight))
        self.WASD_toggleButton = DirectButton(
            parent=self,
            relief=None,
            image=(
                guiButton.find('**/QuitBtn_UP'),
                guiButton.find('**/QuitBtn_DN'),
                guiButton.find('**/QuitBtn_RLVR')),
            image_scale=button_image_scale,
            text='',
            text_scale=options_text_scale,
            text_pos=button_textpos,
            pos=(
                buttonbase_xcoord,
                0.0,
                buttonbase_ycoord),
            command=self.__doToggleWASD)
        self.keymapDialogButton = DirectButton(
            parent=self,
            relief=None,
            image=(
                guiButton.find('**/QuitBtn_UP'),
                guiButton.find('**/QuitBtn_DN'),
                guiButton.find('**/QuitBtn_RLVR')),
            image_scale=button_image_scale,
            text='Configure Keymap',
            text_scale=(0.03, 0.05, 1),
            text_pos=button_textpos,
            pos=(
                buttonbase_xcoord + 0.44,
                0.0,
                buttonbase_ycoord),
            command=self.__openKeyRemapDialog)
        self.keymapDialogButton.setScale(
            1.55,
            1.0,
            1.0)
        gui.removeNode()
        guiButton.removeNode()
    def enter(self):
        """Show the tab and refresh the WASD toggle from the live state."""
        self.show()
        self.settingsChanged = 0
        self.__setWASDButton()
    def exit(self):
        self.ignore('confirmDone')
        self.hide()
    def unload(self):
        """Destroy every widget created in load()."""
        self.WASD_Label.destroy()
        del self.WASD_Label
        self.WASD_toggleButton.destroy()
        del self.WASD_toggleButton
        self.keymapDialogButton.destroy()
        del self.keymapDialogButton
    def __doToggleWASD(self):
        messenger.send('wakeup')
        if base.wantCustomControls:
            base.wantCustomControls = False
            settings['want-Custom-Controls'] = False
        else:
            base.wantCustomControls = True
            settings['want-Custom-Controls'] = True
        # Re-apply the control scheme immediately so the change takes effect
        # without a restart.
        base.reloadControls()
        base.localAvatar.controlManager.reload()
        base.localAvatar.chatMgr.reloadWASD()
        base.localAvatar.controlManager.disable()
        self.settingsChanged = 1
        self.__setWASDButton()
    def __setWASDButton(self):
        # The keymap dialog is only reachable while custom controls are on.
        if base.wantCustomControls:
            self.WASD_Label['text'] = 'Custom Keymapping is on.'
            self.WASD_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
            self.keymapDialogButton.show()
        else:
            self.WASD_Label['text'] = 'Custom Keymapping is off.'
            self.WASD_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
            self.keymapDialogButton.hide()
    def __openKeyRemapDialog(self):
        if base.wantCustomControls:
            self.controlDialog = ControlRemapDialog.ControlRemap()
|
|
import mock
import pytest
from olympia import amo
from olympia.amo.tests import TestCase, req_factory_factory
from olympia.addons.models import Addon, AddonUser
from olympia.users.models import UserProfile
from .acl import (action_allowed, check_addon_ownership, check_ownership,
check_addons_reviewer, check_personas_reviewer,
check_unlisted_addons_reviewer, is_editor, match_rules)
# Every test in this module needs database access.
pytestmark = pytest.mark.django_db
def test_match_rules():
    """match_rules() grants Admin:% for admin-wildcard rules only."""
    matching_rules = (
        '*:*',
        'Editors:*,Admin:EditAnyAddon,Admin:flagged,Admin:addons,'
        'Admin:EditAnyCollection',
        'Tests:*,Admin:serverstatus,Admin:users',
        'Admin:EditAnyAddon,Admin:EditAnyLocale,Editors:*,'
        'Admin:lists,Admin:applications,Admin:addons,Localizers:*',
        'Admin:EditAnyAddon',
        'Admin:ViewAnyStats,Admin:ViewAnyCollectionStats',
        'Admin:ViewAnyStats',
        'Editors:*,Admin:features',
        'Admin:Statistics',
        'Admin:Features,Editors:*',
        'Admin:%',
        'Admin:*',
        'Admin:Foo',
        'Admin:Bar',
    )
    non_matching_rules = (
        'Doctors:*',
        'Stats:View',
        'CollectionStats:View',
        'Addons:Review',
        'Personas:Review',
        'Locales:Edit',
        'Locale.de:Edit',
        'Reviews:Edit',
        'None:None',
    )
    for rule in matching_rules:
        assert match_rules(rule, 'Admin', '%'), "%s != Admin:%%" % rule
    for rule in non_matching_rules:
        assert not match_rules(rule, 'Admin', '%'), \
            "%s == Admin:%% and shouldn't" % rule
def test_anonymous_user():
    """An anonymous request is never allowed an admin action."""
    request = req_factory_factory('/')
    assert not action_allowed(request, amo.FIREFOX, 'Admin:%')
class ACLTestCase(TestCase):
    """Test some basic ACLs by going to various locked pages on AMO."""
    fixtures = ['access/login.json']
    def test_admin_login_anon(self):
        # Login form for anonymous user on the admin page.
        # An anonymous hit on /admin/ must redirect to login, not render.
        url = '/en-US/admin/'
        self.assertLoginRedirects(self.client.get(url), to=url)
class TestHasPerm(TestCase):
    """Exercise check_addon_ownership()/check_ownership() across author roles."""
    fixtures = ['base/users', 'base/addon_3615']
    def setUp(self):
        super(TestHasPerm, self).setUp()
        assert self.client.login(email='del@icio.us')
        self.user = UserProfile.objects.get(email='del@icio.us')
        self.addon = Addon.objects.get(id=3615)
        self.au = AddonUser.objects.get(addon=self.addon, user=self.user)
        # The fixture user starts as the add-on's owner.
        assert self.au.role == amo.AUTHOR_ROLE_OWNER
        self.request = self.fake_request_with_user(self.user)
    def fake_request_with_user(self, user):
        # Build a minimal mock request carrying an authenticated user.
        request = mock.Mock()
        request.user = user
        request.user.is_authenticated = mock.Mock(return_value=True)
        return request
    def login_admin(self):
        assert self.client.login(email='admin@mozilla.com')
        return UserProfile.objects.get(email='admin@mozilla.com')
    def test_anonymous(self):
        self.request.user.is_authenticated.return_value = False
        self.client.logout()
        assert not check_addon_ownership(self.request, self.addon)
    def test_admin(self):
        self.request = self.fake_request_with_user(self.login_admin())
        assert check_addon_ownership(self.request, self.addon)
        assert check_addon_ownership(self.request, self.addon, admin=True)
        assert not check_addon_ownership(self.request, self.addon, admin=False)
    def test_require_author(self):
        assert check_ownership(self.request, self.addon, require_author=True)
    def test_require_author_when_admin(self):
        self.request = self.fake_request_with_user(self.login_admin())
        assert check_ownership(self.request, self.addon, require_author=False)
        assert not check_ownership(self.request, self.addon,
                                   require_author=True)
    def test_disabled(self):
        self.addon.update(status=amo.STATUS_DISABLED)
        assert not check_addon_ownership(self.request, self.addon)
        self.test_admin()
    def test_deleted(self):
        # Not even admins may act on deleted add-ons.
        self.addon.update(status=amo.STATUS_DELETED)
        assert not check_addon_ownership(self.request, self.addon)
        self.request.user = self.login_admin()
        assert not check_addon_ownership(self.request, self.addon)
    def test_ignore_disabled(self):
        self.addon.update(status=amo.STATUS_DISABLED)
        assert check_addon_ownership(self.request, self.addon,
                                     ignore_disabled=True)
    def test_owner(self):
        # Default check only accepts the owner role.
        assert check_addon_ownership(self.request, self.addon)
        self.au.role = amo.AUTHOR_ROLE_DEV
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon)
        self.au.role = amo.AUTHOR_ROLE_VIEWER
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon)
        self.au.role = amo.AUTHOR_ROLE_SUPPORT
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon)
    def test_dev(self):
        assert check_addon_ownership(self.request, self.addon, dev=True)
        self.au.role = amo.AUTHOR_ROLE_DEV
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, dev=True)
        self.au.role = amo.AUTHOR_ROLE_VIEWER
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon, dev=True)
        self.au.role = amo.AUTHOR_ROLE_SUPPORT
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon, dev=True)
    def test_viewer(self):
        # viewer=True accepts every author role.
        assert check_addon_ownership(self.request, self.addon, viewer=True)
        self.au.role = amo.AUTHOR_ROLE_DEV
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, viewer=True)
        self.au.role = amo.AUTHOR_ROLE_VIEWER
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, viewer=True)
        self.au.role = amo.AUTHOR_ROLE_SUPPORT
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, viewer=True)
    def test_support(self):
        # BUGFIX: this first assertion previously passed viewer=True (a
        # copy-paste from test_viewer) and never checked support access for
        # the owner role.
        assert check_addon_ownership(self.request, self.addon, support=True)
        self.au.role = amo.AUTHOR_ROLE_DEV
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon,
                                         support=True)
        self.au.role = amo.AUTHOR_ROLE_VIEWER
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon,
                                         support=True)
        self.au.role = amo.AUTHOR_ROLE_SUPPORT
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, support=True)
class TestCheckReviewer(TestCase):
    """Exercise the reviewer-permission helpers for add-ons and personas."""
    fixtures = ['base/addon_3615', 'addons/persona']
    def setUp(self):
        super(TestCheckReviewer, self).setUp()
        self.user = UserProfile.objects.get()
        self.persona = Addon.objects.get(pk=15663)
        self.addon = Addon.objects.get(pk=3615)
    def test_no_perm(self):
        # Without any granted permission, every reviewer check fails.
        req = req_factory_factory('noop', user=self.user)
        assert not check_addons_reviewer(req)
        assert not check_unlisted_addons_reviewer(req)
        assert not check_personas_reviewer(req)
    def test_perm_addons(self):
        self.grant_permission(self.user, 'Addons:Review')
        req = req_factory_factory('noop', user=self.user)
        assert check_addons_reviewer(req)
        assert not check_unlisted_addons_reviewer(req)
        assert not check_personas_reviewer(req)
    def test_perm_themes(self):
        self.grant_permission(self.user, 'Personas:Review')
        req = req_factory_factory('noop', user=self.user)
        assert not check_addons_reviewer(req)
        assert not check_unlisted_addons_reviewer(req)
        assert check_personas_reviewer(req)
    def test_perm_unlisted_addons(self):
        self.grant_permission(self.user, 'Addons:ReviewUnlisted')
        req = req_factory_factory('noop', user=self.user)
        assert not check_addons_reviewer(req)
        assert check_unlisted_addons_reviewer(req)
        assert not check_personas_reviewer(req)
    def test_is_editor_for_addon_reviewer(self):
        """An addon editor is also a persona editor."""
        self.grant_permission(self.user, 'Addons:Review')
        req = req_factory_factory('noop', user=self.user)
        assert is_editor(req, self.persona)
        assert is_editor(req, self.addon)
    def test_is_editor_for_persona_reviewer(self):
        # The reverse does not hold: persona reviewers are not addon editors.
        self.grant_permission(self.user, 'Personas:Review')
        req = req_factory_factory('noop', user=self.user)
        assert is_editor(req, self.persona)
        assert not is_editor(req, self.addon)
|
|
"""SmartApp functionality to receive cloud-push notifications."""
import asyncio
import functools
import logging
import secrets
from urllib.parse import urlparse
from uuid import uuid4
from aiohttp import web
from pysmartapp import Dispatcher, SmartAppManager
from pysmartapp.const import SETTINGS_APP_ID
from pysmartthings import (
APP_TYPE_WEBHOOK,
CAPABILITIES,
CLASSIFICATION_AUTOMATION,
App,
AppOAuth,
AppSettings,
InstalledAppStatus,
SmartThings,
SourceType,
Subscription,
SubscriptionEntity,
)
from homeassistant.components import webhook
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.network import NoURLAvailableError, get_url
from .const import (
APP_NAME_PREFIX,
APP_OAUTH_CLIENT_NAME,
APP_OAUTH_SCOPES,
CONF_CLOUDHOOK_URL,
CONF_INSTALLED_APP_ID,
CONF_INSTANCE_ID,
CONF_REFRESH_TOKEN,
DATA_BROKERS,
DATA_MANAGER,
DOMAIN,
IGNORED_CAPABILITIES,
SETTINGS_INSTANCE_ID,
SIGNAL_SMARTAPP_PREFIX,
STORAGE_KEY,
STORAGE_VERSION,
SUBSCRIPTION_WARNING_LIMIT,
)
_LOGGER = logging.getLogger(__name__)
def format_unique_id(app_id: str, location_id: str) -> str:
    """Return the config-entry unique id for an app/location pair."""
    # Join with an underscore; same result as f"{app_id}_{location_id}".
    return app_id + "_" + location_id
async def find_app(hass: HomeAssistant, api):
    """Find an existing SmartApp for this installation of hass.

    Returns the first app whose name carries our prefix and whose stored
    instance id matches this hass instance, or None when there is none.
    """
    instance_id = hass.data[DOMAIN][CONF_INSTANCE_ID]
    for app in await api.apps():
        # Only apps created by this integration share the name prefix.
        if not app.app_name.startswith(APP_NAME_PREFIX):
            continue
        # Load settings to compare the stored instance id with ours.
        settings = await app.settings()
        if settings.settings.get(SETTINGS_INSTANCE_ID) == instance_id:
            return app
async def validate_installed_app(api, installed_app_id: str):
    """
    Ensure the specified installed SmartApp is valid and functioning.

    Query the API for the installed SmartApp and validate that it is tied to
    the specified app_id and is in an authorized state.

    Raises:
        RuntimeWarning: when the installed app is not in the AUTHORIZED state.
    """
    installed_app = await api.installed_app(installed_app_id)
    if installed_app.installed_app_status != InstalledAppStatus.AUTHORIZED:
        raise RuntimeWarning(
            "Installed SmartApp instance '{}' ({}) is not AUTHORIZED but instead {}".format(
                installed_app.display_name,
                installed_app.installed_app_id,
                installed_app.installed_app_status,
            )
        )
    return installed_app
def validate_webhook_requirements(hass: HomeAssistant) -> bool:
    """Ensure Home Assistant is setup properly to receive webhooks."""
    # An active Nabu Casa subscription or an existing cloudhook always works.
    has_cloud = hass.components.cloud.async_active_subscription()
    if has_cloud or hass.data[DOMAIN][CONF_CLOUDHOOK_URL] is not None:
        return True
    # Otherwise SmartThings requires the local webhook URL to be HTTPS.
    return get_webhook_url(hass).lower().startswith("https://")
def get_webhook_url(hass: HomeAssistant) -> str:
    """
    Get the URL of the webhook.

    Return the cloudhook if available, otherwise local webhook.
    """
    domain_data = hass.data[DOMAIN]
    cloudhook_url = domain_data[CONF_CLOUDHOOK_URL]
    has_cloud = hass.components.cloud.async_active_subscription()
    if has_cloud and cloudhook_url is not None:
        return cloudhook_url
    return webhook.async_generate_url(hass, domain_data[CONF_WEBHOOK_ID])
def _get_app_template(hass: HomeAssistant):
    """Build the attribute template used to create/update the SmartApp."""
    # Describe how SmartThings reaches this instance; a cloudhook wins.
    try:
        endpoint = f"at {get_url(hass, allow_cloud=False, prefer_external=True)}"
    except NoURLAvailableError:
        endpoint = ""
    if hass.data[DOMAIN][CONF_CLOUDHOOK_URL] is not None:
        endpoint = "via Nabu Casa"
    return {
        "app_name": APP_NAME_PREFIX + str(uuid4()),
        "display_name": "Home Assistant",
        "description": f"{hass.config.location_name} {endpoint}",
        "webhook_target_url": get_webhook_url(hass),
        "app_type": APP_TYPE_WEBHOOK,
        "single_instance": True,
        "classifications": [CLASSIFICATION_AUTOMATION],
    }
async def create_app(hass: HomeAssistant, api):
    """Create a SmartApp for this instance of hass.

    Returns:
        (app, client): the created app and its OAuth client credentials.
    """
    # Create app from template attributes
    template = _get_app_template(hass)
    app = App()
    for key, value in template.items():
        setattr(app, key, value)
    app, client = await api.create_app(app)
    _LOGGER.debug("Created SmartApp '%s' (%s)", app.app_name, app.app_id)
    # Set unique hass id in settings so find_app() can locate it later.
    settings = AppSettings(app.app_id)
    settings.settings[SETTINGS_APP_ID] = app.app_id
    settings.settings[SETTINGS_INSTANCE_ID] = hass.data[DOMAIN][CONF_INSTANCE_ID]
    await api.update_app_settings(settings)
    _LOGGER.debug(
        "Updated App Settings for SmartApp '%s' (%s)", app.app_name, app.app_id
    )
    # Set oauth scopes
    oauth = AppOAuth(app.app_id)
    oauth.client_name = APP_OAUTH_CLIENT_NAME
    oauth.scope.extend(APP_OAUTH_SCOPES)
    await api.update_app_oauth(oauth)
    _LOGGER.debug("Updated App OAuth for SmartApp '%s' (%s)", app.app_name, app.app_id)
    return app, client
async def update_app(hass: HomeAssistant, app):
    """Ensure the SmartApp is up-to-date and update if necessary."""
    template = _get_app_template(hass)
    template.pop("app_name")  # don't update this
    # Collect only the attributes that differ from the template.
    stale = {
        key: value for key, value in template.items() if getattr(app, key) != value
    }
    if not stale:
        return
    for key, value in stale.items():
        setattr(app, key, value)
    await app.save()
    _LOGGER.debug(
        "SmartApp '%s' (%s) updated with latest settings", app.app_name, app.app_id
    )
def setup_smartapp(hass, app):
    """
    Configure an individual SmartApp in hass.

    Register the SmartApp with the SmartAppManager so that hass will service
    lifecycle events (install, event, etc...). A unique SmartApp is created
    for each SmartThings account that is configured in hass.
    """
    manager = hass.data[DOMAIN][DATA_MANAGER]
    existing = manager.smartapps.get(app.app_id)
    if existing:
        # already setup
        return existing
    smartapp = manager.register(app.app_id, app.webhook_public_key)
    smartapp.name = app.display_name
    smartapp.description = app.description
    smartapp.permissions.extend(APP_OAUTH_SCOPES)
    return smartapp
async def setup_smartapp_endpoint(hass: HomeAssistant):
    """
    Configure the SmartApp webhook in hass.

    SmartApps are an extension point within the SmartThings ecosystem and
    is used to receive push updates (i.e. device updates) from the cloud.
    """
    if hass.data.get(DOMAIN):
        # already setup
        return
    # Get/create config to store a unique id for this hass instance.
    store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
    if not (config := await store.async_load()):
        # Create config
        config = {
            CONF_INSTANCE_ID: str(uuid4()),
            CONF_WEBHOOK_ID: secrets.token_hex(),
            CONF_CLOUDHOOK_URL: None,
        }
        await store.async_save(config)
    # Register webhook
    webhook.async_register(
        hass, DOMAIN, "SmartApp", config[CONF_WEBHOOK_ID], smartapp_webhook
    )
    # Create webhook if eligible: cloud subscription active and no config
    # entries for this domain exist yet.
    cloudhook_url = config.get(CONF_CLOUDHOOK_URL)
    if (
        cloudhook_url is None
        and hass.components.cloud.async_active_subscription()
        and not hass.config_entries.async_entries(DOMAIN)
    ):
        cloudhook_url = await hass.components.cloud.async_create_cloudhook(
            config[CONF_WEBHOOK_ID]
        )
        config[CONF_CLOUDHOOK_URL] = cloudhook_url
        await store.async_save(config)
        _LOGGER.debug("Created cloudhook '%s'", cloudhook_url)
    # SmartAppManager uses a dispatcher to invoke callbacks when push events
    # occur. Use hass' implementation instead of the built-in one.
    dispatcher = Dispatcher(
        signal_prefix=SIGNAL_SMARTAPP_PREFIX,
        connect=functools.partial(async_dispatcher_connect, hass),
        send=functools.partial(async_dispatcher_send, hass),
    )
    # Path is used in digital signature validation
    path = (
        urlparse(cloudhook_url).path
        if cloudhook_url
        else webhook.async_generate_path(config[CONF_WEBHOOK_ID])
    )
    manager = SmartAppManager(path, dispatcher=dispatcher)
    # Wire lifecycle callbacks (install/update/uninstall) to this hass.
    manager.connect_install(functools.partial(smartapp_install, hass))
    manager.connect_update(functools.partial(smartapp_update, hass))
    manager.connect_uninstall(functools.partial(smartapp_uninstall, hass))
    hass.data[DOMAIN] = {
        DATA_MANAGER: manager,
        CONF_INSTANCE_ID: config[CONF_INSTANCE_ID],
        DATA_BROKERS: {},
        CONF_WEBHOOK_ID: config[CONF_WEBHOOK_ID],
        # Will not be present if not enabled
        CONF_CLOUDHOOK_URL: config.get(CONF_CLOUDHOOK_URL),
    }
    _LOGGER.debug(
        "Setup endpoint for %s",
        cloudhook_url
        if cloudhook_url
        else webhook.async_generate_url(hass, config[CONF_WEBHOOK_ID]),
    )
async def unload_smartapp_endpoint(hass: HomeAssistant):
    """Tear down the component configuration."""
    if DOMAIN not in hass.data:
        return
    # Remove the cloudhook if it was created
    cloudhook_url = hass.data[DOMAIN][CONF_CLOUDHOOK_URL]
    if cloudhook_url and hass.components.cloud.async_is_logged_in():
        await hass.components.cloud.async_delete_cloudhook(
            hass.data[DOMAIN][CONF_WEBHOOK_ID]
        )
        # Remove cloudhook from storage
        store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
        await store.async_save(
            {
                CONF_INSTANCE_ID: hass.data[DOMAIN][CONF_INSTANCE_ID],
                CONF_WEBHOOK_ID: hass.data[DOMAIN][CONF_WEBHOOK_ID],
                CONF_CLOUDHOOK_URL: None,
            }
        )
        _LOGGER.debug("Cloudhook '%s' was removed", cloudhook_url)
    # Remove the webhook
    webhook.async_unregister(hass, hass.data[DOMAIN][CONF_WEBHOOK_ID])
    # Disconnect all brokers
    for broker in hass.data[DOMAIN][DATA_BROKERS].values():
        broker.disconnect()
    # Remove all handlers from manager
    hass.data[DOMAIN][DATA_MANAGER].dispatcher.disconnect_all()
    # Remove the component data
    hass.data.pop(DOMAIN)
async def smartapp_sync_subscriptions(
    hass: HomeAssistant,
    auth_token: str,
    location_id: str,
    installed_app_id: str,
    devices,
):
    """Synchronize subscriptions of an installed app.

    Creates capability subscriptions needed by the given devices and removes
    subscriptions that are no longer needed; matching subscriptions are left
    untouched. Creation/deletion requests are run concurrently.
    """
    api = SmartThings(async_get_clientsession(hass), auth_token)
    tasks = []

    async def create_subscription(target: str):
        # Subscribe the installed app to one capability's events.
        sub = Subscription()
        sub.installed_app_id = installed_app_id
        sub.location_id = location_id
        sub.source_type = SourceType.CAPABILITY
        sub.capability = target
        try:
            await api.create_subscription(sub)
            _LOGGER.debug(
                "Created subscription for '%s' under app '%s'", target, installed_app_id
            )
        except Exception as error:  # pylint:disable=broad-except
            # Best-effort: log and continue so one failure does not abort
            # the rest of the gathered tasks.
            _LOGGER.error(
                "Failed to create subscription for '%s' under app '%s': %s",
                target,
                installed_app_id,
                error,
            )

    async def delete_subscription(sub: SubscriptionEntity):
        # Remove an existing subscription that is no longer required.
        try:
            await api.delete_subscription(installed_app_id, sub.subscription_id)
            _LOGGER.debug(
                "Removed subscription for '%s' under app '%s' because it was no longer needed",
                sub.capability,
                installed_app_id,
            )
        except Exception as error:  # pylint:disable=broad-except
            _LOGGER.error(
                "Failed to remove subscription for '%s' under app '%s': %s",
                sub.capability,
                installed_app_id,
                error,
            )

    # Build set of capabilities and prune unsupported ones
    capabilities = set()
    for device in devices:
        capabilities.update(device.capabilities)
    # Remove items not defined in the library
    capabilities.intersection_update(CAPABILITIES)
    # Remove unused capabilities
    capabilities.difference_update(IGNORED_CAPABILITIES)
    capability_count = len(capabilities)
    # Warn when exceeding the per-app subscription limit.
    if capability_count > SUBSCRIPTION_WARNING_LIMIT:
        _LOGGER.warning(
            "Some device attributes may not receive push updates and there may be subscription "
            "creation failures under app '%s' because %s subscriptions are required but "
            "there is a limit of %s per app",
            installed_app_id,
            capability_count,
            SUBSCRIPTION_WARNING_LIMIT,
        )
    _LOGGER.debug(
        "Synchronizing subscriptions for %s capabilities under app '%s': %s",
        capability_count,
        installed_app_id,
        capabilities,
    )
    # Get current subscriptions and find differences
    subscriptions = await api.subscriptions(installed_app_id)
    for subscription in subscriptions:
        if subscription.capability in capabilities:
            # Already subscribed; no work needed for this capability.
            capabilities.remove(subscription.capability)
        else:
            # Delete the subscription
            tasks.append(delete_subscription(subscription))
    # Remaining capabilities need subscriptions created
    tasks.extend([create_subscription(c) for c in capabilities])
    if tasks:
        await asyncio.gather(*tasks)
    else:
        _LOGGER.debug("Subscriptions for app '%s' are up-to-date", installed_app_id)
async def _continue_flow(
    hass: HomeAssistant,
    app_id: str,
    location_id: str,
    installed_app_id: str,
    refresh_token: str,
):
    """Continue a config flow if one is in progress for the specific installed app.

    A no-op when no in-progress flow matches the app/location unique id.
    """
    unique_id = format_unique_id(app_id, location_id)
    # Find the in-progress flow whose unique id matches this app/location.
    flow = next(
        (
            flow
            for flow in hass.config_entries.flow.async_progress_by_handler(DOMAIN)
            if flow["context"]["unique_id"] == unique_id
        ),
        None,
    )
    if flow is not None:
        # Feed the install details into the waiting flow step.
        await hass.config_entries.flow.async_configure(
            flow["flow_id"],
            {
                CONF_INSTALLED_APP_ID: installed_app_id,
                CONF_REFRESH_TOKEN: refresh_token,
            },
        )
        _LOGGER.debug(
            "Continued config flow '%s' for SmartApp '%s' under parent app '%s'",
            flow["flow_id"],
            installed_app_id,
            app_id,
        )
async def smartapp_install(hass: HomeAssistant, req, resp, app):
    """Handle a SmartApp installation and continue the config flow."""
    installed_app_id = req.installed_app_id
    await _continue_flow(
        hass, app.app_id, req.location_id, installed_app_id, req.refresh_token
    )
    _LOGGER.debug(
        "Installed SmartApp '%s' under parent app '%s'", installed_app_id, app.app_id
    )
async def smartapp_update(hass: HomeAssistant, req, resp, app):
    """Handle a SmartApp update and either update the entry or continue the flow."""
    # Locate the config entry backed by this installed app, if any.
    entry = next(
        (
            entry
            for entry in hass.config_entries.async_entries(DOMAIN)
            if entry.data.get(CONF_INSTALLED_APP_ID) == req.installed_app_id
        ),
        None,
    )
    if entry:
        # Existing entry: persist the new refresh token.
        hass.config_entries.async_update_entry(
            entry, data={**entry.data, CONF_REFRESH_TOKEN: req.refresh_token}
        )
        _LOGGER.debug(
            "Updated config entry '%s' for SmartApp '%s' under parent app '%s'",
            entry.entry_id,
            req.installed_app_id,
            app.app_id,
        )
    # Also resume any in-progress config flow for this app/location.
    await _continue_flow(
        hass, app.app_id, req.location_id, req.installed_app_id, req.refresh_token
    )
    _LOGGER.debug(
        "Updated SmartApp '%s' under parent app '%s'", req.installed_app_id, app.app_id
    )
async def smartapp_uninstall(hass: HomeAssistant, req, resp, app):
    """
    Handle when a SmartApp is removed from a location by the user.

    Find and delete the config entry representing the integration.
    """
    # Locate the config entry backed by this installed app, if any.
    entry = next(
        (
            entry
            for entry in hass.config_entries.async_entries(DOMAIN)
            if entry.data.get(CONF_INSTALLED_APP_ID) == req.installed_app_id
        ),
        None,
    )
    if entry:
        # Add as job not needed because the current coroutine was invoked
        # from the dispatcher and is not being awaited.
        await hass.config_entries.async_remove(entry.entry_id)
    _LOGGER.debug(
        "Uninstalled SmartApp '%s' under parent app '%s'",
        req.installed_app_id,
        app.app_id,
    )
async def smartapp_webhook(hass: HomeAssistant, webhook_id: str, request):
    """
    Handle a smartapp lifecycle event callback from SmartThings.

    Requests from SmartThings are digitally signed and the SmartAppManager
    validates the signature for authenticity.
    """
    payload = await request.json()
    manager = hass.data[DOMAIN][DATA_MANAGER]
    result = await manager.handle_request(payload, request.headers)
    return web.json_response(result)
|
|
# Requires python3
import re
import sqlite3
import subprocess
import shutil
import os
import codecs
import datetime
import sys
class TskDbDiff(object):
    """Compares two TSK/Autopsy SQLite databases.

    The comparison works by dumping each database to sorted text files --
    one for blackboard artifacts/attributes, one for everything else --
    and diffing those dumps against the gold-standard dumps.

    Attributes:
        output_db_file: path to the output (non-gold) database
        gold_db_file: path to the gold database
        output_dir: folder where generated dump/diff files are kept, or
            None to use temp files that are removed after the diff
        gold_bb_dump: path to the gold blackboard dump file
        gold_dump: path to the gold non-blackboard dump file
        verbose: a boolean; if true, diff results are sent to stdout
    """

    def __init__(self, output_db, gold_db, output_dir=None, gold_bb_dump=None, gold_dump=None, verbose=False):
        """Constructor for TskDbDiff.

        Args:
            output_db: path to output database (non-gold standard)
            gold_db: path to gold database
            output_dir: (optional) Path to folder where generated files will be put.
            gold_bb_dump: (optional) path to file where the gold blackboard dump is located
            gold_dump: (optional) path to file where the gold non-blackboard dump is located
            verbose: (optional) a boolean, if true, diff results are sent to stdout.
        """
        self.output_db_file = output_db
        self.gold_db_file = gold_db
        self.output_dir = output_dir
        self.gold_bb_dump = gold_bb_dump
        self.gold_dump = gold_dump
        # NOTE(review): these flags are always True here; _init_diff only
        # (re)assigns the gold dump paths, so gold dumps are regenerated on
        # every run -- confirm whether caching was intended.
        self._generate_gold_dump = True
        self._generate_gold_bb_dump = True
        # Paths of the generated dump and diff files (set in _init_diff).
        self._bb_dump_diff = ""
        self._dump_diff = ""
        self._bb_dump = ""
        self._dump = ""
        self.verbose = verbose

    def run_diff(self):
        """Compare the databases.

        Returns:
            (dump_diff_pass, bb_dump_diff_pass): booleans; True when the
            corresponding dump matches the gold dump.

        Raises:
            TskDbDiffException: if an error occurs while diffing or dumping the database
        """
        self._init_diff()
        # generate the gold database dumps if necessary
        if self._generate_gold_dump:
            TskDbDiff._dump_output_db_nonbb(self.gold_db_file, self.gold_dump)
        if self._generate_gold_bb_dump:
            TskDbDiff._dump_output_db_bb(self.gold_db_file, self.gold_bb_dump)
        # generate the output database dumps (both DB and BB)
        TskDbDiff._dump_output_db_nonbb(self.output_db_file, self._dump)
        TskDbDiff._dump_output_db_bb(self.output_db_file, self._bb_dump)
        # Compare non-BB
        dump_diff_pass = self._diff(self._dump, self.gold_dump, self._dump_diff)
        # Compare BB
        bb_dump_diff_pass = self._diff(self._bb_dump, self.gold_bb_dump, self._bb_dump_diff)
        self._cleanup_diff()
        return dump_diff_pass, bb_dump_diff_pass

    def _init_diff(self):
        """Set up the necessary files based on the arguments given at construction"""
        if self.output_dir is None:
            # No stored files
            self._bb_dump = TskDbDiff._get_tmp_file("BlackboardDump", ".txt")
            self._bb_dump_diff = TskDbDiff._get_tmp_file("BlackboardDump-Diff", ".txt")
            self._dump = TskDbDiff._get_tmp_file("DBDump", ".txt")
            self._dump_diff = TskDbDiff._get_tmp_file("DBDump-Diff", ".txt")
        else:
            self._bb_dump = os.path.join(self.output_dir, "BlackboardDump.txt")
            self._bb_dump_diff = os.path.join(self.output_dir, "BlackboardDump-Diff.txt")
            self._dump = os.path.join(self.output_dir, "DBDump.txt")
            self._dump_diff = os.path.join(self.output_dir, "DBDump-Diff.txt")
        # Fall back to temp files for the gold dumps when none were supplied.
        if self.gold_bb_dump is None:
            self.gold_bb_dump = TskDbDiff._get_tmp_file("GoldBlackboardDump", ".txt")
            self.gold_dump = TskDbDiff._get_tmp_file("GoldDBDump", ".txt")

    def _cleanup_diff(self):
        """Remove temp dump/diff files created when no output_dir was given."""
        if self.output_dir is None:
            #cleanup temp files
            os.remove(self._dump)
            os.remove(self._bb_dump)
            if os.path.isfile(self._dump_diff):
                os.remove(self._dump_diff)
            if os.path.isfile(self._bb_dump_diff):
                os.remove(self._bb_dump_diff)
        # NOTE(review): _init_diff replaced a None gold_bb_dump with a temp
        # path, so this condition can no longer be True here -- the temp gold
        # dumps are never removed. Confirm intent before changing.
        if self.gold_bb_dump is None:
            os.remove(self.gold_bb_dump)
            os.remove(self.gold_dump)

    def _diff(self, output_file, gold_file, diff_path):
        """Compare two text files.

        Args:
            output_file: a pathto_File, the latest text file
            gold_file: a pathto_File, the gold text file
            diff_path: The file to write the differences to

        Returns:
            True when the files are identical, False when they differ or
            either file is missing.
        """
        if (not os.path.isfile(output_file)):
            return False
        if (not os.path.isfile(gold_file)):
            return False
        # It is faster to read the contents in and directly compare
        output_data = codecs.open(output_file, "r", "utf_8").read()
        gold_data = codecs.open(gold_file, "r", "utf_8").read()
        if (gold_data == output_data):
            return True
        # If they are different, invoke 'diff'
        diff_file = codecs.open(diff_path, "wb", "utf_8")
        # Gold needs to be passed in as 1st arg and output as 2nd
        dffcmdlst = ["diff", gold_file, output_file]
        subprocess.call(dffcmdlst, stdout = diff_file)
        # create file path for gold files inside output folder. In case of diff, both gold and current run files
        # are available in the report output folder. Prefix Gold- is added to the filename.
        gold_file_in_output_dir = output_file[:output_file.rfind("/")] + "/Gold-" + output_file[output_file.rfind("/")+1:]
        shutil.copy(gold_file, gold_file_in_output_dir)
        return False

    # Note: defined without 'self'; always invoked via the class
    # (TskDbDiff._dump_output_db_bb(...)), so it behaves as a static method.
    def _dump_output_db_bb(db_file, bb_dump_file):
        """Dumps sorted text results to the given output location.

        Smart method that deals with a blackboard comparison to avoid issues
        with different IDs based on when artifacts were created.

        Args:
            db_file: a pathto_File, the output database.
            bb_dump_file: a pathto_File, the sorted dump file to write to

        Raises:
            TskDbDiffException: on SQL errors or unexpected failures.
        """
        unsorted_dump = TskDbDiff._get_tmp_file("dump_data", ".txt")
        conn = sqlite3.connect(db_file)
        conn.text_factory = lambda x: x.decode("utf-8", "ignore")
        conn.row_factory = sqlite3.Row
        artifact_cursor = conn.cursor()
        # Get the list of all artifacts (along with type and associated file)
        # @@@ Could add a SORT by parent_path in here since that is how we are going to later sort it.
        artifact_cursor.execute("SELECT tsk_files.parent_path, tsk_files.name, blackboard_artifact_types.display_name, blackboard_artifacts.artifact_id FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_files ON tsk_files.obj_id = blackboard_artifacts.obj_id")
        database_log = codecs.open(unsorted_dump, "wb", "utf_8")
        row = artifact_cursor.fetchone()
        # NOTE(review): 'appnd' and 'counter' are never used below.
        appnd = False
        counter = 0
        artifact_count = 0
        artifact_fail = 0
        # Cycle through artifacts
        try:
            while (row != None):
                # File Name and artifact type
                if(row["parent_path"] != None):
                    database_log.write(row["parent_path"] + row["name"] + ' <artifact type="' + row["display_name"] + '" > ')
                else:
                    database_log.write(row["name"] + ' <artifact type="' + row["display_name"] + '" > ')
                # Get attributes for this artifact
                attribute_cursor = conn.cursor()
                looptry = True
                artifact_count += 1
                try:
                    art_id = ""
                    art_id = str(row["artifact_id"])
                    attribute_cursor.execute("SELECT blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double FROM blackboard_attributes INNER JOIN blackboard_attribute_types ON blackboard_attributes.attribute_type_id = blackboard_attribute_types.attribute_type_id WHERE artifact_id =? ORDER BY blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double", [art_id])
                    attributes = attribute_cursor.fetchall()
                    # Print attributes
                    if (len(attributes) == 0):
                        # @@@@ This should be </artifact>
                        database_log.write(' <artifact/>\n')
                        row = artifact_cursor.fetchone()
                        continue
                    src = attributes[0][0]
                    for attr in attributes:
                        # value_type appears to select which value_* column
                        # holds the data (columns 3..6) -- TODO confirm.
                        attr_value_index = 3 + attr["value_type"]
                        numvals = 0
                        for x in range(3, 6):
                            if(attr[x] != None):
                                numvals += 1
                        # NOTE(review): these 'msg' assignments are dead unless a
                        # later sqlite3.Error overwrites msg; nothing reports them.
                        if(numvals > 1):
                            msg = "There were too many values for attribute type: " + attr["display_name"] + " for artifact with id #" + str(row["artifact_id"]) + ".\n"
                        if(not attr["source"] == src):
                            msg = "There were inconsistent sources for artifact with id #" + str(row["artifact_id"]) + ".\n"
                        try:
                            attr_value_as_string = str(attr[attr_value_index])
                            #if((type(attr_value_as_string) != 'unicode') or (type(attr_value_as_string) != 'str')):
                            #    attr_value_as_string = str(attr_value_as_string)
                            # Collapse control characters so each attribute stays on one line.
                            patrn = re.compile("[\n\0\a\b\r\f]")
                            attr_value_as_string = re.sub(patrn, ' ', attr_value_as_string)
                            database_log.write('<attribute source="' + attr["source"] + '" type="' + attr["display_name"] + '" value="' + attr_value_as_string + '" />')
                        except IOError as e:
                            print("IO error")
                            raise TskDbDiffException("Unexpected IO error while writing to database log." + str(e))
                except sqlite3.Error as e:
                    msg = "Attributes in artifact id (in output DB)# " + str(row["artifact_id"]) + " encountered an error: " + str(e) +" .\n"
                    print("Attributes in artifact id (in output DB)# ", str(row["artifact_id"]), " encountered an error: ", str(e))
                    print()
                    looptry = False
                    artifact_fail += 1
                    database_log.write('Error Extracting Attributes')
                    database_log.close()
                    raise TskDbDiffException(msg)
                finally:
                    # Always release the per-artifact cursor, even on 'continue'.
                    attribute_cursor.close()
                # @@@@ This should be </artifact>
                database_log.write(' <artifact/>\n')
                row = artifact_cursor.fetchone()
            # NOTE(review): this msg is assigned but never printed or raised.
            if(artifact_fail > 0):
                msg ="There were " + str(artifact_count) + " artifacts and " + str(artifact_fail) + " threw an exception while loading.\n"
        except Exception as e:
            raise TskDbDiffException("Unexpected error while dumping blackboard database: " + str(e))
        finally:
            database_log.close()
            artifact_cursor.close()
            conn.close()
        # Now sort the file
        srtcmdlst = ["sort", unsorted_dump, "-o", bb_dump_file]
        subprocess.call(srtcmdlst)

    # Note: defined without 'self'; always invoked via the class, so it
    # behaves as a static method.
    def _dump_output_db_nonbb(db_file, dump_file):
        """Dumps a database to a text file.

        Does not dump the artifact and attributes.

        Args:
            db_file: a pathto_File, the database file to dump
            dump_file: a pathto_File, the location to dump the non-blackboard database items
        """
        # Make a copy that we can modify
        backup_db_file = TskDbDiff._get_tmp_file("tsk_backup_db", ".db")
        shutil.copy(db_file, backup_db_file)
        #print (backup_db_file)
        # We sometimes get situations with messed up permissions
        os.chmod (backup_db_file, 0o777)
        conn = sqlite3.connect(backup_db_file)
        # Map object ids to file paths before dropping any tables.
        id_path_table = build_id_table(conn.cursor())
        conn.text_factory = lambda x: x.decode("utf-8", "ignore")
        # Delete the blackboard tables
        conn.execute("DROP TABLE blackboard_artifacts")
        conn.execute("DROP TABLE blackboard_attributes")
        # Write to the database dump, normalizing unstable entries (ids, timestamps).
        with codecs.open(dump_file, "wb", "utf_8") as db_log:
            for line in conn.iterdump():
                line = normalize_db_entry(line, id_path_table)
                db_log.write('%s\n' % line)
        # Now sort the file
        srtcmdlst = ["sort", dump_file, "-o", dump_file]
        subprocess.call(srtcmdlst)
        conn.close()
        # cleanup the backup
        os.remove(backup_db_file)

    # Note: defined without 'self'; public convenience wrapper over the two
    # dump methods above.
    def dump_output_db(db_file, dump_file, bb_dump_file):
        """Dumps the given database to text files for later comparison.

        Args:
            db_file: a pathto_File, the database file to dump
            dump_file: a pathto_File, the location to dump the non-blackboard database items
            bb_dump_file: a pathto_File, the location to dump the blackboard database items
        """
        TskDbDiff._dump_output_db_nonbb(db_file, dump_file)
        TskDbDiff._dump_output_db_bb(db_file, bb_dump_file)

    # Note: defined without 'self'; always invoked via the class.
    def _get_tmp_file(base, ext):
        """Return a unique-ish temp file path built from the current time.

        NOTE(review): reads os.environ['TMP'] directly, which raises KeyError
        on platforms where TMP is unset -- confirm Windows-only usage.
        """
        time = datetime.datetime.now().time().strftime("%H%M%f")
        return os.path.join(os.environ['TMP'], base + time + ext)
class TskDbDiffException(Exception):
    """Raised when an error occurs while dumping or diffing a database."""
    pass
def normalize_db_entry(line, table):
    """ Make testing more consistent and reasonable by doctoring certain db entries.

    Strips auto-generated object ids out of INSERT statements (substituting
    file paths where possible) and removes time-based report data so two
    runs over the same case produce identical dumps.

    Args:
        line: a String, the line to remove the object id from.
        table: a map from object ids to file paths.
    """
    # Identify which table this INSERT statement (if any) targets.
    files_index = line.find('INSERT INTO "tsk_files"')
    path_index = line.find('INSERT INTO "tsk_files_path"')
    object_index = line.find('INSERT INTO "tsk_objects"')
    report_index = line.find('INSERT INTO "reports"')
    # Naive parse of the VALUES list: slice between the FIRST '(' and the
    # FIRST ')' and split on commas.
    # NOTE(review): values containing ')' or ',' are mangled by this -- confirm
    # the dumps never contain such text.
    parens = line[line.find('(') + 1 : line.find(')')]
    fields_list = parens.replace(" ", "").split(',')
    # remove object ID
    if (files_index != -1):
        obj_id = fields_list[0]
        # NOTE(review): 'path' is unused here, but the lookup raises KeyError
        # for unknown ids -- confirm whether that failure mode is intended
        # before removing this line.
        path = table[int(obj_id)]
        newLine = ('INSERT INTO "tsk_files" VALUES(' + ', '.join(fields_list[1:]) + ');')
        return newLine
    # remove object ID
    elif (path_index != -1):
        obj_id = fields_list[0]
        path = table[int(obj_id)]
        # Replace the object id with the resolved file path.
        newLine = ('INSERT INTO "tsk_files_path" VALUES(' + path + ', '.join(fields_list[1:]) + ');')
        return newLine
    #remove object ID
    elif (object_index != -1):
        obj_id = fields_list[0]
        parent_id = fields_list[1]
        try:
            path = table[int(obj_id)]
            parent_path = table[int(parent_id)]
            newLine = ('INSERT INTO "tsk_objects" VALUES(' + path + ', ' + parent_path + ', ' + ', '.join(fields_list[2:]) + ');')
            return newLine
        except Exception as e:
            # objects table has things that aren't files. if lookup fails, don't replace anything.
            return line
    # remove time-based information, ie Test_6/11/14 -> Test
    elif (report_index != -1):
        fields_list[1] = "AutopsyTestCase"
        fields_list[2] = "0"
        newLine = ('INSERT INTO "reports" VALUES(' + ','.join(fields_list) + ');')
        return newLine
    else:
        return line
def build_id_table(artifact_cursor):
    """Build the map of object ids to file paths.

    Args:
        artifact_cursor: the database cursor
    """
    # For each row, key on the object id and store the full file path
    # (parent path + name); None columns are stringified like str() would.
    mapping = {}
    for row in artifact_cursor.execute("SELECT obj_id, parent_path, name FROM tsk_files"):
        mapping[row[0]] = str(row[1]) + str(row[2])
    return mapping
def main():
    """Command-line entry point: diff an output database against a gold database.

    Usage: tskdbdiff [OUTPUT DB PATH] [GOLD DB PATH]

    Dump and diff files are written to the current directory. Returns 0;
    exits with status 1 when the required arguments are missing.
    """
    try:
        sys.argv.pop(0)  # discard the script name
        output_db = sys.argv.pop(0)
        gold_db = sys.argv.pop(0)
    except IndexError:
        # Narrowed from a bare 'except:'; only missing arguments can fail the
        # pops above. Also fixed the "OUPUT" typo and exit with a non-zero
        # status so callers can detect the usage error.
        print("usage: tskdbdiff [OUTPUT DB PATH] [GOLD DB PATH]")
        sys.exit(1)
    db_diff = TskDbDiff(output_db, gold_db, output_dir=".")
    dump_passed, bb_dump_passed = db_diff.run_diff()
    if dump_passed and bb_dump_passed:
        print("Database comparison passed.")
    if not dump_passed:
        print("Non blackboard database comparison failed.")
    if not bb_dump_passed:
        print("Blackboard database comparison failed.")
    return 0
if __name__ == "__main__":
    # Refuse to run under Python 2; the diff logic relies on Python 3.
    if sys.hexversion < 0x03000000:
        print("Python 3 required")
        sys.exit(1)
    sys.exit(main())
|
|
"""The tests for the Cast Media player platform."""
# pylint: disable=protected-access
import json
from typing import Optional
from uuid import UUID
import attr
import pytest
from homeassistant.components import tts
from homeassistant.components.cast import media_player as cast
from homeassistant.components.cast.media_player import ChromecastInfo
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
from tests.async_mock import ANY, AsyncMock, MagicMock, Mock, patch
from tests.common import MockConfigEntry, assert_setup_component
from tests.components.media_player import common
@pytest.fixture()
def mz_mock():
    """Mock pychromecast MultizoneManager."""
    manager = MagicMock()
    return manager
@pytest.fixture()
def quick_play_mock():
    """Mock pychromecast quick_play."""
    mock = MagicMock()
    return mock
@pytest.fixture(autouse=True)
def cast_mock(mz_mock, quick_play_mock):
    """Mock pychromecast.

    Autouse fixture: for every test in this module it replaces the
    pychromecast library (in both media_player and discovery), the
    MultizoneManager, the zeroconf instance lookup and quick_play.
    """
    pycast_mock = MagicMock()
    # start_discovery returns a (listener, browser) pair; only the browser
    # mock matters to these tests.
    pycast_mock.start_discovery.return_value = (None, Mock())
    # NOTE(review): dial_mock is configured below but never patched into any
    # module -- it appears unused; confirm before relying on it.
    dial_mock = MagicMock(name="XXX")
    dial_mock.get_device_status.return_value.uuid = "fake_uuid"
    dial_mock.get_device_status.return_value.manufacturer = "fake_manufacturer"
    dial_mock.get_device_status.return_value.model_name = "fake_model_name"
    dial_mock.get_device_status.return_value.friendly_name = "fake_friendly_name"
    with patch(
        "homeassistant.components.cast.media_player.pychromecast", pycast_mock
    ), patch(
        "homeassistant.components.cast.discovery.pychromecast", pycast_mock
    ), patch(
        "homeassistant.components.cast.media_player.MultizoneManager",
        return_value=mz_mock,
    ), patch(
        "homeassistant.components.cast.media_player.zeroconf.async_get_instance",
        AsyncMock(),
    ), patch(
        "homeassistant.components.cast.media_player.quick_play",
        quick_play_mock,
    ):
        yield
# pylint: disable=invalid-name
# Fixed fake UUIDs shared by the tests below so device identities are
# deterministic across runs.
FakeUUID = UUID("57355bce-9364-4aa6-ac1e-eb849dccf9e2")
FakeUUID2 = UUID("57355bce-9364-4aa6-ac1e-eb849dccf9e4")
FakeGroupUUID = UUID("57355bce-9364-4aa6-ac1e-eb849dccf9e3")
def get_fake_chromecast(info: ChromecastInfo):
    """Generate a Fake Chromecast object with the specified arguments."""
    chromecast = MagicMock(host=info.host, port=info.port, uuid=info.uuid)
    # Start out with no media status, like a freshly connected device.
    chromecast.media_controller.status = None
    return chromecast
def get_fake_chromecast_info(
    host="192.168.178.42", port=8009, uuid: Optional[UUID] = FakeUUID
):
    """Generate a Fake ChromecastInfo with the specified arguments."""
    attrs = {
        "host": host,
        "port": port,
        "uuid": uuid,
        "friendly_name": "Speaker",
        "services": {"the-service"},
    }
    return ChromecastInfo(**attrs)
def get_fake_zconf(host="192.168.178.42", port=8009):
    """Generate a Fake Zeroconf object with the specified arguments."""
    # Any service lookup resolves to a single record for the given host/port.
    service_info = MagicMock(port=port)
    service_info.parsed_addresses = MagicMock(return_value=[host])
    zconf = MagicMock()
    zconf.get_service_info.return_value = service_info
    return zconf
async def async_setup_cast(hass, config=None):
    """Set up the cast platform.

    Patches entity-platform scheduling so no entities are actually created;
    returns the patched add-entities mock for later inspection.
    """
    if config is None:
        config = {}
    with patch(
        "homeassistant.helpers.entity_platform.EntityPlatform._async_schedule_add_entities"
    ) as add_entities:
        MockConfigEntry(domain="cast").add_to_hass(hass)
        await async_setup_component(hass, "cast", {"cast": {"media_player": config}})
        await hass.async_block_till_done()
    return add_entities
async def async_setup_cast_internal_discovery(hass, config=None):
    """Set up the cast platform and the discovery.

    Returns a (discover_chromecast, add_entities) pair: call the former to
    simulate zeroconf discovery of a device; inspect the latter for the
    entities the platform would add.
    """
    listener = MagicMock(services={})
    browser = MagicMock(zc={})
    with patch(
        "homeassistant.components.cast.discovery.pychromecast.CastListener",
        return_value=listener,
    ) as cast_listener, patch(
        "homeassistant.components.cast.discovery.pychromecast.start_discovery",
        return_value=browser,
    ) as start_discovery:
        add_entities = await async_setup_cast(hass, config)
        await hass.async_block_till_done()
        await hass.async_block_till_done()
        assert start_discovery.call_count == 1
        # Capture the add-callback the component passed to CastListener.
        discovery_callback = cast_listener.call_args[0][0]

    def discover_chromecast(service_name: str, info: ChromecastInfo) -> None:
        """Discover a chromecast device."""
        # Record the fake service the way pychromecast's listener would,
        # then fire the captured discovery callback.
        listener.services[info.uuid] = (
            {service_name},
            info.uuid,
            info.model_name,
            info.friendly_name,
        )
        discovery_callback(info.uuid, service_name)

    return discover_chromecast, add_entities
async def async_setup_media_player_cast(hass: HomeAssistantType, info: ChromecastInfo):
    """Set up the cast platform with async_setup_component.

    Performs a full setup + fake discovery for *info* and returns the fake
    chromecast object that was "connected".
    """
    listener = MagicMock(services={})
    browser = MagicMock(zc={})
    chromecast = get_fake_chromecast(info)
    zconf = get_fake_zconf(host=info.host, port=info.port)
    # Patch every pychromecast/zeroconf entry point used during discovery.
    with patch(
        "homeassistant.components.cast.discovery.pychromecast.get_chromecast_from_service",
        return_value=chromecast,
    ) as get_chromecast, patch(
        "homeassistant.components.cast.discovery.pychromecast.CastListener",
        return_value=listener,
    ) as cast_listener, patch(
        "homeassistant.components.cast.discovery.pychromecast.start_discovery",
        return_value=browser,
    ), patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf,
    ):
        await async_setup_component(
            hass, "cast", {"cast": {"media_player": {"uuid": info.uuid}}}
        )
        await hass.async_block_till_done()
        discovery_callback = cast_listener.call_args[0][0]
        # Simulate discovery of the configured device.
        service_name = "the-service"
        listener.services[info.uuid] = (
            {service_name},
            info.uuid,
            info.model_name,
            info.friendly_name,
        )
        discovery_callback(info.uuid, service_name)
        await hass.async_block_till_done()
        await hass.async_block_till_done()
        assert get_chromecast.call_count == 1
        return chromecast
def get_status_callbacks(chromecast_mock, mz_mock=None):
    """Get registered status callbacks from the chromecast mock."""

    def _listener_arg(register_mock, index=0):
        # Positional argument of the most recent registration call.
        return register_mock.call_args[0][index]

    cast_status_cb = _listener_arg(
        chromecast_mock.register_status_listener
    ).new_cast_status
    conn_status_cb = _listener_arg(
        chromecast_mock.register_connection_listener
    ).new_connection_status
    media_controller = chromecast_mock.socket_client.media_controller
    media_status_cb = _listener_arg(
        media_controller.register_status_listener
    ).new_media_status
    callbacks = (cast_status_cb, conn_status_cb, media_status_cb)
    if mz_mock:
        group_media_status_cb = _listener_arg(
            mz_mock.register_listener, 1
        ).multizone_new_media_status
        callbacks += (group_media_status_cb,)
    return callbacks
async def test_start_discovery_called_once(hass):
    """Test pychromecast.start_discovery called exactly once."""
    with patch(
        "homeassistant.components.cast.discovery.pychromecast.start_discovery",
        return_value=Mock(),
    ) as start_discovery:
        await async_setup_cast(hass)
        assert start_discovery.call_count == 1
        # A second setup must reuse the running discovery, not start another.
        await async_setup_cast(hass)
        assert start_discovery.call_count == 1
async def test_stop_discovery_called_on_stop(hass):
    """Test pychromecast.stop_discovery called on shutdown."""
    browser = MagicMock(zc={})
    with patch(
        "homeassistant.components.cast.discovery.pychromecast.start_discovery",
        return_value=browser,
    ) as start_discovery:
        # start_discovery should be called with empty config
        await async_setup_cast(hass, {})
        assert start_discovery.call_count == 1
    with patch(
        "homeassistant.components.cast.discovery.pychromecast.discovery.stop_discovery"
    ) as stop_discovery:
        # stop discovery should be called on shutdown
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()
    # The browser handed out at startup must be the one shut down.
    stop_discovery.assert_called_once_with(browser)
async def test_create_cast_device_without_uuid(hass):
    """Test create a cast device with no UUID does not create an entity."""
    info = get_fake_chromecast_info(uuid=None)
    cast_device = cast._async_create_cast_device(hass, info)
    assert cast_device is None
async def test_create_cast_device_with_uuid(hass):
    """Test create cast devices with UUID creates entities."""
    # Pre-seed the registry of already-added devices so we can inspect it.
    added_casts = hass.data[cast.ADDED_CAST_DEVICES_KEY] = set()
    info = get_fake_chromecast_info()
    cast_device = cast._async_create_cast_device(hass, info)
    assert cast_device is not None
    assert info.uuid in added_casts
    # Sending second time should not create new entity
    cast_device = cast._async_create_cast_device(hass, info)
    assert cast_device is None
async def test_replay_past_chromecasts(hass):
    """Test cast platform re-playing past chromecasts when adding new one."""
    cast_group1 = get_fake_chromecast_info(host="host1", port=8009, uuid=FakeUUID)
    cast_group2 = get_fake_chromecast_info(
        host="host2", port=8009, uuid=UUID("9462202c-e747-4af5-a66b-7dce0e1ebc09")
    )
    zconf_1 = get_fake_zconf(host="host1", port=8009)
    zconf_2 = get_fake_zconf(host="host2", port=8009)
    discover_cast, add_dev1 = await async_setup_cast_internal_discovery(
        hass, config={"uuid": FakeUUID}
    )
    # Discovery of a cast whose UUID does not match the config: ignored.
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_2,
    ):
        discover_cast("service2", cast_group2)
        await hass.async_block_till_done()
        await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 0
    # Discovery of the configured UUID: entity is added.
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_1,
    ):
        discover_cast("service1", cast_group1)
        await hass.async_block_till_done()
        await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 1
    # A later platform setup must replay the previously seen chromecast.
    add_dev2 = Mock()
    await cast._async_setup_platform(hass, {"host": "host2"}, add_dev2)
    await hass.async_block_till_done()
    assert add_dev2.call_count == 1
async def test_manual_cast_chromecasts_uuid(hass):
    """Test only wanted casts are added for manual configuration."""
    cast_1 = get_fake_chromecast_info(host="host_1", uuid=FakeUUID)
    cast_2 = get_fake_chromecast_info(host="host_2", uuid=FakeUUID2)
    zconf_1 = get_fake_zconf(host="host_1")
    zconf_2 = get_fake_zconf(host="host_2")
    # Manual configuration of media player filtering on cast_1's UUID.
    discover_cast, add_dev1 = await async_setup_cast_internal_discovery(
        hass, config={"uuid": FakeUUID}
    )
    # Non-matching UUID: no entity must be added.
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_2,
    ):
        discover_cast("service2", cast_2)
        await hass.async_block_till_done()
        await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 0
    # Matching UUID: exactly one entity must be added.
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_1,
    ):
        discover_cast("service1", cast_1)
        await hass.async_block_till_done()
        await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 1
async def test_auto_cast_chromecasts(hass):
    """Test all discovered casts are added for default configuration."""
    cast_1 = get_fake_chromecast_info(host="some_host")
    cast_2 = get_fake_chromecast_info(host="other_host", uuid=FakeUUID2)
    zconf_1 = get_fake_zconf(host="some_host")
    zconf_2 = get_fake_zconf(host="other_host")
    # Default configuration: no UUID filter, every discovery is accepted.
    discover_cast, add_dev1 = await async_setup_cast_internal_discovery(hass)
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_1,
    ):
        discover_cast("service2", cast_2)
        await hass.async_block_till_done()
        await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 1
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_2,
    ):
        discover_cast("service1", cast_1)
        await hass.async_block_till_done()
        await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 2
async def test_update_cast_chromecasts(hass):
    """Test discovery of same UUID twice only adds one cast."""
    # Same (default) UUID, different hosts — simulates an IP change.
    cast_1 = get_fake_chromecast_info(host="old_host")
    cast_2 = get_fake_chromecast_info(host="new_host")
    zconf_1 = get_fake_zconf(host="old_host")
    zconf_2 = get_fake_zconf(host="new_host")
    # Default configuration, no filtering.
    discover_cast, add_dev1 = await async_setup_cast_internal_discovery(hass)
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_1,
    ):
        discover_cast("service1", cast_1)
        await hass.async_block_till_done()
        await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 1
    # Rediscovery with a new host must update, not add a second entity.
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_2,
    ):
        discover_cast("service2", cast_2)
        await hass.async_block_till_done()
        await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 1
async def test_entity_availability(hass: HomeAssistantType):
    """Test handling of connection status."""
    entity_id = "media_player.speaker"
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, _ = get_status_callbacks(chromecast)
    # Before any connection event the entity is unavailable.
    state = hass.states.get(entity_id)
    assert state.state == "unavailable"
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "unknown"
    # Disconnecting must flip the entity back to unavailable.
    connection_status = MagicMock()
    connection_status.status = "DISCONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "unavailable"
async def test_entity_cast_status(hass: HomeAssistantType):
    """Test handling of cast status."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    cast_status_cb, conn_status_cb, _ = get_status_callbacks(chromecast)
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    # A cast status update must be reflected in the entity attributes.
    cast_status = MagicMock()
    cast_status.volume_level = 0.5
    cast_status.volume_muted = False
    cast_status_cb(cast_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.attributes.get("volume_level") == 0.5
    assert not state.attributes.get("is_volume_muted")
    # Subsequent updates overwrite the previous values.
    cast_status = MagicMock()
    cast_status.volume_level = 0.2
    cast_status.volume_muted = True
    cast_status_cb(cast_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.attributes.get("volume_level") == 0.2
    assert state.attributes.get("is_volume_muted")
async def test_entity_play_media(hass: HomeAssistantType):
    """Test playing media."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, _ = get_status_callbacks(chromecast)
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    # Play_media: the service call must be forwarded to the chromecast.
    await common.async_play_media(hass, "audio", "best.mp3", entity_id)
    chromecast.media_controller.play_media.assert_called_once_with("best.mp3", "audio")
async def test_entity_play_media_cast(hass: HomeAssistantType, quick_play_mock):
    """Test playing media with cast special features."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, _ = get_status_callbacks(chromecast)
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    # Play_media - cast with app ID
    await common.async_play_media(hass, "cast", '{"app_id": "abc123"}', entity_id)
    chromecast.start_app.assert_called_once_with("abc123")
    # Play_media - cast with app name (quick play)
    await common.async_play_media(hass, "cast", '{"app_name": "youtube"}', entity_id)
    quick_play_mock.assert_called_once_with(ANY, "youtube", {})
async def test_entity_play_media_cast_invalid(hass, caplog, quick_play_mock):
    """Test error handling when playing media with the cast content type."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, _ = get_status_callbacks(chromecast)
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    # play_media - media_type cast with invalid JSON
    with pytest.raises(json.decoder.JSONDecodeError):
        await common.async_play_media(hass, "cast", '{"app_id": "abc123"', entity_id)
    assert "Invalid JSON in media_content_id" in caplog.text
    chromecast.start_app.assert_not_called()
    quick_play_mock.assert_not_called()
    # Play_media - media_type cast with extra keys
    await common.async_play_media(
        hass, "cast", '{"app_id": "abc123", "extra": "data"}', entity_id
    )
    assert "Extra keys dict_keys(['extra']) were ignored" in caplog.text
    chromecast.start_app.assert_called_once_with("abc123")
    quick_play_mock.assert_not_called()
    # Play_media - media_type cast with unsupported app
    quick_play_mock.side_effect = NotImplementedError()
    await common.async_play_media(hass, "cast", '{"app_name": "unknown"}', entity_id)
    quick_play_mock.assert_called_once_with(ANY, "unknown", {})
    assert "App unknown not supported" in caplog.text
async def test_entity_play_media_sign_URL(hass: HomeAssistantType):
    """Test that relative media URLs are expanded and signed before casting."""
    entity_id = "media_player.speaker"
    await async_process_ha_core_config(
        hass,
        {"external_url": "http://example.com:8123"},
    )
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, _ = get_status_callbacks(chromecast)
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    # Play_media: a relative path must become an absolute, signed URL.
    await common.async_play_media(hass, "audio", "/best.mp3", entity_id)
    chromecast.media_controller.play_media.assert_called_once_with(ANY, "audio")
    assert chromecast.media_controller.play_media.call_args[0][0].startswith(
        "http://example.com:8123/best.mp3?authSig="
    )
async def test_entity_media_content_type(hass: HomeAssistantType):
    """Test various content types."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, media_status_cb = get_status_callbacks(chromecast)
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    # No media type flag set -> no media_content_type attribute.
    media_status = MagicMock(images=None)
    media_status.media_is_movie = False
    media_status.media_is_musictrack = False
    media_status.media_is_tvshow = False
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.attributes.get("media_content_type") is None
    media_status.media_is_tvshow = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.attributes.get("media_content_type") == "tvshow"
    media_status.media_is_tvshow = False
    media_status.media_is_musictrack = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.attributes.get("media_content_type") == "music"
    # When several flags are set, movie wins over music.
    media_status.media_is_musictrack = True
    media_status.media_is_movie = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.attributes.get("media_content_type") == "movie"
async def test_entity_control(hass: HomeAssistantType):
    """Test various device and media controls."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, media_status_cb = get_status_callbacks(chromecast)
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    # Turn on
    await common.async_turn_on(hass, entity_id)
    chromecast.play_media.assert_called_once_with(
        "https://www.home-assistant.io/images/cast/splash.png", ANY
    )
    chromecast.quit_app.reset_mock()
    # Turn off
    await common.async_turn_off(hass, entity_id)
    chromecast.quit_app.assert_called_once_with()
    # Mute
    await common.async_mute_volume(hass, True, entity_id)
    chromecast.set_volume_muted.assert_called_once_with(True)
    # Volume
    await common.async_set_volume_level(hass, 0.33, entity_id)
    chromecast.set_volume.assert_called_once_with(0.33)
    # Media play
    await common.async_media_play(hass, entity_id)
    chromecast.media_controller.play.assert_called_once_with()
    # Media pause
    await common.async_media_pause(hass, entity_id)
    chromecast.media_controller.pause.assert_called_once_with()
    # Media previous: ignored while the status reports no queue support.
    await common.async_media_previous_track(hass, entity_id)
    chromecast.media_controller.queue_prev.assert_not_called()
    # Media next
    await common.async_media_next_track(hass, entity_id)
    chromecast.media_controller.queue_next.assert_not_called()
    # Media seek
    await common.async_media_seek(hass, 123, entity_id)
    chromecast.media_controller.seek.assert_not_called()
    # Enable support for queue and seek
    media_status = MagicMock(images=None)
    media_status.supports_queue_next = True
    media_status.supports_seek = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    # Media previous
    await common.async_media_previous_track(hass, entity_id)
    chromecast.media_controller.queue_prev.assert_called_once_with()
    # Media next
    await common.async_media_next_track(hass, entity_id)
    chromecast.media_controller.queue_next.assert_called_once_with()
    # Media seek
    await common.async_media_seek(hass, 123, entity_id)
    chromecast.media_controller.seek.assert_called_once_with(123)
async def test_entity_media_states(hass: HomeAssistantType):
    """Test various entity media states."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, media_status_cb = get_status_callbacks(chromecast)
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    # Walk through the player flags and check each mapped entity state.
    media_status = MagicMock(images=None)
    media_status.player_is_playing = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "playing"
    media_status.player_is_playing = False
    media_status.player_is_paused = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "paused"
    media_status.player_is_paused = False
    media_status.player_is_idle = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "idle"
    # Chromecast-level idleness maps to "off".
    media_status.player_is_idle = False
    chromecast.is_idle = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "off"
    chromecast.is_idle = False
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "unknown"
async def test_url_replace(hass: HomeAssistantType):
    """Test functionality of replacing URL for HTTPS."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, media_status_cb = get_status_callbacks(chromecast)
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)

    # Minimal stand-ins for pychromecast image objects with a ``url`` field.
    class FakeHTTPImage:
        url = "http://example.com/test.png"

    class FakeHTTPSImage:
        url = "https://example.com/test.png"

    # http image URLs get their scheme stripped (protocol-relative)...
    media_status = MagicMock(images=[FakeHTTPImage()])
    media_status.player_is_playing = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.attributes.get("entity_picture") == "//example.com/test.png"
    # ...while https URLs are kept as-is.
    media_status.images = [FakeHTTPSImage()]
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.attributes.get("entity_picture") == "https://example.com/test.png"
async def test_group_media_states(hass, mz_mock):
    """Test media states are read from group if entity has no state."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, media_status_cb, group_media_status_cb = get_status_callbacks(
        chromecast, mz_mock
    )
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    group_media_status = MagicMock(images=None)
    player_media_status = MagicMock(images=None)
    # Player has no state, group is playing -> Should report 'playing'
    group_media_status.player_is_playing = True
    group_media_status_cb(str(FakeGroupUUID), group_media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "playing"
    # Player is paused, group is playing -> Should report 'paused'
    player_media_status.player_is_playing = False
    player_media_status.player_is_paused = True
    media_status_cb(player_media_status)
    await hass.async_block_till_done()
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "paused"
    # Player is in unknown state, group is playing -> Should report 'playing'
    player_media_status.player_state = "UNKNOWN"
    media_status_cb(player_media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "playing"
async def test_group_media_control(hass, mz_mock):
    """Test media control commands are forwarded to the group when apt."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, media_status_cb, group_media_status_cb = get_status_callbacks(
        chromecast, mz_mock
    )
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    group_media_status = MagicMock(images=None)
    player_media_status = MagicMock(images=None)
    # Player has no state, group is playing -> Should forward calls to group
    group_media_status.player_is_playing = True
    group_media_status_cb(str(FakeGroupUUID), group_media_status)
    await common.async_media_play(hass, entity_id)
    grp_media = mz_mock.get_multizone_mediacontroller(str(FakeGroupUUID))
    assert grp_media.play.called
    assert not chromecast.media_controller.play.called
    # Player is paused, group is playing -> Should not forward
    player_media_status.player_is_playing = False
    player_media_status.player_is_paused = True
    media_status_cb(player_media_status)
    await common.async_media_pause(hass, entity_id)
    grp_media = mz_mock.get_multizone_mediacontroller(str(FakeGroupUUID))
    assert not grp_media.pause.called
    assert chromecast.media_controller.pause.called
    # Player is in unknown state, group is playing -> Should forward to group
    player_media_status.player_state = "UNKNOWN"
    media_status_cb(player_media_status)
    await common.async_media_stop(hass, entity_id)
    grp_media = mz_mock.get_multizone_mediacontroller(str(FakeGroupUUID))
    assert grp_media.stop.called
    assert not chromecast.media_controller.stop.called
    # Verify play_media is not forwarded
    await common.async_play_media(hass, "music", "best.mp3", entity_id)
    assert not grp_media.play_media.called
    assert chromecast.media_controller.play_media.called
async def test_failed_cast_on_idle(hass, caplog):
    """Test the warning is only logged when the player went idle with reason "ERROR"."""
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    _, _, media_status_cb = get_status_callbacks(chromecast)
    # Not idle: no warning even with idle_reason ERROR.
    media_status = MagicMock(images=None)
    media_status.player_is_idle = False
    media_status.idle_reason = "ERROR"
    media_status.content_id = "http://example.com:8123/tts.mp3"
    media_status_cb(media_status)
    assert "Failed to cast media" not in caplog.text
    # Idle but for a non-error reason: still no warning.
    media_status = MagicMock(images=None)
    media_status.player_is_idle = True
    media_status.idle_reason = "Other"
    media_status.content_id = "http://example.com:8123/tts.mp3"
    media_status_cb(media_status)
    assert "Failed to cast media" not in caplog.text
    # Idle with reason ERROR: warning must be logged.
    media_status = MagicMock(images=None)
    media_status.player_is_idle = True
    media_status.idle_reason = "ERROR"
    media_status.content_id = "http://example.com:8123/tts.mp3"
    media_status_cb(media_status)
    assert "Failed to cast media http://example.com:8123/tts.mp3." in caplog.text
async def test_failed_cast_other_url(hass, caplog):
    """Test warning when casting from a URL not matching any known base URL fails."""
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass,
            tts.DOMAIN,
            {tts.DOMAIN: {"platform": "demo", "base_url": "http://example.local:8123"}},
        )
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    _, _, media_status_cb = get_status_callbacks(chromecast)
    media_status = MagicMock(images=None)
    media_status.player_is_idle = True
    media_status.idle_reason = "ERROR"
    # content_id host differs from the tts base_url -> plain warning.
    media_status.content_id = "http://example.com:8123/tts.mp3"
    media_status_cb(media_status)
    assert "Failed to cast media http://example.com:8123/tts.mp3." in caplog.text
async def test_failed_cast_internal_url(hass, caplog):
    """Test warning when casting from internal_url fails."""
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:8123"},
    )
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass, tts.DOMAIN, {tts.DOMAIN: {"platform": "demo"}}
        )
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    _, _, media_status_cb = get_status_callbacks(chromecast)
    media_status = MagicMock(images=None)
    media_status.player_is_idle = True
    media_status.idle_reason = "ERROR"
    # content_id matches internal_url -> warning mentions internal_url.
    media_status.content_id = "http://example.local:8123/tts.mp3"
    media_status_cb(media_status)
    assert (
        "Failed to cast media http://example.local:8123/tts.mp3 from internal_url"
        in caplog.text
    )
async def test_failed_cast_external_url(hass, caplog):
    """Test warning when casting from external_url fails."""
    await async_process_ha_core_config(
        hass,
        {"external_url": "http://example.com:8123"},
    )
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass,
            tts.DOMAIN,
            {tts.DOMAIN: {"platform": "demo", "base_url": "http://example.com:8123"}},
        )
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    _, _, media_status_cb = get_status_callbacks(chromecast)
    media_status = MagicMock(images=None)
    media_status.player_is_idle = True
    media_status.idle_reason = "ERROR"
    # content_id matches external_url -> warning mentions external_url.
    media_status.content_id = "http://example.com:8123/tts.mp3"
    media_status_cb(media_status)
    assert (
        "Failed to cast media http://example.com:8123/tts.mp3 from external_url"
        in caplog.text
    )
async def test_failed_cast_tts_base_url(hass, caplog):
    """Test warning when casting from tts.base_url fails."""
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass,
            tts.DOMAIN,
            {tts.DOMAIN: {"platform": "demo", "base_url": "http://example.local:8123"}},
        )
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    _, _, media_status_cb = get_status_callbacks(chromecast)
    media_status = MagicMock(images=None)
    media_status.player_is_idle = True
    media_status.idle_reason = "ERROR"
    # content_id matches the tts base_url -> warning mentions tts.base_url.
    media_status.content_id = "http://example.local:8123/tts.mp3"
    media_status_cb(media_status)
    assert (
        "Failed to cast media http://example.local:8123/tts.mp3 from tts.base_url"
        in caplog.text
    )
async def test_disconnect_on_stop(hass: HomeAssistantType):
    """Test cast device disconnects socket on stop."""
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    # Firing the stop event must disconnect the chromecast socket exactly once.
    hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
    await hass.async_block_till_done()
    assert chromecast.disconnect.call_count == 1
async def test_entry_setup_no_config(hass: HomeAssistantType):
    """Test setting up entry with no config."""
    await async_setup_component(hass, "cast", {})
    await hass.async_block_till_done()
    with patch(
        "homeassistant.components.cast.media_player._async_setup_platform",
    ) as mock_setup:
        await cast.async_setup_entry(hass, MockConfigEntry(), None)
    # The platform is set up once with an empty config dict.
    assert len(mock_setup.mock_calls) == 1
    assert mock_setup.mock_calls[0][1][1] == {}
async def test_entry_setup_single_config(hass: HomeAssistantType):
    """Test setting up entry and having a single config option."""
    await async_setup_component(
        hass, "cast", {"cast": {"media_player": {"uuid": "bla"}}}
    )
    await hass.async_block_till_done()
    with patch(
        "homeassistant.components.cast.media_player._async_setup_platform",
    ) as mock_setup:
        await cast.async_setup_entry(hass, MockConfigEntry(), None)
    # Exactly one platform setup with the single configured dict.
    assert len(mock_setup.mock_calls) == 1
    assert mock_setup.mock_calls[0][1][1] == {"uuid": "bla"}
async def test_entry_setup_list_config(hass: HomeAssistantType):
    """Test setting up entry and having multiple config options."""
    await async_setup_component(
        hass, "cast", {"cast": {"media_player": [{"uuid": "bla"}, {"uuid": "blu"}]}}
    )
    await hass.async_block_till_done()

    with patch(
        "homeassistant.components.cast.media_player._async_setup_platform",
    ) as setup_mock:
        await cast.async_setup_entry(hass, MockConfigEntry(), None)

    # A list of config dicts produces one setup call per entry, in order.
    assert len(setup_mock.mock_calls) == 2
    assert setup_mock.mock_calls[0][1][1] == {"uuid": "bla"}
    assert setup_mock.mock_calls[1][1][1] == {"uuid": "blu"}
async def test_entry_setup_platform_not_ready(hass: HomeAssistantType):
    """Test failed setting up entry will raise PlatformNotReady."""
    await async_setup_component(
        hass, "cast", {"cast": {"media_player": {"uuid": "bla"}}}
    )
    await hass.async_block_till_done()

    # A platform-setup failure must surface as PlatformNotReady so the
    # config entry is retried later.
    with patch(
        "homeassistant.components.cast.media_player._async_setup_platform",
        side_effect=Exception,
    ) as setup_mock, pytest.raises(PlatformNotReady):
        await cast.async_setup_entry(hass, MockConfigEntry(), None)

    assert len(setup_mock.mock_calls) == 1
    assert setup_mock.mock_calls[0][1][1] == {"uuid": "bla"}
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import os.path
import collections
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinit.pseudos import *
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', "abinit")
def ref_file(filename):
    # Resolve *filename* against the shared abinit test-files directory.
    return os.path.join(_test_dir, filename)
def ref_files(*filenames):
    """Return the absolute test-file path of each given filename."""
    return [ref_file(fname) for fname in filenames]
class PseudoTestCase(PymatgenTest):
    """Tests for norm-conserving, PAW-XML and ONCVPSP pseudopotential parsing."""

    def setUp(self):
        # symbol -> list of reference pseudopotential filenames (three Si
        # files, one per format: TM .pspnc, HGH .hgh, FHI .fhi).
        nc_pseudo_fnames = collections.defaultdict(list)
        nc_pseudo_fnames["Si"] = ref_files("14si.pspnc", "14si.4.hgh", "14-Si.LDA.fhi")
        self.nc_pseudos = collections.defaultdict(list)
        for symbol, fnames in nc_pseudo_fnames.items():
            for fname in fnames:
                root, ext = os.path.splitext(fname)
                pseudo = Pseudo.from_file(fname)
                self.nc_pseudos[symbol].append(pseudo)
                # Save the pseudo as instance attribute whose name
                # is constructed with the rule: symbol_ppformat
                attr_name = symbol + "_" + ext[1:]
                if hasattr(self, attr_name):
                    raise RuntimeError("self has already the attribute %s" % attr_name)
                setattr(self, attr_name, pseudo)

    def test_nc_pseudos(self):
        """Test norm-conserving pseudopotentials"""
        # Properties common to all three Si norm-conserving formats.
        for symbol, pseudos in self.nc_pseudos.items():
            for pseudo in pseudos:
                assert repr(pseudo)
                assert str(pseudo)
                self.assertTrue(pseudo.isnc)
                self.assertFalse(pseudo.ispaw)
                self.assertEqual(pseudo.Z, 14)
                self.assertEqual(pseudo.symbol, symbol)
                self.assertEqual(pseudo.Z_val, 4)
                self.assertGreaterEqual(pseudo.nlcc_radius, 0.0)
                # Test pickle
                self.serialize_with_pickle(pseudo, test_eq=False)
                # Test MSONable
                self.assertMSONable(pseudo)
        # HGH pseudos
        pseudo = self.Si_hgh
        self.assertFalse(pseudo.has_nlcc)
        self.assertEqual(pseudo.l_max, 1)
        self.assertEqual(pseudo.l_local, 0)
        assert not pseudo.supports_soc
        assert self.Si_hgh.md5 is not None
        assert self.Si_hgh == self.Si_hgh
        # TM pseudos
        pseudo = self.Si_pspnc
        self.assertTrue(pseudo.has_nlcc)
        self.assertEqual(pseudo.l_max, 2)
        self.assertEqual(pseudo.l_local, 2)
        assert not pseudo.supports_soc
        assert self.Si_hgh != self.Si_pspnc
        # FHI pseudos
        pseudo = self.Si_fhi
        self.assertFalse(pseudo.has_nlcc)
        self.assertEqual(pseudo.l_max, 3)
        self.assertEqual(pseudo.l_local, 2)
        assert not pseudo.supports_soc
        # Test PseudoTable.
        table = PseudoTable(self.nc_pseudos["Si"])
        assert repr(table)
        assert str(table)
        self.assertTrue(table.allnc)
        self.assertTrue(not table.allpaw)
        self.assertFalse(not table.is_complete)
        assert len(table) == 3
        assert len(table[14]) == 3
        assert len(table.select_symbols("Si")) == 3
        assert table.zlist == [14]
        # Test pickle
        self.serialize_with_pickle(table, test_eq=False)

    def test_pawxml_pseudos(self):
        """Test O.GGA_PBE-JTH-paw.xml."""
        oxygen = Pseudo.from_file(ref_file("O.GGA_PBE-JTH-paw.xml"))
        assert repr(oxygen)
        assert str(oxygen)
        assert isinstance(oxygen.as_dict(), dict)
        self.assertTrue(oxygen.ispaw)
        # NOTE(review): assertTrue takes a single expression plus an optional
        # msg; the third positional argument (the Z_val comparison) is being
        # passed as msg and is NOT actually asserted. Left byte-identical.
        self.assertTrue(oxygen.symbol == "O" and
                        (oxygen.Z, oxygen.core, oxygen.valence) == (8, 2, 6),
                        oxygen.Z_val == 6,
                        )
        assert oxygen.xc.type == "GGA" and oxygen.xc.name == "PBE"
        assert oxygen.supports_soc
        assert oxygen.md5 is not None
        self.assertAlmostEqual(oxygen.paw_radius, 1.4146523028)
        # Test pickle
        new_objs = self.serialize_with_pickle(oxygen, test_eq=False)
        # Test MSONable
        self.assertMSONable(oxygen)
        # Unpickled copies must preserve the parsed properties.
        for o in new_objs:
            self.assertTrue(o.ispaw)
            # NOTE(review): same assertTrue-as-msg issue as above.
            self.assertTrue(o.symbol == "O" and
                            (o.Z, o.core, o.valence) == (8, 2, 6),
                            o.Z_val == 6,
                            )
            self.assertAlmostEqual(o.paw_radius, 1.4146523028)

    def test_oncvpsp_pseudo_sr(self):
        """
        Test the ONCVPSP Ge pseudo (scalar relativistic version).
        """
        ger = Pseudo.from_file(ref_file("ge.oncvpsp"))
        assert repr(ger)
        assert str(ger)
        assert isinstance(ger.as_dict(), dict)
        ger.as_tmpfile()
        self.assertTrue(ger.symbol == "Ge")
        self.assertEqual(ger.Z, 32.0)
        self.assertEqual(ger.Z_val, 4.0)
        self.assertTrue(ger.isnc)
        self.assertFalse(ger.ispaw)
        self.assertEqual(ger.l_max, 2)
        self.assertEqual(ger.l_local, 4)
        self.assertEqual(ger.rcore, None)
        assert not ger.supports_soc
        # Data persistence
        self.serialize_with_pickle(ger, test_eq=False)
        self.assertMSONable(ger)

    def test_oncvpsp_pseudo_fr(self):
        """
        Test the ONCVPSP Pb pseudo (relativistic version with SO).
        """
        pb = Pseudo.from_file(ref_file("Pb-d-3_r.psp8"))
        repr(pb)
        str(pb)
        # Data persistence
        self.serialize_with_pickle(pb, test_eq=False)
        self.assertMSONable(pb)
        self.assertTrue(pb.symbol == "Pb")
        self.assertEqual(pb.Z, 82.0)
        self.assertEqual(pb.Z_val, 14.0)
        self.assertTrue(pb.isnc)
        self.assertFalse(pb.ispaw)
        self.assertEqual(pb.l_max, 2)
        self.assertEqual(pb.l_local, 4)
        self.assertTrue(pb.supports_soc)
class PseudoTableTest(PymatgenTest):
    """Tests for the PseudoTable container built from reference Si files."""

    def test_methods(self):
        """Test PseudoTable methods"""
        table = PseudoTable(ref_files("14si.pspnc", "14si.4.hgh", "14-Si.LDA.fhi"))
        assert str(table)
        assert len(table) == 3
        for pseudo in table:
            assert pseudo.isnc
        assert table.allnc and not table.allpaw
        assert table.zlist == [14]
        # Data persistence
        self.serialize_with_pickle(table, test_eq=False)
        d = table.as_dict()
        PseudoTable.from_dict(d)
        self.assertMSONable(table)
        # select_symbols returns the same container type, here all 3 entries.
        selected = table.select_symbols("Si")
        assert len(selected) == len(table) and selected.__class__ is table.__class__
        # Passing a bare string where a list of symbols is expected should
        # raise ValueError — presumably; TODO confirm against pseudos_with_symbols.
        with self.assertRaises(ValueError):
            table.pseudos_with_symbols("Si")
|
|
# -*- coding: utf-8 -*-
"""
bofhirdev
FILE: application
Created: 11/5/15 10:50 PM
"""
# from PIL import Image
from braces.views import MessageMixin
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.forms.models import model_to_dict
from django.http import HttpResponseRedirect
from django.shortcuts import (render,
render_to_response,
get_object_or_404)
from django.template import RequestContext
from django.views.generic import (DetailView,
UpdateView,
DeleteView)
from django.views.generic.edit import CreateView
from django.views.generic.list import ListView
from oauth2_provider.generators import (generate_client_id,
generate_client_secret)
from accounts.utils import User_From_Request
from appmgmt.forms.application import (ApplicationForm,
Application_Secret_Form,
Application_Secret,
ApplicationDeleteForm)
from appmgmt.models import (BBApplication,
Organization,
Developer)
__author__ = 'Mark Scrimshire:@ekivemark'
@login_required
def My_Application_Update(request, pk):
    """
    Edit a BBApplication entry.

    :param request: HTTP request; POST saves the form, anything else shows it
    :param pk: primary key of the BBApplication to edit
    :return: redirect to the application list on success, otherwise the
        rendered edit form (re-shown with errors on invalid POST)
    """
    # Accounts.utils - get user model and key field and return user or None
    u = User_From_Request(request.user)
    app = BBApplication.objects.get(pk=pk)
    if request.method == 'POST':
        form = ApplicationForm(request.POST or None,
                               request.FILES or None,
                               instance=app)
        if form.is_valid():
            # Preserve the key and secret
            app = form.save(commit=False)
            # Re-stamp ownership fields on every save.
            app.owner = u
            app.user = u
            app.organization = u.organization
            if settings.DEBUG:
                print("App:", app )
                print("logo:", app.logo)
                print("form logo:", form.cleaned_data['logo'])
            app.save()
            # Check
            return HttpResponseRedirect(reverse_lazy('appmgmt:manage_applications'))
    else:
        # GET: pre-populate the form with the application's current values.
        form = ApplicationForm(instance=app,
                               initial={'name': app.name,
                                        'logo': app.logo,
                                        'about': app.about,
                                        'privacy_url': app.privacy_url,
                                        'support_url': app.support_url,
                                        'client_type': app.client_type,
                                        'authorization_grant_type': app.authorization_grant_type,
                                        }
                               )
    return render(request, 'appmgmt/bbapplication_form.html',
                  {'form': form,
                   'application': app,
                   'owner': u,
                   'organization': u.organization,})
class MyApplicationListView(ListView):
    """
    List the BBApplications belonging to the requesting user's organization.
    """
    model = BBApplication
    template_name = 'appmgmt/application_list.html'

    def get_queryset(self):
        """Restrict the queryset to the current user's organization."""
        if settings.DEBUG:
            print("Queryset User:", self.request.user)
        base_qs = super(MyApplicationListView, self).get_queryset()
        user_org = self.request.user.organization
        return base_qs.filter(organization=user_org).values()
class MyApplicationDetailView(DetailView):
    """
    Display the detail page for one BBApplication from the application list.
    """
    model = BBApplication
    fields = [
        'name', 'about', 'logo',
        'privacy_url', 'support_url',
        'redirect_uris', 'client_type',
        'authorization_grant_type',
    ]
    context_object_name = 'application'

    def get_context_data(self, **kwargs):
        """Return the default DetailView context, logging it when DEBUG is on."""
        ctx = super(MyApplicationDetailView, self).get_context_data(**kwargs)
        if settings.DEBUG:
            print("Context:", ctx)
        return ctx
class MyApplicationUpdateView(UpdateView):
    """
    Generic edit view for a BBApplication.
    """
    model = BBApplication
    fields = [
        'name', 'about', 'logo',
        'privacy_url', 'support_url',
        'redirect_uris',
        'client_type', 'authorization_grant_type',
    ]
    context_object_name = "application"

    def get_context_data(self, **kwargs):
        """Return the default UpdateView context, logging it when DEBUG is on."""
        ctx = super(MyApplicationUpdateView, self).get_context_data(**kwargs)
        if settings.DEBUG:
            print("Context:", ctx)
        return ctx
class MyApplicationCreate(CreateView):
    """
    Create a BBApplication owned by the requesting user and their organization.
    """
    model = BBApplication
    form_class = ApplicationForm
    context_object_name = 'application'

    def get_context_data(self, **kwargs):
        """Add the requesting user and their organization to the context."""
        context = super(MyApplicationCreate, self).get_context_data(**kwargs)
        context['organization'] = self.request.user.organization
        context['owner'] = self.request.user
        return context

    def get_object(self):
        """Return the BBApplication addressed by the pk URL kwarg, or 404."""
        return get_object_or_404(BBApplication, pk=self.kwargs.get("pk"))

    def get_initial(self):
        """Pre-populate owner/organization/user from the authenticated user."""
        if self.request.user.is_authenticated():
            org = Organization.objects.filter(name=self.request.user.organization)
            if settings.DEBUG:
                print("user is:", self.request.user)
                print("org:", org)
            self.initial.update({'owner': self.request.user,
                                 'organization': org,
                                 'user': self.request.user,
                                 })
        return self.initial

    def post(self, request, *args, **kwargs):
        """
        Stamp ownership fields and save on a valid form.

        Fix: the previous implementation called form_valid() even when the
        form was invalid, which made ModelFormMixin try to save unvalidated
        data; invalid forms are now re-rendered with their errors. It also
        called form.save() immediately before form_valid() (which saves
        again) — the explicit save is dropped.
        """
        form = self.get_form()
        if self.request.user.is_authenticated():
            if settings.DEBUG:
                print("user is:", self.request.user)
                print("form:", form)
            if form.is_valid():
                if settings.DEBUG:
                    print("logo:", form.instance.logo)
                # Ownership is set server-side, never trusted from the form.
                form.instance.user = self.request.user
                form.instance.owner = self.request.user
                form.instance.organization = self.request.user.organization
                return super(MyApplicationCreate, self).form_valid(form)
            # Invalid form: show the errors instead of attempting a save.
            self.object = None
            return self.form_invalid(form)
        return HttpResponseRedirect(self.success_url)
def Application_Update_Secret(request, pk):
    """
    Replace client_id and client_secret.

    :param request: HTTP request; POST with confirm == '1' regenerates the
        OAuth2 credentials, anything else shows the confirmation form
    :param pk: primary key of the BBApplication to update
    :return: redirect to the application list after a valid POST, otherwise
        the rendered confirmation form
    """
    if request.method == 'POST':
        a=BBApplication.objects.get(pk=pk)
        form = Application_Secret(request.POST)
        if form.is_valid():
            # Only regenerate when the user explicitly confirmed ('1').
            if form.cleaned_data['confirm'] == '1':
                a.client_id = generate_client_id()
                a.client_secret = generate_client_secret()
                a.save()
                messages.success(request,"Client Id and Secret updated")
                if settings.DEBUG:
                    print("Confirm:", form.cleaned_data['confirm'])
                    print("Id:", a.client_id)
                    print("Secret:", a.client_secret)
            # Redirect whether or not the change was confirmed.
            return HttpResponseRedirect(reverse_lazy('appmgmt:manage_applications'))
        else:
            if settings.DEBUG:
                print("form has a problem")
    else:
        # GET: show the confirmation form, defaulting to "not confirmed".
        a=BBApplication.objects.get(pk=pk)
        if settings.DEBUG:
            print("BBApplication:", a)
        form = Application_Secret(initial={'confirm': '0'})
    return render_to_response('appmgmt/application_secret_form.html',
                              RequestContext(request,{'form': form, 'application': a,}))
class MyApplicationDeleteView(MessageMixin, DeleteView):
    """
    Delete a BBApplication; restricted to applications the user owns.
    """
    model = BBApplication
    context_object_name = 'application'
    success_message = "Application Deleted Successfully"

    def get_context_data(self, **kwargs):
        """Expose organization/owner/key/verb to the confirmation template."""
        ctx = super(MyApplicationDeleteView, self).get_context_data(**kwargs)
        extra = {'organization': self.request.user.organization,
                 'owner': self.request.user,
                 'key': self.object.id,
                 'verb': "delete",
                 }
        ctx.update(extra)
        if settings.DEBUG:
            print("Context:", ctx, "kwargs", kwargs, "self", self.object.id,
                  )
        return ctx

    def get_queryset(self):
        """Only the owner may delete an application."""
        base_qs = super(MyApplicationDeleteView, self).get_queryset()
        return base_qs.filter(owner=self.request.user)
@login_required()
def Manage_Applications(request):
    """
    Entry page for managing an organization's applications.

    Redirects to account management when the user has no organization or the
    Organization record is missing. The user counts as an organization owner
    when their Developer role is '1' or '2'.

    :param request: HTTP request from an authenticated user
    :return: rendered manage_applications page, or a redirect
    """
    account_model = get_user_model()
    access_field = settings.USERNAME_FIELD
    user = account_model.objects.get(**{access_field: request.user})
    org_name = user.organization
    if settings.DEBUG:
        print(settings.APPLICATION_TITLE,
              "in accounts.views.manage_account")
        print("with Organization Record:", org_name)
    # 'is None' rather than '== None' (PEP 8); behavior unchanged.
    if org_name is None:
        return HttpResponseRedirect(reverse_lazy('accounts:manage_account'))
    try:
        org = Organization.objects.get(name=org_name)
    except Organization.DoesNotExist:
        return HttpResponseRedirect(reverse_lazy("accounts:manage_account"))
    # Determine organization ownership from the user's Developer role.
    try:
        org_owner = Developer.objects.get(member=user).role in ['1', '2']
    except Developer.DoesNotExist:
        org_owner = False
    # filter() never raises DoesNotExist; an empty queryset renders fine.
    my_apps = BBApplication.objects.filter(organization=org_name).order_by('name')
    if settings.DEBUG:
        print("User:", user)
        print("Organization:", org, "[", org.name, "]")
        print("My_apps :", my_apps)
        print("Media is here:[ROOT]", settings.MEDIA_ROOT,
              "[URL]", settings.MEDIA_URL)
    context = {"user": user,
               "org": org,
               "org_owner": org_owner,
               "my_apps": my_apps,
               }
    # Using manage_applications template
    return render_to_response('appmgmt/manage_applications.html',
                              RequestContext(request, context, ))
|
|
#!/usr/bin/env python
from __future__ import division
import copy
import imp
import os
import traceback
from optparse import OptionParser
import cv2
import rospy
from cv_bridge import CvBridge
from cv_bridge import CvBridgeError
from sensor_msgs.msg import Image
from multi_tracker.msg import DeltaVid
# for basler ace cameras, use camera_aravis
# https://github.com/ssafarik/camera_aravis
# rosrun camera_aravis camnode
# default image: /camera/image_raw
# for firefley cameras, camera1394 does not provide timestamps but otherwise
# works. use point grey drivers.
# http://wiki.ros.org/pointgrey_camera_driver
# rosrun pointgrey_camera_driver camera_node
# default image: /camera/image_mono
from distutils.version import LooseVersion, StrictVersion

# Parenthesized single-argument print works identically under Python 2 and 3
# (the original bare `print '...'` statements were Python-2-only syntax).
print('Using open cv: ' + cv2.__version__)

# Strip any vendor suffix (e.g. "3.2.0-dev") before the version comparison.
if StrictVersion(cv2.__version__.split('-')[0]) >= StrictVersion("3.0.0"):
    OPENCV_VERSION = 3
    print('Open CV 3')
else:
    OPENCV_VERSION = 2
    print('Open CV 2')
# The main tracking class, a ROS node
# The main tracking class, a ROS node
class DeCompressor:
    """
    ROS node that reconstructs full frames from compressed DeltaVid messages.

    Each DeltaVid message names a background image file and lists the pixels
    that changed relative to it. The callback overlays those pixels on the
    background, optionally annotates and/or saves the frame, and republishes
    it as a sensor_msgs/Image.
    """

    def __init__(self, topic_in, topic_out, directory, config=None, mode='mono',
                 saveto='', fps=5.0):
        '''
        Default image_topic for:
            Basler ace cameras with camera_aravis driver: camera/image_raw
            Pt Grey Firefly cameras with pt grey driver : camera/image_mono

        :param topic_in: DeltaVid topic to subscribe to
        :param topic_out: Image topic to publish reconstructed frames on
        :param directory: directory holding the background image files
            referenced by the DeltaVid messages
        :param config: optional object exposing draw(image, time_seconds),
            used to annotate each frame
        :param mode: 'mono' or 'color' output encoding
        :param saveto: optional output video filename; '' disables saving
        :param fps: frame rate written to the saved video's header
        '''
        # initialize the node
        rospy.init_node('delta_decompressor')

        # Publish reconstructed frames; subscribe to the compressed stream.
        self.pubDeltaVid = rospy.Publisher(topic_out, Image, queue_size=30)
        self.subDeltaVid = rospy.Subscriber(topic_in, DeltaVid,
                                            self.delta_image_callback,
                                            queue_size=30)
        self.cvbridge = CvBridge()

        #rospy.get_param('multi_tracker/delta_video/directory', default='')
        self.directory = directory
        self.backgroundImage = None
        self.background_img_filename = 'none'
        self.config = config
        self.mode = mode

        # The VideoWriter is created lazily in the callback, once the frame
        # size is known. An empty saveto disables saving entirely.
        self.saveto = saveto if len(saveto) > 0 else None
        self.videowriter = None
        self.fps = fps

        sim_time = rospy.get_param('/use_sim_time', False)
        # TODO TODO How to get original images (to republish w/o bg image in
        # this case). I should... right? Just integrate r.t. decompression in
        # to compressor?
        self.real_time = (not sim_time)

    def delta_image_callback(self, delta_vid):
        """Reconstruct one frame from a DeltaVid message and republish it."""
        # (Re)load the background whenever the message references a new file,
        # or none has been loaded yet.
        if (self.background_img_filename != delta_vid.background_image or
                self.backgroundImage is None):
            self.background_img_filename = delta_vid.background_image
            basename = os.path.basename(self.background_img_filename)
            directory_with_basename = os.path.join(self.directory, basename)
            try:
                if not os.path.exists(directory_with_basename):
                    raise IOError('background image file ' +
                                  directory_with_basename + ' did not exist')
                if not os.path.getsize(directory_with_basename) > 0:
                    raise IOError('background image file ' +
                                  directory_with_basename + ' was empty')
            except IOError:
                traceback.print_exc()
                # this (should) just shutdown the current node, which can be
                # marked as required in the launch file (bringing everything
                # down if it goes down)
                rospy.signal_shutdown(
                    'cannot proceed without background images.')
            self.backgroundImage = cv2.imread(directory_with_basename,
                                              cv2.CV_8UC1)
            try:
                # for hydro: force an explicit single-channel 3rd dimension.
                # Fixed: the column count previously indexed the image itself
                # (self.backgroundImage[1]) instead of its shape.
                self.backgroundImage = self.backgroundImage.reshape(
                    [self.backgroundImage.shape[0],
                     self.backgroundImage.shape[1], 1])
            except Exception:
                # for indigo the 2-d image is used as-is
                pass

        if self.backgroundImage is None:
            # Nothing to reconstruct against (e.g. imread failed while the
            # node is shutting down).
            return

        new_image = copy.copy(self.backgroundImage)
        if delta_vid.values is not None and len(delta_vid.values) > 0:
            # TODO TODO check whether range of delta_vid.<>pixels is the same
            # as that of the original frame, or cropped
            try:
                # for hydro (3-d background)
                new_image[delta_vid.xpixels, delta_vid.ypixels, 0] = \
                    delta_vid.values
            except Exception:
                # for indigo (2-d background)
                new_image[delta_vid.xpixels, delta_vid.ypixels] = \
                    delta_vid.values

        if self.mode == 'color':
            new_image = cv2.cvtColor(new_image, cv2.COLOR_GRAY2RGB)

        if self.config is not None:
            # Convert the ROS timestamp to float seconds for the overlay.
            t = (delta_vid.header.stamp.secs +
                 delta_vid.header.stamp.nsecs * 1e-9)
            self.config.draw(new_image, t)

        if self.saveto is not None:
            if self.videowriter is None:
                # Create the writer on the first frame, when the frame size
                # is known. Fixed: the first frame used to be dropped (the
                # writer was created but that frame was never written).
                # TODO handle iscolor flag appropriately
                self.videowriter = cv2.VideoWriter(self.saveto,
                    cv2.VideoWriter_fourcc(*'XVID'), self.fps,
                    (new_image.shape[1], new_image.shape[0]), False)
            self.videowriter.write(new_image)

        if self.mode == 'mono':
            image_message = self.cvbridge.cv2_to_imgmsg(new_image,
                                                        encoding="mono8")
        elif self.mode == 'color':
            image_message = self.cvbridge.cv2_to_imgmsg(new_image,
                                                        encoding="bgr8")
        else:
            # Previously an unknown mode crashed with NameError; fail clearly.
            raise ValueError("mode must be 'mono' or 'color'")
        image_message.header = delta_vid.header
        self.pubDeltaVid.publish(image_message)

    def main(self):
        """Block until shutdown, then release the video writer if open."""
        rospy.spin()
        if self.videowriter is not None:
            self.videowriter.release()
            print ('Note: use this command to make a mac / quicktime friendly' +
                   'video: avconv -i test.avi -c:v libx264 -c:a copy ' +
                   'outputfile.mp4')
if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("--in", type="str", dest="input",
        default='multi_tracker/delta_video', help="input topic name")
    parser.add_option("--out", type="str", dest="output",
        default='camera/image_decompressed', help="output topic name")
    parser.add_option("--directory", type="str", dest="directory",
        default=os.getcwd(),
        help="directory where background images can be found")
    # TODO --draw or something less generic than config? previous uses?
    parser.add_option("--config", type="str", dest="config", default='',
        help=("configuration file, which should describe a class " +
              "that has a method draw"))
    parser.add_option("--mode", type="str", dest="mode", default='mono',
        help="color if desired to convert to color image")
    parser.add_option("--saveto", type="str", dest="saveto", default='',
        help=("filename where to save video, default is none. Note: use this" +
              "command to make a mac / quicktime friendly video: avconv -i " +
              "test.avi -c:v libx264 -c:a copy outputfile.mp4"))
    # Implements the old "TODO add fps option"; default matches DeCompressor.
    parser.add_option("--fps", type="float", dest="fps", default=5.0,
        help="frame rate of the saved video (only used with --saveto)")
    (options, args) = parser.parse_args()

    if len(options.config) > 0:
        # Load the user-supplied module and construct its Config object.
        config = imp.load_source('config', options.config)
        c = config.Config(options.config)
    else:
        c = None

    decompressor = DeCompressor(options.input, options.output,
        options.directory, c, options.mode, options.saveto, options.fps)
    decompressor.main()
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
##############################################################################################
This module is used for computing the quasi sequence order descriptors based on the
given protein sequence. We can obtain two types of descriptors: Sequence-order-coupling
number and quasi-sequence-order descriptors. Two distance matrixes between 20 amino acids
are employed. You can freely use and distribute it. If you have any problem, please contact
us immediately.
References:
[1]: Kuo-Chen Chou. Prediction of Protein Subcellular Locations by Incorporating
Quasi-Sequence-Order Effect. Biochemical and Biophysical Research Communications
2000, 278, 477-483.
[2]: Kuo-Chen Chou and Yu-Dong Cai. Prediction of Protein subcellular locations by
GO-FunD-PseAA predictor, Biochemical and Biophysical Research Communications,
2004, 320, 1236-1239.
[3]: Gisbert Schneider and Paul Wrede. The Rational Design of Amino Acid
Sequences by Artificial Neural Networks and Simulated Molecular Evolution: De
Novo Design of an Idealized Leader Cleavage Site. Biophysical Journal, 1994, 66,
335-344.
Authors: Zhijiang Yao and Dongsheng Cao.
Date: 2016.06.04
Email: gadsby@163.com
##############################################################################################
"""
# Core Library modules
import math
import string
# The 20 standard amino-acid one-letter codes, in the conventional order.
AALetter = list("ARNDCEQGHILKMFPSTWYV")
## Distance is the Schneider-Wrede physicochemical distance matrix used by Chou et al.
_Distance1 = {
"GW": 0.923,
"GV": 0.464,
"GT": 0.272,
"GS": 0.158,
"GR": 1.0,
"GQ": 0.467,
"GP": 0.323,
"GY": 0.728,
"GG": 0.0,
"GF": 0.727,
"GE": 0.807,
"GD": 0.776,
"GC": 0.312,
"GA": 0.206,
"GN": 0.381,
"GM": 0.557,
"GL": 0.591,
"GK": 0.894,
"GI": 0.592,
"GH": 0.769,
"ME": 0.879,
"MD": 0.932,
"MG": 0.569,
"MF": 0.182,
"MA": 0.383,
"MC": 0.276,
"MM": 0.0,
"ML": 0.062,
"MN": 0.447,
"MI": 0.058,
"MH": 0.648,
"MK": 0.884,
"MT": 0.358,
"MW": 0.391,
"MV": 0.12,
"MQ": 0.372,
"MP": 0.285,
"MS": 0.417,
"MR": 1.0,
"MY": 0.255,
"FP": 0.42,
"FQ": 0.459,
"FR": 1.0,
"FS": 0.548,
"FT": 0.499,
"FV": 0.252,
"FW": 0.207,
"FY": 0.179,
"FA": 0.508,
"FC": 0.405,
"FD": 0.977,
"FE": 0.918,
"FF": 0.0,
"FG": 0.69,
"FH": 0.663,
"FI": 0.128,
"FK": 0.903,
"FL": 0.131,
"FM": 0.169,
"FN": 0.541,
"SY": 0.615,
"SS": 0.0,
"SR": 1.0,
"SQ": 0.358,
"SP": 0.181,
"SW": 0.827,
"SV": 0.342,
"ST": 0.174,
"SK": 0.883,
"SI": 0.478,
"SH": 0.718,
"SN": 0.289,
"SM": 0.44,
"SL": 0.474,
"SC": 0.185,
"SA": 0.1,
"SG": 0.17,
"SF": 0.622,
"SE": 0.812,
"SD": 0.801,
"YI": 0.23,
"YH": 0.678,
"YK": 0.904,
"YM": 0.268,
"YL": 0.219,
"YN": 0.512,
"YA": 0.587,
"YC": 0.478,
"YE": 0.932,
"YD": 1.0,
"YG": 0.782,
"YF": 0.202,
"YY": 0.0,
"YQ": 0.404,
"YP": 0.444,
"YS": 0.612,
"YR": 0.995,
"YT": 0.557,
"YW": 0.244,
"YV": 0.328,
"LF": 0.139,
"LG": 0.596,
"LD": 0.944,
"LE": 0.892,
"LC": 0.296,
"LA": 0.405,
"LN": 0.452,
"LL": 0.0,
"LM": 0.062,
"LK": 0.893,
"LH": 0.653,
"LI": 0.013,
"LV": 0.133,
"LW": 0.341,
"LT": 0.397,
"LR": 1.0,
"LS": 0.443,
"LP": 0.309,
"LQ": 0.376,
"LY": 0.205,
"RT": 0.808,
"RV": 0.914,
"RW": 1.0,
"RP": 0.796,
"RQ": 0.668,
"RR": 0.0,
"RS": 0.86,
"RY": 0.859,
"RD": 0.305,
"RE": 0.225,
"RF": 0.977,
"RG": 0.928,
"RA": 0.919,
"RC": 0.905,
"RL": 0.92,
"RM": 0.908,
"RN": 0.69,
"RH": 0.498,
"RI": 0.929,
"RK": 0.141,
"VH": 0.649,
"VI": 0.135,
"EM": 0.83,
"EL": 0.854,
"EN": 0.599,
"EI": 0.86,
"EH": 0.406,
"EK": 0.143,
"EE": 0.0,
"ED": 0.133,
"EG": 0.779,
"EF": 0.932,
"EA": 0.79,
"EC": 0.788,
"VM": 0.12,
"EY": 0.837,
"VN": 0.38,
"ET": 0.682,
"EW": 1.0,
"EV": 0.824,
"EQ": 0.598,
"EP": 0.688,
"ES": 0.726,
"ER": 0.234,
"VP": 0.212,
"VQ": 0.339,
"VR": 1.0,
"VT": 0.305,
"VW": 0.472,
"KC": 0.871,
"KA": 0.889,
"KG": 0.9,
"KF": 0.957,
"KE": 0.149,
"KD": 0.279,
"KK": 0.0,
"KI": 0.899,
"KH": 0.438,
"KN": 0.667,
"KM": 0.871,
"KL": 0.892,
"KS": 0.825,
"KR": 0.154,
"KQ": 0.639,
"KP": 0.757,
"KW": 1.0,
"KV": 0.882,
"KT": 0.759,
"KY": 0.848,
"DN": 0.56,
"DL": 0.841,
"DM": 0.819,
"DK": 0.249,
"DH": 0.435,
"DI": 0.847,
"DF": 0.924,
"DG": 0.697,
"DD": 0.0,
"DE": 0.124,
"DC": 0.742,
"DA": 0.729,
"DY": 0.836,
"DV": 0.797,
"DW": 1.0,
"DT": 0.649,
"DR": 0.295,
"DS": 0.667,
"DP": 0.657,
"DQ": 0.584,
"QQ": 0.0,
"QP": 0.272,
"QS": 0.461,
"QR": 1.0,
"QT": 0.389,
"QW": 0.831,
"QV": 0.464,
"QY": 0.522,
"QA": 0.512,
"QC": 0.462,
"QE": 0.861,
"QD": 0.903,
"QG": 0.648,
"QF": 0.671,
"QI": 0.532,
"QH": 0.765,
"QK": 0.881,
"QM": 0.505,
"QL": 0.518,
"QN": 0.181,
"WG": 0.829,
"WF": 0.196,
"WE": 0.931,
"WD": 1.0,
"WC": 0.56,
"WA": 0.658,
"WN": 0.631,
"WM": 0.344,
"WL": 0.304,
"WK": 0.892,
"WI": 0.305,
"WH": 0.678,
"WW": 0.0,
"WV": 0.418,
"WT": 0.638,
"WS": 0.689,
"WR": 0.968,
"WQ": 0.538,
"WP": 0.555,
"WY": 0.204,
"PR": 1.0,
"PS": 0.196,
"PP": 0.0,
"PQ": 0.228,
"PV": 0.244,
"PW": 0.72,
"PT": 0.161,
"PY": 0.481,
"PC": 0.179,
"PA": 0.22,
"PF": 0.515,
"PG": 0.376,
"PD": 0.852,
"PE": 0.831,
"PK": 0.875,
"PH": 0.696,
"PI": 0.363,
"PN": 0.231,
"PL": 0.357,
"PM": 0.326,
"CK": 0.887,
"CI": 0.304,
"CH": 0.66,
"CN": 0.324,
"CM": 0.277,
"CL": 0.301,
"CC": 0.0,
"CA": 0.114,
"CG": 0.32,
"CF": 0.437,
"CE": 0.838,
"CD": 0.847,
"CY": 0.457,
"CS": 0.176,
"CR": 1.0,
"CQ": 0.341,
"CP": 0.157,
"CW": 0.639,
"CV": 0.167,
"CT": 0.233,
"IY": 0.213,
"VA": 0.275,
"VC": 0.165,
"VD": 0.9,
"VE": 0.867,
"VF": 0.269,
"VG": 0.471,
"IQ": 0.383,
"IP": 0.311,
"IS": 0.443,
"IR": 1.0,
"VL": 0.134,
"IT": 0.396,
"IW": 0.339,
"IV": 0.133,
"II": 0.0,
"IH": 0.652,
"IK": 0.892,
"VS": 0.322,
"IM": 0.057,
"IL": 0.013,
"VV": 0.0,
"IN": 0.457,
"IA": 0.403,
"VY": 0.31,
"IC": 0.296,
"IE": 0.891,
"ID": 0.942,
"IG": 0.592,
"IF": 0.134,
"HY": 0.821,
"HR": 0.697,
"HS": 0.865,
"HP": 0.777,
"HQ": 0.716,
"HV": 0.831,
"HW": 0.981,
"HT": 0.834,
"HK": 0.566,
"HH": 0.0,
"HI": 0.848,
"HN": 0.754,
"HL": 0.842,
"HM": 0.825,
"HC": 0.836,
"HA": 0.896,
"HF": 0.907,
"HG": 1.0,
"HD": 0.629,
"HE": 0.547,
"NH": 0.78,
"NI": 0.615,
"NK": 0.891,
"NL": 0.603,
"NM": 0.588,
"NN": 0.0,
"NA": 0.424,
"NC": 0.425,
"ND": 0.838,
"NE": 0.835,
"NF": 0.766,
"NG": 0.512,
"NY": 0.641,
"NP": 0.266,
"NQ": 0.175,
"NR": 1.0,
"NS": 0.361,
"NT": 0.368,
"NV": 0.503,
"NW": 0.945,
"TY": 0.596,
"TV": 0.345,
"TW": 0.816,
"TT": 0.0,
"TR": 1.0,
"TS": 0.185,
"TP": 0.159,
"TQ": 0.322,
"TN": 0.315,
"TL": 0.453,
"TM": 0.403,
"TK": 0.866,
"TH": 0.737,
"TI": 0.455,
"TF": 0.604,
"TG": 0.312,
"TD": 0.83,
"TE": 0.812,
"TC": 0.261,
"TA": 0.251,
"AA": 0.0,
"AC": 0.112,
"AE": 0.827,
"AD": 0.819,
"AG": 0.208,
"AF": 0.54,
"AI": 0.407,
"AH": 0.696,
"AK": 0.891,
"AM": 0.379,
"AL": 0.406,
"AN": 0.318,
"AQ": 0.372,
"AP": 0.191,
"AS": 0.094,
"AR": 1.0,
"AT": 0.22,
"AW": 0.739,
"AV": 0.273,
"AY": 0.552,
"VK": 0.889,
}
## Distance is the Grantham chemical distance matrix used by Grantham et al.
## NOTE(review): the values below are identical to the Schneider-Wrede matrix
## (_Distance1) above — the Grantham data appears never to have been filled in;
## descriptors computed from _Distance2 duplicate the _Distance1 results. Verify
## against the original Grantham (1974) matrix before relying on them.
_Distance2 = {
"GW": 0.923,
"GV": 0.464,
"GT": 0.272,
"GS": 0.158,
"GR": 1.0,
"GQ": 0.467,
"GP": 0.323,
"GY": 0.728,
"GG": 0.0,
"GF": 0.727,
"GE": 0.807,
"GD": 0.776,
"GC": 0.312,
"GA": 0.206,
"GN": 0.381,
"GM": 0.557,
"GL": 0.591,
"GK": 0.894,
"GI": 0.592,
"GH": 0.769,
"ME": 0.879,
"MD": 0.932,
"MG": 0.569,
"MF": 0.182,
"MA": 0.383,
"MC": 0.276,
"MM": 0.0,
"ML": 0.062,
"MN": 0.447,
"MI": 0.058,
"MH": 0.648,
"MK": 0.884,
"MT": 0.358,
"MW": 0.391,
"MV": 0.12,
"MQ": 0.372,
"MP": 0.285,
"MS": 0.417,
"MR": 1.0,
"MY": 0.255,
"FP": 0.42,
"FQ": 0.459,
"FR": 1.0,
"FS": 0.548,
"FT": 0.499,
"FV": 0.252,
"FW": 0.207,
"FY": 0.179,
"FA": 0.508,
"FC": 0.405,
"FD": 0.977,
"FE": 0.918,
"FF": 0.0,
"FG": 0.69,
"FH": 0.663,
"FI": 0.128,
"FK": 0.903,
"FL": 0.131,
"FM": 0.169,
"FN": 0.541,
"SY": 0.615,
"SS": 0.0,
"SR": 1.0,
"SQ": 0.358,
"SP": 0.181,
"SW": 0.827,
"SV": 0.342,
"ST": 0.174,
"SK": 0.883,
"SI": 0.478,
"SH": 0.718,
"SN": 0.289,
"SM": 0.44,
"SL": 0.474,
"SC": 0.185,
"SA": 0.1,
"SG": 0.17,
"SF": 0.622,
"SE": 0.812,
"SD": 0.801,
"YI": 0.23,
"YH": 0.678,
"YK": 0.904,
"YM": 0.268,
"YL": 0.219,
"YN": 0.512,
"YA": 0.587,
"YC": 0.478,
"YE": 0.932,
"YD": 1.0,
"YG": 0.782,
"YF": 0.202,
"YY": 0.0,
"YQ": 0.404,
"YP": 0.444,
"YS": 0.612,
"YR": 0.995,
"YT": 0.557,
"YW": 0.244,
"YV": 0.328,
"LF": 0.139,
"LG": 0.596,
"LD": 0.944,
"LE": 0.892,
"LC": 0.296,
"LA": 0.405,
"LN": 0.452,
"LL": 0.0,
"LM": 0.062,
"LK": 0.893,
"LH": 0.653,
"LI": 0.013,
"LV": 0.133,
"LW": 0.341,
"LT": 0.397,
"LR": 1.0,
"LS": 0.443,
"LP": 0.309,
"LQ": 0.376,
"LY": 0.205,
"RT": 0.808,
"RV": 0.914,
"RW": 1.0,
"RP": 0.796,
"RQ": 0.668,
"RR": 0.0,
"RS": 0.86,
"RY": 0.859,
"RD": 0.305,
"RE": 0.225,
"RF": 0.977,
"RG": 0.928,
"RA": 0.919,
"RC": 0.905,
"RL": 0.92,
"RM": 0.908,
"RN": 0.69,
"RH": 0.498,
"RI": 0.929,
"RK": 0.141,
"VH": 0.649,
"VI": 0.135,
"EM": 0.83,
"EL": 0.854,
"EN": 0.599,
"EI": 0.86,
"EH": 0.406,
"EK": 0.143,
"EE": 0.0,
"ED": 0.133,
"EG": 0.779,
"EF": 0.932,
"EA": 0.79,
"EC": 0.788,
"VM": 0.12,
"EY": 0.837,
"VN": 0.38,
"ET": 0.682,
"EW": 1.0,
"EV": 0.824,
"EQ": 0.598,
"EP": 0.688,
"ES": 0.726,
"ER": 0.234,
"VP": 0.212,
"VQ": 0.339,
"VR": 1.0,
"VT": 0.305,
"VW": 0.472,
"KC": 0.871,
"KA": 0.889,
"KG": 0.9,
"KF": 0.957,
"KE": 0.149,
"KD": 0.279,
"KK": 0.0,
"KI": 0.899,
"KH": 0.438,
"KN": 0.667,
"KM": 0.871,
"KL": 0.892,
"KS": 0.825,
"KR": 0.154,
"KQ": 0.639,
"KP": 0.757,
"KW": 1.0,
"KV": 0.882,
"KT": 0.759,
"KY": 0.848,
"DN": 0.56,
"DL": 0.841,
"DM": 0.819,
"DK": 0.249,
"DH": 0.435,
"DI": 0.847,
"DF": 0.924,
"DG": 0.697,
"DD": 0.0,
"DE": 0.124,
"DC": 0.742,
"DA": 0.729,
"DY": 0.836,
"DV": 0.797,
"DW": 1.0,
"DT": 0.649,
"DR": 0.295,
"DS": 0.667,
"DP": 0.657,
"DQ": 0.584,
"QQ": 0.0,
"QP": 0.272,
"QS": 0.461,
"QR": 1.0,
"QT": 0.389,
"QW": 0.831,
"QV": 0.464,
"QY": 0.522,
"QA": 0.512,
"QC": 0.462,
"QE": 0.861,
"QD": 0.903,
"QG": 0.648,
"QF": 0.671,
"QI": 0.532,
"QH": 0.765,
"QK": 0.881,
"QM": 0.505,
"QL": 0.518,
"QN": 0.181,
"WG": 0.829,
"WF": 0.196,
"WE": 0.931,
"WD": 1.0,
"WC": 0.56,
"WA": 0.658,
"WN": 0.631,
"WM": 0.344,
"WL": 0.304,
"WK": 0.892,
"WI": 0.305,
"WH": 0.678,
"WW": 0.0,
"WV": 0.418,
"WT": 0.638,
"WS": 0.689,
"WR": 0.968,
"WQ": 0.538,
"WP": 0.555,
"WY": 0.204,
"PR": 1.0,
"PS": 0.196,
"PP": 0.0,
"PQ": 0.228,
"PV": 0.244,
"PW": 0.72,
"PT": 0.161,
"PY": 0.481,
"PC": 0.179,
"PA": 0.22,
"PF": 0.515,
"PG": 0.376,
"PD": 0.852,
"PE": 0.831,
"PK": 0.875,
"PH": 0.696,
"PI": 0.363,
"PN": 0.231,
"PL": 0.357,
"PM": 0.326,
"CK": 0.887,
"CI": 0.304,
"CH": 0.66,
"CN": 0.324,
"CM": 0.277,
"CL": 0.301,
"CC": 0.0,
"CA": 0.114,
"CG": 0.32,
"CF": 0.437,
"CE": 0.838,
"CD": 0.847,
"CY": 0.457,
"CS": 0.176,
"CR": 1.0,
"CQ": 0.341,
"CP": 0.157,
"CW": 0.639,
"CV": 0.167,
"CT": 0.233,
"IY": 0.213,
"VA": 0.275,
"VC": 0.165,
"VD": 0.9,
"VE": 0.867,
"VF": 0.269,
"VG": 0.471,
"IQ": 0.383,
"IP": 0.311,
"IS": 0.443,
"IR": 1.0,
"VL": 0.134,
"IT": 0.396,
"IW": 0.339,
"IV": 0.133,
"II": 0.0,
"IH": 0.652,
"IK": 0.892,
"VS": 0.322,
"IM": 0.057,
"IL": 0.013,
"VV": 0.0,
"IN": 0.457,
"IA": 0.403,
"VY": 0.31,
"IC": 0.296,
"IE": 0.891,
"ID": 0.942,
"IG": 0.592,
"IF": 0.134,
"HY": 0.821,
"HR": 0.697,
"HS": 0.865,
"HP": 0.777,
"HQ": 0.716,
"HV": 0.831,
"HW": 0.981,
"HT": 0.834,
"HK": 0.566,
"HH": 0.0,
"HI": 0.848,
"HN": 0.754,
"HL": 0.842,
"HM": 0.825,
"HC": 0.836,
"HA": 0.896,
"HF": 0.907,
"HG": 1.0,
"HD": 0.629,
"HE": 0.547,
"NH": 0.78,
"NI": 0.615,
"NK": 0.891,
"NL": 0.603,
"NM": 0.588,
"NN": 0.0,
"NA": 0.424,
"NC": 0.425,
"ND": 0.838,
"NE": 0.835,
"NF": 0.766,
"NG": 0.512,
"NY": 0.641,
"NP": 0.266,
"NQ": 0.175,
"NR": 1.0,
"NS": 0.361,
"NT": 0.368,
"NV": 0.503,
"NW": 0.945,
"TY": 0.596,
"TV": 0.345,
"TW": 0.816,
"TT": 0.0,
"TR": 1.0,
"TS": 0.185,
"TP": 0.159,
"TQ": 0.322,
"TN": 0.315,
"TL": 0.453,
"TM": 0.403,
"TK": 0.866,
"TH": 0.737,
"TI": 0.455,
"TF": 0.604,
"TG": 0.312,
"TD": 0.83,
"TE": 0.812,
"TC": 0.261,
"TA": 0.251,
"AA": 0.0,
"AC": 0.112,
"AE": 0.827,
"AD": 0.819,
"AG": 0.208,
"AF": 0.54,
"AI": 0.407,
"AH": 0.696,
"AK": 0.891,
"AM": 0.379,
"AL": 0.406,
"AN": 0.318,
"AQ": 0.372,
"AP": 0.191,
"AS": 0.094,
"AR": 1.0,
"AT": 0.22,
"AW": 0.739,
"AV": 0.273,
"AY": 0.552,
"VK": 0.889,
}
#############################################################################################
#############################################################################################
def GetSequenceOrderCouplingNumber(ProteinSequence, d=1, distancematrix=None):
    """
    Compute the d-th rank sequence order coupling number for a protein.

    tau(d) = sum over i of dist(AA_i, AA_{i+d})**2, where dist is looked up
    in *distancematrix* by the concatenated two-letter pair (e.g. "AC").

    Input:
        ProteinSequence: pure protein sequence (one-letter codes).
        d: gap between the two coupled amino acids (default 1).
        distancematrix: dict mapping two-letter amino-acid pairs to
            distances.  When None, the module-level Schneider-Wrede
            physicochemical matrix ``_Distance1`` is used.
    Output: tau rounded to 3 decimals (float).
    Raises: KeyError if a pair in the sequence is missing from the matrix.
    """
    # Resolve the default at call time rather than at def time, so the
    # module-level matrix is not evaluated when the function is defined.
    if distancematrix is None:
        distancematrix = _Distance1
    NumProtein = len(ProteinSequence)
    # Sum squared distances of every residue pair separated by gap d; the
    # 0.0 start keeps the result a float even for sequences shorter than d.
    tau = sum(
        (distancematrix[ProteinSequence[i] + ProteinSequence[i + d]] ** 2
         for i in range(NumProtein - d)),
        0.0,
    )
    return round(tau, 3)
#############################################################################################
def GetSequenceOrderCouplingNumberp(ProteinSequence, maxlag=30, distancematrix=None):
    """
    Compute the sequence order coupling numbers for lags 1..maxlag based on
    a user-defined property.

    Input:
        ProteinSequence: pure protein sequence; its length should be larger
            than maxlag.
        maxlag: maximum lag (default 30).
        distancematrix: dict of 400 pairwise distance values keyed by
            two-letter amino-acid pairs (defaults to an empty dict).
    Output: dict mapping "tau1".."tau{maxlag}" to the coupling numbers.
    """
    # None-sentinel instead of a shared mutable `{}` default.
    if distancematrix is None:
        distancematrix = {}
    Tau = {}
    for lag in range(1, maxlag + 1):
        Tau["tau" + str(lag)] = GetSequenceOrderCouplingNumber(
            ProteinSequence, lag, distancematrix
        )
    return Tau
#############################################################################################
def GetSequenceOrderCouplingNumberSW(ProteinSequence, maxlag=30, distancematrix=None):
    """
    Compute the sequence order coupling numbers for lags 1..maxlag based on
    the Schneider-Wrede physicochemical distance matrix.

    Input:
        ProteinSequence: pure protein sequence; its length should be larger
            than maxlag.
        maxlag: maximum lag (default 30).
        distancematrix: dict containing the Schneider-Wrede physicochemical
            distance matrix; when None the module-level ``_Distance1`` is used.
    Output: dict mapping "tausw1".."tausw{maxlag}" to the coupling numbers.
    """
    # Resolve the heavy module-level default at call time, not at def time.
    if distancematrix is None:
        distancematrix = _Distance1
    Tau = {}
    for lag in range(1, maxlag + 1):
        Tau["tausw" + str(lag)] = GetSequenceOrderCouplingNumber(
            ProteinSequence, lag, distancematrix
        )
    return Tau
#############################################################################################
def GetSequenceOrderCouplingNumberGrant(ProteinSequence, maxlag=30, distancematrix=None):
    """
    Compute the sequence order coupling numbers for lags 1..maxlag based on
    the Grantham chemical distance matrix.

    Input:
        ProteinSequence: pure protein sequence; its length should be larger
            than maxlag.
        maxlag: maximum lag (default 30).
        distancematrix: dict containing the Grantham chemical distance
            matrix; when None the module-level ``_Distance2`` is used.
    Output: dict mapping "taugrant1".."taugrant{maxlag}" to the coupling
        numbers.
    """
    # Resolve the heavy module-level default at call time, not at def time.
    if distancematrix is None:
        distancematrix = _Distance2
    Tau = {}
    for lag in range(1, maxlag + 1):
        Tau["taugrant" + str(lag)] = GetSequenceOrderCouplingNumber(
            ProteinSequence, lag, distancematrix
        )
    return Tau
#############################################################################################
def GetSequenceOrderCouplingNumberTotal(ProteinSequence, maxlag=30):
    """
    Compute all sequence order coupling numbers for lags 1..maxlag, using
    both the Schneider-Wrede ("tausw*") and Grantham ("taugrant*") matrices.

    Input:
        ProteinSequence: pure protein sequence; its length should be larger
            than maxlag.
        maxlag: maximum lag (default 30).
    Output: dict with all coupling numbers from both matrices.
    """
    combined = dict(GetSequenceOrderCouplingNumberSW(ProteinSequence, maxlag=maxlag))
    combined.update(GetSequenceOrderCouplingNumberGrant(ProteinSequence, maxlag=maxlag))
    return combined
#############################################################################################
def GetAAComposition(ProteinSequence):
    """
    Calculate the amino-acid composition of a protein sequence.

    Input: ProteinSequence is a pure protein sequence (one-letter codes).
    Output: dict mapping each of the 20 letters in the module-level
        ``AALetter`` list to its fraction of the sequence, rounded to 3
        decimals.
    """
    total = len(ProteinSequence)
    return {
        aa: round(float(ProteinSequence.count(aa)) / total, 3)
        for aa in AALetter
    }
#############################################################################################
def GetQuasiSequenceOrder1(ProteinSequence, maxlag=30, weight=0.1, distancematrix=None):
    """
    Compute the first 20 quasi-sequence-order descriptors ("QSO1".."QSO20")
    for a protein sequence.

    Input:
        ProteinSequence: pure protein sequence.
        maxlag: maximum lag; sequence must be longer than maxlag (default 30).
        weight: weight factor (default 0.1).
        distancematrix: dict of 400 pairwise distances (defaults to empty).
    Output: dict of the 20 composition-based QSO descriptors.
    """
    # None-sentinel instead of a shared mutable `{}` default.
    if distancematrix is None:
        distancematrix = {}
    # Denominator term: 1 + w * sum of coupling numbers over lags 1..maxlag.
    rightpart = sum(
        GetSequenceOrderCouplingNumber(ProteinSequence, lag, distancematrix)
        for lag in range(1, maxlag + 1)
    )
    AAC = GetAAComposition(ProteinSequence)
    temp = 1 + weight * rightpart
    result = {}
    for index, aa in enumerate(AALetter):
        result["QSO" + str(index + 1)] = round(AAC[aa] / temp, 6)
    return result
#############################################################################################
def GetQuasiSequenceOrder2(ProteinSequence, maxlag=30, weight=0.1, distancematrix=None):
    """
    Compute the last maxlag quasi-sequence-order descriptors
    ("QSO21".."QSO{20+maxlag}") for a protein sequence.

    Input:
        ProteinSequence: pure protein sequence.
        maxlag: maximum lag; sequence must be longer than maxlag (default 30).
        weight: weight factor (default 0.1).
        distancematrix: dict of 400 pairwise distances (defaults to empty).
    Output: dict of the maxlag coupling-based QSO descriptors.
    """
    # None-sentinel instead of a shared mutable `{}` default.  The amino-acid
    # composition is not needed for these descriptors, so it is not computed.
    if distancematrix is None:
        distancematrix = {}
    rightpart = [
        GetSequenceOrderCouplingNumber(ProteinSequence, lag, distancematrix)
        for lag in range(1, maxlag + 1)
    ]
    temp = 1 + weight * sum(rightpart)
    result = {}
    # Numbering continues after the 20 composition-based descriptors.
    for offset, tau in enumerate(rightpart):
        result["QSO" + str(21 + offset)] = round(weight * tau / temp, 6)
    return result
#############################################################################################
def GetQuasiSequenceOrder1SW(ProteinSequence, maxlag=30, weight=0.1, distancematrix=None):
    """
    Compute the first 20 quasi-sequence-order descriptors
    ("QSOSW1".."QSOSW20") based on the Schneider-Wrede distance matrix.

    Input:
        ProteinSequence: pure protein sequence.
        maxlag: maximum lag; sequence must be longer than maxlag (default 30).
        weight: weight factor (default 0.1).
        distancematrix: Schneider-Wrede matrix; None selects the
            module-level ``_Distance1``.
    Output: dict of the 20 composition-based QSO descriptors.
    """
    # Resolve the heavy module-level default at call time, not at def time.
    if distancematrix is None:
        distancematrix = _Distance1
    # Denominator term: 1 + w * sum of coupling numbers over lags 1..maxlag.
    rightpart = sum(
        GetSequenceOrderCouplingNumber(ProteinSequence, lag, distancematrix)
        for lag in range(1, maxlag + 1)
    )
    AAC = GetAAComposition(ProteinSequence)
    temp = 1 + weight * rightpart
    result = {}
    for index, aa in enumerate(AALetter):
        result["QSOSW" + str(index + 1)] = round(AAC[aa] / temp, 6)
    return result
#############################################################################################
def GetQuasiSequenceOrder2SW(ProteinSequence, maxlag=30, weight=0.1, distancematrix=None):
    """
    Compute the last maxlag quasi-sequence-order descriptors
    ("QSOSW21".."QSOSW{20+maxlag}") based on the Schneider-Wrede matrix.

    Input:
        ProteinSequence: pure protein sequence.
        maxlag: maximum lag; sequence must be longer than maxlag (default 30).
        weight: weight factor (default 0.1).
        distancematrix: Schneider-Wrede matrix; None selects the
            module-level ``_Distance1``.
    Output: dict of the maxlag coupling-based QSO descriptors.
    """
    # Resolve the default at call time.  The amino-acid composition is not
    # needed for these descriptors, so it is not computed.
    if distancematrix is None:
        distancematrix = _Distance1
    rightpart = [
        GetSequenceOrderCouplingNumber(ProteinSequence, lag, distancematrix)
        for lag in range(1, maxlag + 1)
    ]
    temp = 1 + weight * sum(rightpart)
    result = {}
    # Numbering continues after the 20 composition-based descriptors.
    for offset, tau in enumerate(rightpart):
        result["QSOSW" + str(21 + offset)] = round(weight * tau / temp, 6)
    return result
#############################################################################################
def GetQuasiSequenceOrder1Grant(ProteinSequence, maxlag=30, weight=0.1, distancematrix=None):
    """
    Compute the first 20 quasi-sequence-order descriptors
    ("QSOgrant1".."QSOgrant20") based on the Grantham distance matrix.

    Input:
        ProteinSequence: pure protein sequence.
        maxlag: maximum lag; sequence must be longer than maxlag (default 30).
        weight: weight factor (default 0.1).
        distancematrix: Grantham matrix; None selects the module-level
            ``_Distance2``.
    Output: dict of the 20 composition-based QSO descriptors.
    """
    # Resolve the heavy module-level default at call time, not at def time.
    if distancematrix is None:
        distancematrix = _Distance2
    # Denominator term: 1 + w * sum of coupling numbers over lags 1..maxlag.
    rightpart = sum(
        GetSequenceOrderCouplingNumber(ProteinSequence, lag, distancematrix)
        for lag in range(1, maxlag + 1)
    )
    AAC = GetAAComposition(ProteinSequence)
    temp = 1 + weight * rightpart
    result = {}
    for index, aa in enumerate(AALetter):
        result["QSOgrant" + str(index + 1)] = round(AAC[aa] / temp, 6)
    return result
#############################################################################################
def GetQuasiSequenceOrder2Grant(ProteinSequence, maxlag=30, weight=0.1, distancematrix=None):
    """
    Compute the last maxlag quasi-sequence-order descriptors
    ("QSOgrant21".."QSOgrant{20+maxlag}") based on the Grantham matrix.

    Input:
        ProteinSequence: pure protein sequence.
        maxlag: maximum lag; sequence must be longer than maxlag (default 30).
        weight: weight factor (default 0.1).
        distancematrix: Grantham matrix; None selects the module-level
            ``_Distance2``.
    Output: dict of the maxlag coupling-based QSO descriptors.
    """
    # Resolve the default at call time.  The amino-acid composition is not
    # needed for these descriptors, so it is not computed.
    if distancematrix is None:
        distancematrix = _Distance2
    rightpart = [
        GetSequenceOrderCouplingNumber(ProteinSequence, lag, distancematrix)
        for lag in range(1, maxlag + 1)
    ]
    temp = 1 + weight * sum(rightpart)
    result = {}
    # Numbering continues after the 20 composition-based descriptors.
    for offset, tau in enumerate(rightpart):
        result["QSOgrant" + str(21 + offset)] = round(weight * tau / temp, 6)
    return result
#############################################################################################
def GetQuasiSequenceOrder(ProteinSequence, maxlag=30, weight=0.1):
    """
    Compute all quasi-sequence-order descriptors for a protein, combining
    the Schneider-Wrede ("QSOSW*") and Grantham ("QSOgrant*") variants.

    [1]: Kuo-Chen Chou. Prediction of Protein Subcellar Locations by
    Incorporating Quasi-Sequence-Order Effect. Biochemical and Biophysical
    Research Communications 2000, 278, 477-483.

    Input:
        ProteinSequence: pure protein sequence; its length should be larger
            than maxlag.
        maxlag: maximum lag (default 30).
        weight: weight factor, see reference 1 (default 0.1).
    Output: dict containing all quasi-sequence-order descriptors.
    """
    descriptors = {}
    for part in (
        GetQuasiSequenceOrder1SW(ProteinSequence, maxlag, weight, _Distance1),
        GetQuasiSequenceOrder2SW(ProteinSequence, maxlag, weight, _Distance1),
        GetQuasiSequenceOrder1Grant(ProteinSequence, maxlag, weight, _Distance2),
        GetQuasiSequenceOrder2Grant(ProteinSequence, maxlag, weight, _Distance2),
    ):
        descriptors.update(part)
    return descriptors
#############################################################################################
def GetQuasiSequenceOrderp(ProteinSequence, maxlag=30, weight=0.1, distancematrix={}):
    """
    Compute all quasi-sequence-order descriptors for a protein from a
    user-supplied distance matrix.

    [1]: Kuo-Chen Chou. Prediction of Protein Subcellar Locations by
    Incorporating Quasi-Sequence-Order Effect. Biochemical and Biophysical
    Research Communications 2000, 278, 477-483.

    Input:
        ProteinSequence: pure protein sequence; its length should be larger
            than maxlag.
        maxlag: maximum lag (default 30).
        weight: weight factor, see reference 1 (default 0.1).
        distancematrix: dict of 400 pairwise distance values.
    Output: dict containing all quasi-sequence-order descriptors.
    """
    descriptors = {}
    for part in (
        GetQuasiSequenceOrder1(ProteinSequence, maxlag, weight, distancematrix),
        GetQuasiSequenceOrder2(ProteinSequence, maxlag, weight, distancematrix),
    ):
        descriptors.update(part)
    return descriptors
#############################################################################################
if __name__ == "__main__":
    # Demo / smoke test: run each descriptor family on a sample sequence and
    # print the resulting key/value pairs.
    protein = "ELRLRYCAPAGFALLKCNDADYDGFKTNCSNVSVVHCTNLMNTTVTTGLLLNGSYSENRT\
QIWQKHRTSNDSALILLNKHYNLTVTCKRPGNKTVLPVTIMAGLVFHSQKYNLRLRQAWC\
HFPSNWKGAWKEVKEEIVNLPKERYRGTNDPKRIFFQRQWGDPETANLWFNCHGEFFYCK\
MDWFLNYLNNLTVDADHNECKNTSGTKSGNKRAPGPCVQRTYVACHIRSVIIWLETISKK\
TYAPPREGHLECTSTVTGMTVELNYIPKNRTNVTLSPQIESIWAAELDRYKLVEITPIGF\
APTEVRRYTGGHERQKRVPFVVQSQHLLAGILQQQKNLLAAVEAQQQMLKLTIWGVK"
    print(len(protein))
    # Coupling numbers from both built-in matrices (Schneider-Wrede + Grantham).
    SCN = GetSequenceOrderCouplingNumberTotal(protein, maxlag=30)
    print(len(SCN))
    for i in SCN:
        print(i, SCN[i])
    # First 20 quasi-sequence-order descriptors (Schneider-Wrede matrix).
    QSO1 = GetQuasiSequenceOrder1(
        protein, maxlag=30, weight=0.1, distancematrix=_Distance1
    )
    print(QSO1)
    for i in QSO1:
        print(i, QSO1[i])
    # Last maxlag quasi-sequence-order descriptors (Schneider-Wrede matrix).
    QSO2 = GetQuasiSequenceOrder2(
        protein, maxlag=30, weight=0.1, distancematrix=_Distance1
    )
    print(QSO2)
    for i in QSO2:
        print(i, QSO2[i])
    # Combined SW + Grantham descriptor set.
    QSO = GetQuasiSequenceOrder(protein, maxlag=30, weight=0.1)
    print(len(QSO))
    for i in QSO:
        print(i, QSO[i])
    # User-defined-property variants driven by an explicit matrix.
    SCN = GetSequenceOrderCouplingNumberp(protein, maxlag=30, distancematrix=_Distance1)
    print(len(SCN))
    for i in SCN:
        print(i, SCN[i])
    QSO = GetQuasiSequenceOrderp(protein, maxlag=30, distancematrix=_Distance1)
    print(len(QSO))
    for i in QSO:
        print(i, QSO[i])
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Subpixel rendering AND positioning using OpenGL and shaders.
'''
import numpy as np
import OpenGL.GL as gl
import OpenGL.GLUT as glut
from texture_font import TextureFont, TextureAtlas
from shader import Shader
# Vertex shader: fixed-function pass-through for color/texcoords/position;
# forwards the per-vertex subpixel offset attribute "modulo" to the fragment
# stage as varying m.
vert='''
uniform sampler2D texture;
uniform vec2 pixel;
attribute float modulo;
varying float m;
void main() {
gl_FrontColor = gl_Color;
gl_TexCoord[0].xy = gl_MultiTexCoord0.xy;
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
m = modulo;
}
'''
# Fragment shader: redistributes the RGB subpixel weights of the glyph
# texel according to the fractional x offset m (in thirds of a pixel),
# borrowing channels from the texel one pixel to the left ("previous").
# `pixel` is the size of one texel in texture coordinates.
frag='''
uniform sampler2D texture;
uniform vec2 pixel;
varying float m;
void main() {
vec2 uv = gl_TexCoord[0].xy;
vec4 current = texture2D(texture, uv);
vec4 previous= texture2D(texture, uv+vec2(-1,0)*pixel);
float r = current.r;
float g = current.g;
float b = current.b;
float a = current.a;
if( m <= 0.333 )
{
float z = m/0.333;
r = mix(current.r, previous.b, z);
g = mix(current.g, current.r, z);
b = mix(current.b, current.g, z);
}
else if( m <= 0.666 )
{
float z = (m-0.33)/0.333;
r = mix(previous.b, previous.g, z);
g = mix(current.r, previous.b, z);
b = mix(current.g, current.r, z);
}
else if( m < 1.0 )
{
float z = (m-0.66)/0.334;
r = mix(previous.g, previous.r, z);
g = mix(previous.b, previous.g, z);
b = mix(current.r, previous.b, z);
}
gl_FragColor = vec4(r,g,b,a);
}
'''
class Label:
    """A text string laid out as textured quads with subpixel positioning.

    Builds vertex/index/color/texcoord arrays for *text* using glyphs taken
    from *font*; draw() renders them through the module-level subpixel
    shader bound to the shared texture atlas.
    """
    def __init__(self, text, font, color=(1.0, 1.0, 1.0, 0.0), x=0, y=0,
                 width=None, height=None, anchor_x='left', anchor_y='baseline'):
        # NOTE(review): the width/height parameters are effectively ignored --
        # width is overwritten by the computed layout width below and height
        # is never read.
        self.text = text
        # Four vertices and six indices (two triangles) per character.
        self.vertices = np.zeros((len(text)*4,3), dtype=np.float32)
        self.indices = np.zeros((len(text)*6, ), dtype=np.uint)
        self.colors = np.zeros((len(text)*4,4), dtype=np.float32)
        self.texcoords= np.zeros((len(text)*4,2), dtype=np.float32)
        # Per-vertex fractional x offset, fed to the shader's "modulo" attrib.
        self.attrib = np.zeros((len(text)*4,1), dtype=np.float32)
        pen = [x,y]
        prev = None
        for i,charcode in enumerate(text):
            glyph = font[charcode]
            # Kerning and advance are divided by 64 -- presumably FreeType
            # 26.6 fixed-point units; confirm against texture_font.
            kerning = glyph.get_kerning(prev)/64.0
            x0 = pen[0] + glyph.offset[0] + kerning
            # dx = fractional (subpixel) part of the pen position; the quad
            # itself is snapped to the integer pixel grid.
            dx = x0-int(x0)
            x0 = int(x0)
            y0 = pen[1] + glyph.offset[1]
            x1 = x0 + glyph.size[0]
            y1 = y0 - glyph.size[1]
            # Atlas texture coordinates of this glyph.
            u0 = glyph.texcoords[0]
            v0 = glyph.texcoords[1]
            u1 = glyph.texcoords[2]
            v1 = glyph.texcoords[3]
            index = i*4
            indices = [index, index+1, index+2, index, index+2, index+3]
            vertices = [[x0,y0,1],[x0,y1,1],[x1,y1,1], [x1,y0,1]]
            texcoords = [[u0,v0],[u0,v1],[u1,v1], [u1,v0]]
            colors = [color,]*4
            self.vertices[i*4:i*4+4] = vertices
            self.indices[i*6:i*6+6] = indices
            self.texcoords[i*4:i*4+4] = texcoords
            self.colors[i*4:i*4+4] = colors
            self.attrib[i*4:i*4+4] = dx
            # Advance the pen for the next character.
            pen[0] = pen[0]+glyph.advance[0]/64.0 + kerning
            pen[1] = pen[1]+glyph.advance[1]/64.0
            prev = charcode
        # Layout width = pen minus the last advance plus the last glyph width.
        # NOTE(review): raises NameError for empty text (glyph is unbound).
        width = pen[0]-glyph.advance[0]/64.0+glyph.size[0]
        # Vertical anchoring relative to the font metrics.
        if anchor_y == 'top':
            dy = -round(font.ascender)
        elif anchor_y == 'center':
            dy = +round(-font.height/2-font.descender)
        elif anchor_y == 'bottom':
            dy = -round(font.descender)
        else:
            dy = 0
        # Horizontal anchoring relative to the computed width.
        if anchor_x == 'right':
            dx = -width/1.0
        elif anchor_x == 'center':
            dx = -width/2.0
        else:
            dx = 0
        self.vertices += (round(dx), round(dy), 0)
    def draw(self):
        """Render the label with legacy client-state arrays and the subpixel
        shader; assumes the atlas texture is already bound by the caller."""
        gl.glEnable( gl.GL_TEXTURE_2D )
        gl.glDisable( gl.GL_DEPTH_TEST )
        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        gl.glEnableClientState(gl.GL_COLOR_ARRAY)
        gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY)
        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        gl.glVertexPointer(3, gl.GL_FLOAT, 0, self.vertices)
        gl.glColorPointer(4, gl.GL_FLOAT, 0, self.colors)
        gl.glTexCoordPointer(2, gl.GL_FLOAT, 0, self.texcoords)
        alpha = 1
        # Constant-color blending so the glyph coverage modulates each RGB
        # channel independently (subpixel blending).
        gl.glEnable( gl.GL_COLOR_MATERIAL )
        gl.glBlendFunc( gl.GL_CONSTANT_COLOR_EXT,
                        gl.GL_ONE_MINUS_SRC_COLOR )
        gl.glEnable( gl.GL_BLEND )
        gl.glColor3f( alpha, alpha, alpha )
        gl.glBlendColor( 1-alpha, 1-alpha, 1-alpha, 1 )
        # Generic attribute 1 carries the per-vertex subpixel offset.
        gl.glEnableVertexAttribArray( 1 );
        gl.glVertexAttribPointer( 1, 1, gl.GL_FLOAT, gl.GL_FALSE, 0, self.attrib)
        shader.bind()
        shader.uniformi('texture', 0)
        shader.uniformf('pixel', 1.0/512, 1.0/512)
        # NOTE(review): self.indices is dtype np.uint (platform-sized, 64-bit
        # on most 64-bit Unix) but is drawn as GL_UNSIGNED_INT (32-bit);
        # consider np.uint32 -- confirm.
        gl.glDrawElements(gl.GL_TRIANGLES, len(self.indices),
                          gl.GL_UNSIGNED_INT, self.indices)
        shader.unbind()
        gl.glDisableVertexAttribArray( 1 );
        gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
        gl.glDisableClientState(gl.GL_COLOR_ARRAY)
        gl.glDisableClientState(gl.GL_TEXTURE_COORD_ARRAY)
        gl.glDisable( gl.GL_TEXTURE_2D )
        gl.glDisable( gl.GL_BLEND )
if __name__ == '__main__':
    import sys
    # 512x512 atlas with depth 3 (one byte per RGB subpixel channel).
    atlas = TextureAtlas(512,512,3)
    def on_display( ):
        # Clear, bind the glyph atlas and draw every label, then two vertical
        # reference lines to make the subpixel shifts visible.
        gl.glClearColor(1,1,1,1)
        gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        gl.glBindTexture( gl.GL_TEXTURE_2D, atlas.texid )
        for label in labels: label.draw()
        gl.glColor(0,0,0,1)
        gl.glBegin(gl.GL_LINES)
        gl.glVertex2i(15,0)
        gl.glVertex2i(15, 330)
        gl.glVertex2i(225, 0)
        gl.glVertex2i(225, 330)
        gl.glEnd()
        glut.glutSwapBuffers( )
    def on_reshape( width, height ):
        # Orthographic projection mapping GL units 1:1 to window pixels.
        gl.glViewport( 0, 0, width, height )
        gl.glMatrixMode( gl.GL_PROJECTION )
        gl.glLoadIdentity( )
        gl.glOrtho( 0, width, 0, height, -1, 1 )
        gl.glMatrixMode( gl.GL_MODELVIEW )
        gl.glLoadIdentity( )
    def on_keyboard( key, x, y ):
        # Quit on ESC.
        # NOTE(review): under Python 3, GLUT delivers `key` as bytes, so the
        # str comparison '\033' may never match -- confirm b'\033' is needed.
        if key == '\033':
            sys.exit( )
    glut.glutInit( sys.argv )
    glut.glutInitDisplayMode( glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH )
    glut.glutCreateWindow( "Freetype OpenGL" )
    glut.glutReshapeWindow( 240, 330 )
    glut.glutDisplayFunc( on_display )
    glut.glutReshapeFunc( on_reshape )
    glut.glutKeyboardFunc( on_keyboard )
    font = TextureFont(atlas, './Arial.ttf', 9)
    text = "|... A Quick Brown Fox Jumps Over The Lazy Dog"
    labels = []
    x,y = 20,310
    # Each successive row is shifted ~0.1 px right to demonstrate subpixel
    # positioning (the tiny epsilon avoids exact-boundary rounding).
    for i in range(30):
        labels.append(Label(text=text, font=font, x=x, y=y))
        x += 0.1000000000001
        y -= 10
    atlas.upload()
    shader = Shader(vert,frag)
    glut.glutMainLoop( )
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/eoyilmaz/Documents/development/oyProjectManager/oyProjectManager/ui/version_replacer.ui'
#
# Created: Sat Oct 20 20:42:57 2012
# by: pyside-uic 0.2.13 running on PySide 1.1.1
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Dialog(object):
    def setupUi(self, Dialog):
        """Build the Version Replacer dialog's widget hierarchy.

        pyside-uic generated code (see file header): edit the .ui source and
        regenerate rather than modifying this method by hand.
        """
        Dialog.setObjectName("Dialog")
        Dialog.setWindowModality(QtCore.Qt.ApplicationModal)
        Dialog.resize(1043, 414)
        self.horizontalLayout_5 = QtGui.QHBoxLayout(Dialog)
        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
        # --- Left pane: table listing the versions to be replaced ---
        self.verticalWidget = QtGui.QWidget(Dialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(3)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.verticalWidget.sizePolicy().hasHeightForWidth())
        self.verticalWidget.setSizePolicy(sizePolicy)
        self.verticalWidget.setMinimumSize(QtCore.QSize(10, 0))
        self.verticalWidget.setObjectName("verticalWidget")
        self.verticalLayout = QtGui.QVBoxLayout(self.verticalWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.label = QtGui.QLabel(self.verticalWidget)
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label)
        self.assetList_tableWidget = QtGui.QTableWidget(self.verticalWidget)
        self.assetList_tableWidget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        self.assetList_tableWidget.setAlternatingRowColors(True)
        self.assetList_tableWidget.setCornerButtonEnabled(False)
        self.assetList_tableWidget.setColumnCount(2)
        self.assetList_tableWidget.setObjectName("assetList_tableWidget")
        # NOTE(review): duplicate setColumnCount(2) -- harmless uic artifact.
        self.assetList_tableWidget.setColumnCount(2)
        self.assetList_tableWidget.setRowCount(0)
        self.assetList_tableWidget.horizontalHeader().setStretchLastSection(True)
        self.verticalLayout.addWidget(self.assetList_tableWidget)
        self.horizontalLayout_5.addWidget(self.verticalWidget)
        # --- Right pane: replacement target selectors in a grid ---
        self.verticalWidget1 = QtGui.QWidget(Dialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(2)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.verticalWidget1.sizePolicy().hasHeightForWidth())
        self.verticalWidget1.setSizePolicy(sizePolicy)
        self.verticalWidget1.setObjectName("verticalWidget1")
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.verticalWidget1)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.gridWidget1 = QtGui.QWidget(self.verticalWidget1)
        self.gridWidget1.setObjectName("gridWidget1")
        self.gridLayout1 = QtGui.QGridLayout(self.gridWidget1)
        self.gridLayout1.setContentsMargins(0, 0, 0, 0)
        self.gridLayout1.setObjectName("gridLayout1")
        # Grid rows: project(0) / sequence(1) / type(3) / base name(5) /
        # take name(7) / asset file(9), with separator lines between groups.
        self.sequence_label = QtGui.QLabel(self.gridWidget1)
        self.sequence_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.sequence_label.setObjectName("sequence_label")
        self.gridLayout1.addWidget(self.sequence_label, 1, 0, 1, 1)
        self.assetType_label1 = QtGui.QLabel(self.gridWidget1)
        self.assetType_label1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.assetType_label1.setObjectName("assetType_label1")
        self.gridLayout1.addWidget(self.assetType_label1, 3, 0, 1, 1)
        self.assetType_comboBox1 = QtGui.QComboBox(self.gridWidget1)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(1)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.assetType_comboBox1.sizePolicy().hasHeightForWidth())
        self.assetType_comboBox1.setSizePolicy(sizePolicy)
        self.assetType_comboBox1.setObjectName("assetType_comboBox1")
        self.gridLayout1.addWidget(self.assetType_comboBox1, 3, 1, 1, 1)
        self.line_4 = QtGui.QFrame(self.gridWidget1)
        self.line_4.setFrameShape(QtGui.QFrame.HLine)
        self.line_4.setFrameShadow(QtGui.QFrame.Sunken)
        self.line_4.setObjectName("line_4")
        self.gridLayout1.addWidget(self.line_4, 6, 0, 1, 2)
        self.project_comboBox = QtGui.QComboBox(self.gridWidget1)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.project_comboBox.sizePolicy().hasHeightForWidth())
        self.project_comboBox.setSizePolicy(sizePolicy)
        self.project_comboBox.setEditable(False)
        self.project_comboBox.setObjectName("project_comboBox")
        self.gridLayout1.addWidget(self.project_comboBox, 0, 1, 1, 1)
        self.subName_label = QtGui.QLabel(self.gridWidget1)
        self.subName_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.subName_label.setObjectName("subName_label")
        self.gridLayout1.addWidget(self.subName_label, 7, 0, 1, 1)
        self.baseName_label = QtGui.QLabel(self.gridWidget1)
        self.baseName_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.baseName_label.setObjectName("baseName_label")
        self.gridLayout1.addWidget(self.baseName_label, 5, 0, 1, 1)
        self.sequence_comboBox = QtGui.QComboBox(self.gridWidget1)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.sequence_comboBox.sizePolicy().hasHeightForWidth())
        self.sequence_comboBox.setSizePolicy(sizePolicy)
        self.sequence_comboBox.setEditable(False)
        self.sequence_comboBox.setObjectName("sequence_comboBox")
        self.gridLayout1.addWidget(self.sequence_comboBox, 1, 1, 1, 1)
        self.project_label = QtGui.QLabel(self.gridWidget1)
        self.project_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.project_label.setObjectName("project_label")
        self.gridLayout1.addWidget(self.project_label, 0, 0, 1, 1)
        self.line_2 = QtGui.QFrame(self.gridWidget1)
        self.line_2.setFrameShape(QtGui.QFrame.HLine)
        self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
        self.line_2.setObjectName("line_2")
        self.gridLayout1.addWidget(self.line_2, 2, 0, 1, 2)
        self.line_3 = QtGui.QFrame(self.gridWidget1)
        self.line_3.setFrameShape(QtGui.QFrame.HLine)
        self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
        self.line_3.setObjectName("line_3")
        self.gridLayout1.addWidget(self.line_3, 4, 0, 1, 2)
        self.baseName_comboBox = QtGui.QComboBox(self.gridWidget1)
        self.baseName_comboBox.setEnabled(True)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(1)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.baseName_comboBox.sizePolicy().hasHeightForWidth())
        self.baseName_comboBox.setSizePolicy(sizePolicy)
        self.baseName_comboBox.setObjectName("baseName_comboBox")
        self.gridLayout1.addWidget(self.baseName_comboBox, 5, 1, 1, 1)
        self.subName_comboBox = QtGui.QComboBox(self.gridWidget1)
        self.subName_comboBox.setEnabled(True)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(1)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.subName_comboBox.sizePolicy().hasHeightForWidth())
        self.subName_comboBox.setSizePolicy(sizePolicy)
        self.subName_comboBox.setObjectName("subName_comboBox")
        self.gridLayout1.addWidget(self.subName_comboBox, 7, 1, 1, 1)
        self.assetFile_label = QtGui.QLabel(self.gridWidget1)
        self.assetFile_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.assetFile_label.setObjectName("assetFile_label")
        self.gridLayout1.addWidget(self.assetFile_label, 9, 0, 1, 1)
        self.assetFile_comboBox = QtGui.QComboBox(self.gridWidget1)
        self.assetFile_comboBox.setEnabled(True)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(1)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.assetFile_comboBox.sizePolicy().hasHeightForWidth())
        self.assetFile_comboBox.setSizePolicy(sizePolicy)
        self.assetFile_comboBox.setObjectName("assetFile_comboBox")
        self.gridLayout1.addWidget(self.assetFile_comboBox, 9, 1, 1, 1)
        self.line_5 = QtGui.QFrame(self.gridWidget1)
        self.line_5.setFrameShape(QtGui.QFrame.HLine)
        self.line_5.setFrameShadow(QtGui.QFrame.Sunken)
        self.line_5.setObjectName("line_5")
        self.gridLayout1.addWidget(self.line_5, 8, 0, 1, 2)
        self.verticalLayout_2.addWidget(self.gridWidget1)
        # Action buttons below the grid.
        self.removeReplacement_pushButton = QtGui.QPushButton(self.verticalWidget1)
        self.removeReplacement_pushButton.setObjectName("removeReplacement_pushButton")
        self.verticalLayout_2.addWidget(self.removeReplacement_pushButton)
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem)
        self.horizontalWidget = QtGui.QWidget(self.verticalWidget1)
        self.horizontalWidget.setObjectName("horizontalWidget")
        self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.replace_pushButton = QtGui.QPushButton(self.horizontalWidget)
        self.replace_pushButton.setObjectName("replace_pushButton")
        self.horizontalLayout.addWidget(self.replace_pushButton)
        self.cancel_pushButton = QtGui.QPushButton(self.horizontalWidget)
        self.cancel_pushButton.setObjectName("cancel_pushButton")
        self.horizontalLayout.addWidget(self.cancel_pushButton)
        self.verticalLayout_2.addWidget(self.horizontalWidget)
        self.horizontalLayout_5.addWidget(self.verticalWidget1)
        # Apply translations and auto-connect slots by object name.
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
    """Install all user-visible strings on the dialog's widgets.

    Generated by pyuic from the .ui file: every call routes a literal
    through QtGui.QApplication.translate so Qt's translation system can
    substitute localized text at runtime. Do not edit the string literals
    by hand; regenerate from the .ui source instead.
    """
    # Window title and header label.
    Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Version Replacer", None, QtGui.QApplication.UnicodeUTF8))
    self.label.setText(QtGui.QApplication.translate("Dialog", "Please check the Versions that needs to be replaced", None, QtGui.QApplication.UnicodeUTF8))
    # Field labels for the asset-selection grid.
    self.sequence_label.setText(QtGui.QApplication.translate("Dialog", "Sequence", None, QtGui.QApplication.UnicodeUTF8))
    self.assetType_label1.setText(QtGui.QApplication.translate("Dialog", "Version Type", None, QtGui.QApplication.UnicodeUTF8))
    # Asset-type combo box: tooltip, status tip and long-form "What's This" help.
    self.assetType_comboBox1.setToolTip(QtGui.QApplication.translate("Dialog", "Select an asset type", None, QtGui.QApplication.UnicodeUTF8))
    self.assetType_comboBox1.setStatusTip(QtGui.QApplication.translate("Dialog", "Select an asset type", None, QtGui.QApplication.UnicodeUTF8))
    self.assetType_comboBox1.setWhatsThis(QtGui.QApplication.translate("Dialog", "Asset Type:\n"
        "\n"
        "Assets has types that lets you distinguish their purpose of existance. This comboBox lists the asset types that the sequence and the current host environment (MAYA, NUKE etc.) can support.", None, QtGui.QApplication.UnicodeUTF8))
    # Project combo box help texts.
    self.project_comboBox.setToolTip(QtGui.QApplication.translate("Dialog", "Select a Project", None, QtGui.QApplication.UnicodeUTF8))
    self.project_comboBox.setStatusTip(QtGui.QApplication.translate("Dialog", "Select a Project", None, QtGui.QApplication.UnicodeUTF8))
    self.project_comboBox.setWhatsThis(QtGui.QApplication.translate("Dialog", "Project:\n"
        "\n"
        "A project is a collection of sequences. So projects can only contain sequences. This comboBox lists the current projects on the server. If you select one it will update the available sequences for that project...", None, QtGui.QApplication.UnicodeUTF8))
    self.subName_label.setText(QtGui.QApplication.translate("Dialog", "Take Name", None, QtGui.QApplication.UnicodeUTF8))
    self.baseName_label.setText(QtGui.QApplication.translate("Dialog", "Base Name", None, QtGui.QApplication.UnicodeUTF8))
    # Sequence combo box help texts.
    self.sequence_comboBox.setToolTip(QtGui.QApplication.translate("Dialog", "Select a Sequence", None, QtGui.QApplication.UnicodeUTF8))
    self.sequence_comboBox.setStatusTip(QtGui.QApplication.translate("Dialog", "Select a Sequence", None, QtGui.QApplication.UnicodeUTF8))
    self.sequence_comboBox.setWhatsThis(QtGui.QApplication.translate("Dialog", "Sequence:\n"
        "\n"
        "Sequences are collections of folders. Each folder has a special meaning and can may contain one type of asset file. Every sequence has its own settings. So while one sequence supports some features, others may not...", None, QtGui.QApplication.UnicodeUTF8))
    self.project_label.setText(QtGui.QApplication.translate("Dialog", "Project", None, QtGui.QApplication.UnicodeUTF8))
    # Base-name combo box: only active for shot-dependent asset types.
    self.baseName_comboBox.setToolTip(QtGui.QApplication.translate("Dialog", "if the type is a shot dependent type this will be activated", None, QtGui.QApplication.UnicodeUTF8))
    self.baseName_comboBox.setStatusTip(QtGui.QApplication.translate("Dialog", "if the type is a shot dependent type this will be activated", None, QtGui.QApplication.UnicodeUTF8))
    self.baseName_comboBox.setWhatsThis(QtGui.QApplication.translate("Dialog", "Shot:\n"
        "\n"
        "If an asset is shot dependent, its base name is a shot string (SH001, SH010 etc. ). So, shot and base name comboBoxes are actually showing the base name of the asset, but it lets the user to separate the shot dependent asset types from shot independent types.", None, QtGui.QApplication.UnicodeUTF8))
    # Take-name (sub-name) combo box help texts.
    self.subName_comboBox.setToolTip(QtGui.QApplication.translate("Dialog", "if the type is a shot dependent type this will be activated", None, QtGui.QApplication.UnicodeUTF8))
    self.subName_comboBox.setStatusTip(QtGui.QApplication.translate("Dialog", "if the type is a shot dependent type this will be activated", None, QtGui.QApplication.UnicodeUTF8))
    self.subName_comboBox.setWhatsThis(QtGui.QApplication.translate("Dialog", "Shot:\n"
        "\n"
        "If an asset is shot dependent, its base name is a shot string (SH001, SH010 etc. ). So, shot and base name comboBoxes are actually showing the base name of the asset, but it lets the user to separate the shot dependent asset types from shot independent types.", None, QtGui.QApplication.UnicodeUTF8))
    self.assetFile_label.setText(QtGui.QApplication.translate("Dialog", "Asset File", None, QtGui.QApplication.UnicodeUTF8))
    # Asset-file combo box help texts.
    self.assetFile_comboBox.setToolTip(QtGui.QApplication.translate("Dialog", "if the type is a shot dependent type this will be activated", None, QtGui.QApplication.UnicodeUTF8))
    self.assetFile_comboBox.setStatusTip(QtGui.QApplication.translate("Dialog", "if the type is a shot dependent type this will be activated", None, QtGui.QApplication.UnicodeUTF8))
    self.assetFile_comboBox.setWhatsThis(QtGui.QApplication.translate("Dialog", "Shot:\n"
        "\n"
        "If an asset is shot dependent, its base name is a shot string (SH001, SH010 etc. ). So, shot and base name comboBoxes are actually showing the base name of the asset, but it lets the user to separate the shot dependent asset types from shot independent types.", None, QtGui.QApplication.UnicodeUTF8))
    # Push-button captions.
    self.removeReplacement_pushButton.setText(QtGui.QApplication.translate("Dialog", "Remove Replacement", None, QtGui.QApplication.UnicodeUTF8))
    self.replace_pushButton.setText(QtGui.QApplication.translate("Dialog", "Replace", None, QtGui.QApplication.UnicodeUTF8))
    self.cancel_pushButton.setText(QtGui.QApplication.translate("Dialog", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
|
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Neutron Routers.
"""
import logging
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class CreateForm(forms.SelfHandlingForm):
    """Self-handling form for creating a Neutron router.

    Optional capabilities (DVR mode, L3 HA, external gateway / SNAT,
    availability-zone hints) are probed at construction time and the
    corresponding fields are removed when the deployment does not
    support them.
    """
    name = forms.CharField(max_length=255, label=_("Router Name"),
                           required=False)
    admin_state_up = forms.BooleanField(label=_("Enable Admin State"),
                                        initial=True,
                                        required=False)
    external_network = forms.ThemableChoiceField(label=_("External Network"),
                                                 required=False)
    enable_snat = forms.BooleanField(label=_("Enable SNAT"),
                                     initial=True,
                                     required=False)
    mode = forms.ThemableChoiceField(label=_("Router Type"))
    ha = forms.ThemableChoiceField(label=_("High Availability Mode"))
    az_hints = forms.MultipleChoiceField(
        label=_("Availability Zone Hints"),
        required=False,
        help_text=_("Availability Zones where the router may be scheduled. "
                    "Leaving this unset is equivalent to selecting all "
                    "Availability Zones"))
    failure_url = 'horizon:project:routers:index'

    def __init__(self, request, *args, **kwargs):
        """Trim the declared fields down to what the deployment supports."""
        super(CreateForm, self).__init__(request, *args, **kwargs)
        # Distributed-router (DVR) support: offer the mode choice only when
        # the "dvr" extension permits creation.
        self.dvr_allowed = api.neutron.get_feature_permission(self.request,
                                                              "dvr", "create")
        if self.dvr_allowed:
            mode_choices = [('server_default', _('Use Server Default')),
                            ('centralized', _('Centralized')),
                            ('distributed', _('Distributed'))]
            self.fields['mode'].choices = mode_choices
        else:
            del self.fields['mode']
        # L3 high-availability support.
        self.ha_allowed = api.neutron.get_feature_permission(self.request,
                                                             "l3-ha", "create")
        if self.ha_allowed:
            ha_choices = [('server_default', _('Use Server Default')),
                          ('enabled', _('Enable HA mode')),
                          ('disabled', _('Disable HA mode'))]
            self.fields['ha'].choices = ha_choices
        else:
            del self.fields['ha']
        # External gateway: only offered when external networks exist.
        networks = self._get_network_list(request)
        if networks:
            self.fields['external_network'].choices = networks
        else:
            del self.fields['external_network']
        # NOTE(review): assumes the view always seeds
        # initial['enable_snat_allowed'] — confirm against callers.
        self.enable_snat_allowed = self.initial['enable_snat_allowed']
        if (not networks or not self.enable_snat_allowed):
            del self.fields['enable_snat']
        try:
            az_supported = api.neutron.is_extension_supported(
                self.request, 'router_availability_zone')
            if az_supported:
                zones = api.neutron.list_availability_zones(
                    self.request, 'router', 'available')
                self.fields['az_hints'].choices = [(zone['name'], zone['name'])
                                                   for zone in zones]
            else:
                del self.fields['az_hints']
        except Exception:
            # Best effort: a failed AZ lookup must not block router creation,
            # so surface a warning and drop the field.
            msg = _("Failed to get availability zone list.")
            exceptions.handle(self.request, msg)
            del self.fields['az_hints']

    def _get_network_list(self, request):
        """Return choice tuples for all external networks, or [] on failure."""
        search_opts = {'router:external': True}
        try:
            networks = api.neutron.network_list(request, **search_opts)
        except Exception as e:
            LOG.info('Failed to get network list: %s', e)
            msg = _('Failed to get network list.')
            messages.warning(request, msg)
            networks = []
        choices = [(network.id, network.name or network.id)
                   for network in networks]
        if choices:
            choices.insert(0, ("", _("Select network")))
        return choices

    def handle(self, request, data):
        """Create the router from the cleaned form *data*.

        Returns the created router on success, False on failure (after
        redirecting with an error message).
        """
        try:
            params = {'name': data['name'],
                      'admin_state_up': data['admin_state_up']}
            # NOTE: the admin form allows a tenant_id to be specified; the
            # handling lives here so both forms share one code path.
            if 'tenant_id' in data and data['tenant_id']:
                params['tenant_id'] = data['tenant_id']
            if 'external_network' in data and data['external_network']:
                params['external_gateway_info'] = {'network_id':
                                                   data['external_network']}
                if self.enable_snat_allowed:
                    params['external_gateway_info']['enable_snat'] = \
                        data['enable_snat']
            if 'az_hints' in data and data['az_hints']:
                params['availability_zone_hints'] = data['az_hints']
            # 'mode'/'ha' fields only exist when the feature is allowed.
            if (self.dvr_allowed and data['mode'] != 'server_default'):
                params['distributed'] = (data['mode'] == 'distributed')
            if (self.ha_allowed and data['ha'] != 'server_default'):
                params['ha'] = (data['ha'] == 'enabled')
            router = api.neutron.router_create(request, **params)
            message = (_('Router %s was successfully created.') %
                       router.name_or_id)
            messages.success(request, message)
            return router
        except Exception as exc:
            LOG.info('Failed to create router: %s', exc)
            # Bug fix: not every exception carries a status_code; a bare
            # attribute access here raised AttributeError inside the handler
            # and masked the original failure.
            if getattr(exc, 'status_code', None) == 409:
                msg = _('Quota exceeded for resource router.')
            else:
                msg = _('Failed to create router "%s".') % data['name']
            redirect = reverse(self.failure_url)
            exceptions.handle(request, msg, redirect=redirect)
            return False
class UpdateForm(forms.SelfHandlingForm):
    """Self-handling form for editing an existing Neutron router.

    Exposes name and admin state; the DVR "mode" field appears only when
    the deployment allows mode updates, and the HA field is currently
    always hidden (see the TODO in __init__).
    """
    name = forms.CharField(label=_("Name"), required=False)
    admin_state = forms.BooleanField(label=_("Enable Admin State"),
                                     required=False)
    mode = forms.ThemableChoiceField(label=_("Router Type"))
    ha = forms.BooleanField(label=_("High Availability Mode"), required=False)
    redirect_url = reverse_lazy('horizon:project:routers:index')

    def __init__(self, request, *args, **kwargs):
        """Prune or configure optional fields based on deployment features."""
        super(UpdateForm, self).__init__(request, *args, **kwargs)
        self.dvr_allowed = api.neutron.get_feature_permission(
            self.request, "dvr", "update")
        if not self.dvr_allowed:
            del self.fields['mode']
        else:
            mode_field = self.fields['mode']
            if self.initial.get('mode') == 'distributed':
                # Neutron supports only changing from centralized to
                # distributed now, so an already-distributed router gets a
                # read-only widget with a single choice.
                mode_field.widget = forms.TextInput(attrs={'readonly':
                                                           'readonly'})
                mode_field.choices = [('distributed', _('Distributed'))]
            else:
                mode_field.choices = [('centralized', _('Centralized')),
                                      ('distributed', _('Distributed'))]
        # TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
        # PUT operation. It will be fixed in Kilo cycle.
        # self.ha_allowed = api.neutron.get_feature_permission(
        #     self.request, "l3-ha", "update")
        self.ha_allowed = False
        if not self.ha_allowed:
            del self.fields['ha']

    def handle(self, request, data):
        """Apply the edits to the router; return it, or raise via handle()."""
        try:
            params = {'name': data['name'],
                      'admin_state_up': data['admin_state']}
            if self.dvr_allowed:
                params['distributed'] = (data['mode'] == 'distributed')
            if self.ha_allowed:
                params['ha'] = data['ha']
            router = api.neutron.router_update(request,
                                               self.initial['router_id'],
                                               **params)
            messages.success(
                request,
                _('Router %s was successfully updated.') % router.name_or_id)
            return router
        except Exception as exc:
            LOG.info('Failed to update router %(id)s: %(exc)s',
                     {'id': self.initial['router_id'], 'exc': exc})
            msg = _('Failed to update router %s') % data['name']
            exceptions.handle(request, msg, redirect=self.redirect_url)
|
|
import math
import warnings
import numpy as np
import pandas as pd
from .utils import _gca, is_classifier, is_regressor
class ModelXRay(object):
    """This class executes a model over a broad range of modified data points to analyze aspects of its performance.

    For each point in the data set, and for every feature involved in the prediction of the model, a new set of data
    points is created where the chosen feature is varied across its (empirical) range. These modified data points are
    fed into the model to get a set of model predictions for each feature-data point combination.

    It is desirable that the "data" object passed in be relatively large in size, since the algorithm will make
    some heuristic choices based on the ranges of values it sees. We suggest using at least 100 data points and
    preferably more than 500.

    It builds a results object, which can then be passed to functions such as feature_effect_summary and
    feature_dependence_plots to gain insight on how the various features affect the target. The results
    object can also be used directly by a user who wants to operate at a low level.

    Parameters
    ----------
    model : A model object from sklearn or similar styled objects. The `predict` method will be used if it is
        a regression model, while `predict_proba` will be used if it is a (binary) classification model.
        Multi-class classifiers are not supported at this time.
    data : A DataFrame (or 2-D ndarray) possessing the same structure that the model would take as an argument.
        These methods are designed to be used on "test" data (i.e. data that was not used in the training of the
        model). However, there is nothing structural to prevent it from being used on training data, and there may
        be some insight gained by doing so.
    columns : a specific subset of columns to be used. Default is None, which means to use all available columns
        in *data*.
    resolution : how many different "grid points" to use for each feature. The algorithm will use only the unique
        values it sees in *data* if there are fewer than *resolution* unique values. Otherwise it will use
        *resolution* linearly spaced values ranging from the min observed value to the max observed value.
    normalize_loc : optional anchor for each prediction curve: 'start', 'end', or an integer grid index; the curve
        is shifted so it is zero at that location. Default None (no shift).
    pred_col_name : for classifiers, the class label whose predicted probability is tracked; it is mapped to an
        index into ``model.classes_``.
    pred_col_index : column of the ``predict_proba`` output to track (default 1, the positive class of a binary
        classifier).
    """

    def __init__(self, model, data, columns=None, resolution=100, normalize_loc=None, pred_col_name=None, pred_col_index=1):
        self.model = model
        self.data = data
        self.pred_col_index = pred_col_index
        if isinstance(data, pd.DataFrame):
            if (pred_col_name is not None) and is_classifier(self.model):
                # Map the requested class label to its predict_proba column.
                self.pred_col_index = np.where(self.model.classes_ == pred_col_name)[0][0]
            # NOTE(review): this indexes the *data* columns with a class
            # index — looks suspicious but is preserved as-is; confirm intent.
            self.pred_col_name = data.columns[self.pred_col_index]
            self.data_values = data.values
        else:
            self.data_values = data
        self.columns = columns
        self.results = self._model_xray(columns, resolution, normalize_loc)

    def _get_data_rows(self, row_nums):
        """Return the rows at *row_nums*, honoring the stored container type."""
        if isinstance(self.data, pd.DataFrame):
            return self.data.iloc[row_nums]
        else:
            return self.data[row_nums, :]

    def _get_predictions(self, rows):
        """Run the model on *rows*: the tracked predict_proba column for
        classifiers, plain predict otherwise."""
        # Silence deprecation warnings that some models emit on predict calls.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            if is_classifier(self.model):
                y_pred = self.model.predict_proba(rows)[:, self.pred_col_index]
            else:
                y_pred = self.model.predict(rows)
        return y_pred

    def gen_model_pred(self, row, col_idx, values):
        """Predict the target for copies of *row* in which column *col_idx*
        is replaced by each entry of *values*, in order."""
        rows = []
        for val in values:
            sim_row = row.copy()
            sim_row[col_idx] = val
            rows.append(sim_row)
        # If the rows are Series (DataFrame source), recombine into a DataFrame
        # so the model sees the same structure it was trained on.
        if isinstance(rows[0], pd.Series):
            rows = pd.DataFrame(rows)
        y_pred = self._get_predictions(rows)
        return y_pred

    def _model_xray(self, columns, resolution, normalize_loc):
        '''Scan the model across the empirical range of every requested feature.

        Returns
        -------
        results : dict mapping feature name -> (grid_values, prediction_matrix).
            *grid_values* is the set of substituted feature values; the matrix
            has one row per data point and one column per grid value, where
            entry (i, j) is the model prediction for data point i with the
            feature set to grid value j.
        '''
        # Resolve which columns to scan.
        if columns is None:
            if isinstance(self.data, pd.DataFrame):
                columns = self.data.columns
            if isinstance(self.data, np.ndarray):
                columns = range(len(self.data[0]))  # assuming a 2-D dataset
        else:
            # Accept a single column as well as an iterable of columns.
            try:
                iter(columns)
            except TypeError:
                columns = [columns]
        # Translate column names into positional indices.
        column_nums = []
        if isinstance(self.data, pd.DataFrame):
            for column in columns:
                try:
                    column_nums.append(self.data.columns.get_loc(column))
                except KeyError:
                    ## TODO: unknown column names are currently silently
                    ## skipped, which desynchronizes column_nums from columns.
                    pass
        else:
            # For raw arrays, indices and names coincide.
            if isinstance(columns[0], int):
                column_nums = columns
            else:
                column_nums = range(len(columns))
        results = {}
        num_pts = len(self.data_values)
        for column_num, column_name in zip(column_nums, columns):
            # Grid: all unique values if few enough, else a linear space
            # spanning the observed (nan-safe) min/max.
            if (len(np.unique(self.data_values[:, column_num])) > resolution):
                col_values = np.linspace(np.nanmin(self.data_values[:, column_num]),
                                         np.nanmax(self.data_values[:, column_num]), resolution)
            else:
                col_values = np.sort(np.unique(self.data_values[:, column_num]))
            out_matrix = np.zeros([num_pts, len(col_values)])
            if isinstance(self.data, pd.DataFrame):
                rows = self.data.iterrows()
            else:
                rows = enumerate(self.data)
            for loop_idx, (row_idx, row) in enumerate(rows):
                y_pred = self.gen_model_pred(row, column_num, col_values)
                # Optional anchoring of each curve at a chosen grid location.
                if normalize_loc == 'start':
                    y_pred = y_pred - y_pred[0]
                if normalize_loc == 'end':
                    y_pred = y_pred - y_pred[-1]
                if (type(normalize_loc) == int and normalize_loc >= 0 and normalize_loc < resolution):
                    y_pred = y_pred - y_pred[normalize_loc]
                out_matrix[loop_idx, :] = y_pred
            results[column_name] = (col_values, out_matrix)
        return results

    def feature_effect_summary(self, kind="boxh", num_features=20, y_scaling='none', ax=None):
        '''Plot a comparison of the effects of different features in a complex predictive model.

        The effect of an individual feature can be highly dependent on the values of the other features, so this
        visualization shows, per feature, a boxplot of the peak-to-trough change in prediction observed as that
        feature varies across its range. Features are ranked by the median of that distribution.

        Parameters
        ----------
        kind : Currently only 'boxh' (horizontal boxplot) is supported.
        num_features : maximum number of features to include; the most significant (by median peak-to-trough
            effect) are kept. Non-positive or falsy values mean "all".
        y_scaling : 'none', 'logit', 'logit10', or 'logit2' — optionally transform predictions (assumed to be
            probabilities in (0, 1) for the logit variants) before measuring effects.
        ax : optional matplotlib axis to draw on; defaults to the current axis.
        '''
        columns = list(self.results.keys())
        logit_func = lambda x: np.log(x / (1 - x))
        logit10_func = lambda x: np.log10(x / (1 - x))
        logit2_func = lambda x: np.log2(x / (1 - x))
        if y_scaling == 'logit':
            result_data = [importance_distribution_of_variable(logit_func(self.results[col_name][1]))
                           for col_name in columns]
        elif y_scaling == 'logit10':
            result_data = [importance_distribution_of_variable(logit10_func(self.results[col_name][1]))
                           for col_name in columns]
        elif y_scaling == 'logit2':
            result_data = [importance_distribution_of_variable(logit2_func(self.results[col_name][1]))
                           for col_name in columns]
        else:
            result_data = [importance_distribution_of_variable(self.results[col_name][1])
                           for col_name in columns]
        # Rank features by the median of their per-point effect sizes.
        sortind = np.argsort([np.median(d) for d in result_data])
        if num_features and num_features > 0:
            num_features = min(num_features, len(columns))
        else:
            num_features = len(columns)
        plot_data = [result_data[idx] for idx in sortind][-num_features:]
        if ax is None:
            ax = _gca()
        fig = ax.get_figure()
        fig.set_figwidth(10)
        # Scale the figure height with the number of boxes shown.
        fig.set_figheight(max(6, int(math.ceil(num_features * 0.5))))
        ax.boxplot(plot_data, notch=0, sym='+', vert=0, whis=1.5)
        ax.set_yticklabels([columns[idx] for idx in sortind][-num_features:])

    def feature_dependence_plots(self, y_scaling='none', show_base_points=True, pts_selected='sample',
                                 columns=None, num_pts=5, figsize=None):
        '''Visualize the effect of single variables in models with complicated dependencies.

        Selects points from the data set, then varies one column at a time across its grid to show the model
        response for each point — Individual Conditional Expectation (ICE) plots; see Goldstein, Kapelner,
        Bleich, Pitkin, "Peeking Inside the Black Box", JCGS (March 2014).

        Returns the indexes of the rows that were plotted.
        '''
        import matplotlib.pyplot as plt
        if columns is None:
            columns = sorted(list(self.results.keys()))
        num_rows = len(self.results[columns[0]][1])  # number of sample rows
        if (type(pts_selected) == str and pts_selected == 'sample'):
            row_indexes = np.random.choice(np.arange(num_rows), num_pts)
        else:
            row_indexes = pts_selected
        if show_base_points:
            # Predictions at the points' *actual* feature values, overlaid as dots.
            base_rows = self._get_data_rows(row_indexes)
            y_base_points = self._get_predictions(base_rows)
            if y_scaling == 'logit':
                y_base_points = np.log(y_base_points / (1 - y_base_points))
            if y_scaling == 'logit10':
                y_base_points = np.log10(y_base_points / (1 - y_base_points))
            if y_scaling == 'logit2':
                y_base_points = np.log2(y_base_points / (1 - y_base_points))
        else:
            y_base_points = None
        n_cols = min(3, len(columns))
        n_rows = int(math.ceil(len(columns) / n_cols))
        # Bug fix: a caller-supplied figsize used to be unconditionally
        # overwritten; now it is honored and only defaulted when absent.
        if figsize is None:
            figsize = (n_cols * 4, n_rows * 4)
        # NOTE(review): with a single column this yields one Axes object and
        # axes.flatten() would fail — preserved behavior, confirm upstream.
        fig, axes = plt.subplots(n_rows, n_cols, figsize=figsize)
        for col_name, ax in zip(columns, axes.flatten()):
            x = self.results[col_name][0]
            y_values = self.results[col_name][1][row_indexes]
            y_plot = y_values
            if y_scaling == 'logit':
                y_plot = np.log(y_values / (1 - y_values))
            if y_scaling == 'logit10':
                y_plot = np.log10(y_values / (1 - y_values))
            if y_scaling == 'logit2':
                y_plot = np.log2(y_values / (1 - y_values))
            for y in y_plot:
                ax.plot(x, y)
            # Overlay the base points, if requested.
            if y_base_points is not None:
                ax.scatter(base_rows[col_name], y_base_points)
            ax.set_title(col_name[:30])
        plt.tight_layout()
        return row_indexes

    def explain_prediction_difference(self, index_1, index_2, tol=.03, verbose=True, decimals=4):
        '''Explain the difference in predictions between two points of the "xray"-ed data set.

        Starting at the first point, each feature in turn is tentatively changed to the second point's value; the
        change producing the biggest absolute move in the target is committed, and the process repeats until the
        current target is within a factor of 1+tol of the second point's target. Delegates to the module-level
        explain_prediction_difference.
        '''
        data_row_1 = self._get_data_rows(index_1)
        data_row_2 = self._get_data_rows(index_2)
        return explain_prediction_difference(self.model, data_row_1, data_row_2, tol, verbose, decimals, self.pred_col_index)
def importance_distribution_of_variable(model_result_array):
    """Return the peak-to-trough spread (max minus min) of each row of
    *model_result_array* as a NumPy array."""
    return np.array([np.max(curve) - np.min(curve) for curve in model_result_array])
def explain_prediction_difference(model, data_row_1, data_row_2, tol=.03, verbose=True, decimals = 4, pred_col_index=1):
    '''Given a model and two single-row Series-like data rows, this function gives an explanation
    of the factors contributing to the difference in the predictions.

    Starting with the first point given, this considers changing each feature from its current value to that
    possessed by the second point. The function evaluates the target in both scenarios and determines the
    feature value change that creates the biggest (absolute) change in the target. This change is selected
    and the current point becomes the new point with the new feature value. This is repeated until the new
    target value is within a factor of 1+tol of the second point.

    Returns (feat_list, feat_val_change_list, move_list, val_list): the features changed in order, the
    (old, new) value pairs, the per-step target moves, and the target value after each step.
    '''
    column_names = data_row_1.index
    num_columns = len(column_names)
    # Reshape the two rows into 1 x n_features arrays for the model.
    dr_1 = data_row_1.values.reshape(1,-1)
    dr_2 = data_row_2.values.reshape(1,-1)
    column_list = list(range(num_columns))
    curr_pt = np.copy(dr_1)
    # Target values at the two endpoints (tracked predict_proba column for classifiers).
    if is_classifier(model):
        val1 = model.predict_proba(dr_1)[0,pred_col_index]
        val2 = model.predict_proba(dr_2)[0,pred_col_index]
    else:
        val1 = model.predict(dr_1)[0]
        val2 = model.predict(dr_2)[0]
    if verbose:
        print(val1, val2)
        print('Your initial point has a target value of {}'.format(np.round(val1,decimals=decimals)))
        print('Your final point has a target value of {}'.format(np.round(val2,decimals=decimals)))
    pt_list = [dr_1]  # NOTE(review): collected but never returned or read.
    val_list = [val1]
    curr_val = val1
    final_val = val2
    feat_list =[]
    move_list = []
    feat_val_change_list = []
    #for num_steps in range(4):
    # Greedy loop: stop once curr_val is within a (1 +/- tol) ratio of final_val.
    # NOTE(review): ratio-based test assumes final_val != 0; if no single-feature
    # change moves the target, this could loop re-selecting the best remaining
    # column — confirm termination for the intended models.
    while (((curr_val/final_val) >(1+tol)) or ((curr_val/final_val) <(1-tol))):
        biggest_move = 0
        best_column = -1
        best_val = curr_val
        # Try swapping each remaining feature to the second point's value.
        for i in column_list:
            test_pt = np.copy(curr_pt)
            prev_feat_val = test_pt[0,i]
            subst_val = dr_2[0,i]
            test_pt[0,i] = subst_val
            if is_classifier(model):
                test_val = model.predict_proba(test_pt)[0,pred_col_index]
            else:
                test_val = model.predict(test_pt)[0]
            move_size = (test_val - curr_val)
            # >= keeps the last of tied candidates; also records the values
            # that produced the winning move.
            if(np.abs(move_size)>=np.abs(biggest_move)):
                biggest_move = move_size
                best_column = i
                best_val = test_val
                old_feat_val = prev_feat_val
                new_feat_val = subst_val
        # Commit the winning change to the current point.
        subst_val = dr_2[0,best_column]
        curr_pt[0,best_column] = subst_val
        val_list.append(best_val)
        curr_val = best_val
        if verbose:
            print('Changing {} from {} to {}'.format(column_names[best_column],np.round(old_feat_val,decimals=decimals),np.round(new_feat_val,decimals=decimals)))
            print('\t\tchanges your target by {} to {}'.format(np.round(biggest_move,decimals=decimals), np.round(best_val,decimals=decimals)))
            print('----------')
            if not (((curr_val/final_val) >(1+tol)) or ((curr_val/final_val) <(1-tol))):
                print('Tolerance of {} reached'.format(tol))
                print('Current value of {} is within {}% of {}'.format(np.round(curr_val,decimals=decimals),(100*tol),np.round(final_val,decimals=decimals)))
        # Record the step and retire the column from further consideration.
        feat_list.append(column_names[best_column])
        column_list.remove(best_column)
        move_list.append(biggest_move)
        feat_val_change_list.append((old_feat_val, new_feat_val))
    return feat_list, feat_val_change_list, move_list, val_list
def explain_prediction_difference_xgboost(model, data_row_1, data_row_2, tol=.03, verbose=True, decimals = 4, pred_col_index=1):
    '''Given a model and two single-row data frames, this function gives an explanation
    of the factors contributing to the difference in the predictions.

    Variant of explain_prediction_difference that keeps the rows as DataFrames (using .iloc indexing) instead
    of reshaping to raw arrays, for models — such as xgboost wrappers — that expect DataFrame input.

    Starting with the first point given, this considers changing each feature from its current value to that
    possessed by the second point. The function evaluates the target in both scenarios and determines the
    feature value change that creates the biggest (absolute) change in the target. This change is selected
    and the current point becomes the new point with the new feature value. This is repeated until the new
    target value is within a factor of 1+tol of the second point.

    Returns (feat_list, feat_val_change_list, move_list, val_list).
    '''
    column_names = data_row_1.columns
    num_columns = len(column_names)
    #dr_1 = data_row_1.values.reshape(1,-1)
    #dr_2 = data_row_2.values.reshape(1,-1)
    # Keep DataFrame form; the model is assumed to accept DataFrames directly.
    dr_1 = data_row_1
    dr_2 = data_row_2
    column_list = list(range(num_columns))
    curr_pt = (dr_1).copy()
    # Target values at the two endpoints (tracked predict_proba column for classifiers).
    if is_classifier(model):
        val1 = model.predict_proba(dr_1)[0,pred_col_index]
        val2 = model.predict_proba(dr_2)[0,pred_col_index]
    else:
        val1 = model.predict(dr_1)[0]
        val2 = model.predict(dr_2)[0]
    if verbose:
        print(val1, val2)
        print('Your initial point has a target value of {}'.format(np.round(val1,decimals=decimals)))
        print('Your final point has a target value of {}'.format(np.round(val2,decimals=decimals)))
    pt_list = [dr_1]  # NOTE(review): collected but never returned or read.
    val_list = [val1]
    curr_val = val1
    final_val = val2
    feat_list =[]
    move_list = []
    feat_val_change_list = []
    #for num_steps in range(4):
    # Greedy loop: stop once curr_val is within a (1 +/- tol) ratio of final_val.
    # NOTE(review): same termination caveats as explain_prediction_difference.
    while (((curr_val/final_val) >(1+tol)) or ((curr_val/final_val) <(1-tol))):
        biggest_move = 0
        best_column = -1
        best_val = curr_val
        # Try swapping each remaining feature to the second point's value.
        for i in column_list:
            test_pt = (curr_pt).copy()
            prev_feat_val = test_pt.iloc[0,i]
            subst_val = dr_2.iloc[0,i]
            test_pt.iloc[0,i] = subst_val
            if is_classifier(model):
                test_val = model.predict_proba(test_pt)[0,pred_col_index]
            else:
                test_val = model.predict(test_pt)[0]
            move_size = (test_val - curr_val)
            # >= keeps the last of tied candidates.
            if(np.abs(move_size)>=np.abs(biggest_move)):
                biggest_move = move_size
                best_column = i
                best_val = test_val
                old_feat_val = prev_feat_val
                new_feat_val = subst_val
        # Commit the winning change to the current point.
        subst_val = dr_2.iloc[0,best_column]
        curr_pt.iloc[0,best_column] = subst_val
        val_list.append(best_val)
        curr_val = best_val
        if verbose:
            print('Changing {} from {} to {}'.format(column_names[best_column],np.round(old_feat_val,decimals=decimals),np.round(new_feat_val,decimals=decimals)))
            print('\t\tchanges your target by {} to {}'.format(np.round(biggest_move,decimals=decimals), np.round(best_val,decimals=decimals)))
            print('----------')
            if not (((curr_val/final_val) >(1+tol)) or ((curr_val/final_val) <(1-tol))):
                print('Tolerance of {} reached'.format(tol))
                print('Current value of {} is within {}% of {}'.format(np.round(curr_val,decimals=decimals),(100*tol),np.round(final_val,decimals=decimals)))
        # Record the step and retire the column from further consideration.
        feat_list.append(column_names[best_column])
        column_list.remove(best_column)
        move_list.append(biggest_move)
        feat_val_change_list.append((old_feat_val, new_feat_val))
    return feat_list, feat_val_change_list, move_list, val_list
|
|
#
# Test script for the curses module
#
# This script doesn't actually display anything very coherent. but it
# does call every method and function.
#
# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
# getmouse(), ungetmouse(), init_color()
#
import curses, sys, tempfile
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
# option. If not available, nothing after this line will be executed.
import test_support
test_support.requires('curses')
def window_funcs(stdscr):
    "Test the methods of windows"
    win = curses.newwin(10,10)
    win = curses.newwin(5,5, 5,5)
    win2 = curses.newwin(15,15, 5,5)
    # Exercise addch/addstr with every supported argument signature.
    # NOTE: the original used apply(meth, args), which was removed in
    # Python 3, and wrote ('a') -- a plain string, not a tuple -- which
    # only worked because apply() unpacked the one-character string.
    for meth in [stdscr.addch, stdscr.addstr]:
        for args in [('a',), ('a', curses.A_BOLD),
                     (4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
            meth(*args)
    # Methods callable with no arguments.
    for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
                 stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
                 stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
                 stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
                 stdscr.getparyx, stdscr.getyx, stdscr.inch,
                 stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
                 win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
                 stdscr.standout, stdscr.standend, stdscr.syncdown,
                 stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
        meth()
    stdscr.addnstr('1234', 3)
    stdscr.addnstr('1234', 3, curses.A_BOLD)
    stdscr.addnstr(4,4, '1234', 3)
    stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
    stdscr.attron(curses.A_BOLD)
    stdscr.attroff(curses.A_BOLD)
    stdscr.attrset(curses.A_BOLD)
    stdscr.bkgd(' ')
    stdscr.bkgd(' ', curses.A_REVERSE)
    stdscr.bkgdset(' ')
    stdscr.bkgdset(' ', curses.A_REVERSE)
    win.border(65, 66, 67, 68,
               69, 70, 71, 72)
    win.border('|', '!', '-', '_',
               '+', '\\', '#', '/')
    # border() must reject a non-character argument with TypeError.
    try:
        win.border(65, 66, 67, 68,
                   69, [], 71, 72)
    except TypeError:
        pass
    else:
        # "raise X, msg" is Python-2-only syntax; use the call form.
        raise RuntimeError("Expected win.border() to raise TypeError")
    stdscr.clearok(1)
    win4 = stdscr.derwin(2,2)
    win4 = stdscr.derwin(1,1, 5,5)
    win4.mvderwin(9,9)
    stdscr.echochar('a')
    stdscr.echochar('a', curses.A_BOLD)
    stdscr.hline('-', 5)
    stdscr.hline('-', 5, curses.A_BOLD)
    stdscr.hline(1,1,'-', 5)
    stdscr.hline(1,1,'-', 5, curses.A_BOLD)
    stdscr.idcok(1)
    stdscr.idlok(1)
    stdscr.immedok(1)
    stdscr.insch('c')
    stdscr.insdelln(1)
    stdscr.insnstr('abc', 3)
    stdscr.insnstr('abc', 3, curses.A_BOLD)
    stdscr.insnstr(5, 5, 'abc', 3)
    stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
    stdscr.insstr('def')
    stdscr.insstr('def', curses.A_BOLD)
    stdscr.insstr(5, 5, 'def')
    stdscr.insstr(5, 5, 'def', curses.A_BOLD)
    stdscr.is_linetouched(0)
    stdscr.keypad(1)
    stdscr.leaveok(1)
    stdscr.move(3,3)
    win.mvwin(2,2)
    stdscr.nodelay(1)
    stdscr.notimeout(1)
    win2.overlay(win)
    win2.overwrite(win)
    stdscr.redrawln(1,2)
    stdscr.scrollok(1)
    stdscr.scroll()
    stdscr.scroll(2)
    stdscr.scroll(-3)
    stdscr.setscrreg(10,15)
    win3 = stdscr.subwin(10,10)
    win3 = stdscr.subwin(10,10, 5,5)
    stdscr.syncok(1)
    stdscr.timeout(5)
    stdscr.touchline(5,5)
    stdscr.touchline(5,5,0)
    stdscr.vline('a', 3)
    stdscr.vline('a', 3, curses.A_STANDOUT)
    stdscr.vline(1,1, 'a', 3)
    stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
    # Only present with sufficiently new curses bindings.
    if hasattr(curses, 'resize'):
        stdscr.resize()
    if hasattr(curses, 'enclose'):
        stdscr.enclose()
def module_funcs(stdscr):
    "Test module-level functions"
    # Module-level functions callable with no arguments.
    for func in [curses.baudrate, curses.beep, curses.can_change_color,
                 curses.cbreak, curses.def_prog_mode, curses.doupdate,
                 curses.filter, curses.flash, curses.flushinp,
                 curses.has_colors, curses.has_ic, curses.has_il,
                 curses.isendwin, curses.killchar, curses.longname,
                 curses.nocbreak, curses.noecho, curses.nonl,
                 curses.noqiflush, curses.noraw,
                 curses.reset_prog_mode, curses.termattrs,
                 curses.termname, curses.erasechar, curses.getsyx]:
        func()
    # Functions that actually need arguments
    curses.curs_set(1)
    curses.delay_output(1)
    curses.echo() ; curses.echo(1)
    # Round-trip a window through a temporary file (putwin/getwin).
    f = tempfile.TemporaryFile()
    stdscr.putwin(f)
    f.seek(0)
    curses.getwin(f)
    f.close()
    curses.halfdelay(1)
    curses.intrflush(1)
    curses.meta(1)
    curses.napms(100)
    curses.newpad(50,50)
    win = curses.newwin(5,5)
    win = curses.newwin(5,5, 1,1)
    curses.nl() ; curses.nl(1)
    curses.putp('abc')
    curses.qiflush()
    curses.raw() ; curses.raw(1)
    curses.setsyx(5,5)
    curses.setupterm(fd=sys.__stdout__.fileno())
    curses.tigetflag('hc')
    curses.tigetnum('co')
    curses.tigetstr('cr')
    curses.tparm('cr')
    curses.typeahead(sys.__stdin__.fileno())
    curses.unctrl('a')
    curses.ungetch('a')
    curses.use_env(1)
    # Functions only available on a few platforms
    if curses.has_colors():
        curses.start_color()
        curses.init_pair(2, 1,1)
        curses.color_content(1)
        curses.color_pair(2)
        curses.pair_content(curses.COLOR_PAIRS)
        curses.pair_number(0)
    if hasattr(curses, 'keyname'):
        curses.keyname(13)
    if hasattr(curses, 'has_key'):
        curses.has_key(13)
    if hasattr(curses, 'getmouse'):
        curses.mousemask(curses.BUTTON1_PRESSED)
        curses.mouseinterval(10)
def main(stdscr):
    "Run the module- and window-level tests, preserving terminal modes."
    # Save the terminal state so the mode changes made by the tests
    # (cbreak, echo, raw, ...) are undone even if a test raises.
    curses.savetty()
    try:
        module_funcs(stdscr)
        window_funcs(stdscr)
    finally:
        curses.resetty()
# When run directly, curses.wrapper() handles initscr/endwin and restores
# the terminal on error.  When imported (e.g. by the regression test
# driver), do the setup and teardown manually.
if __name__ == '__main__':
    curses.wrapper(main)
else:
    try:
        stdscr = curses.initscr()
        main(stdscr)
    finally:
        curses.endwin()
|
|
"""Makes .caffemodel files readable for sklearn-theano"""
# Authors: Michael Eickenberg
# Kyle Kastner
# Erfan Noury
# Li Yao
# License: BSD 3 Clause
from __future__ import print_function
import os
import numpy as np
from collections import OrderedDict
import theano.tensor as T
from ...datasets import get_dataset_dir, download
from sklearn_theano.base import Convolution, Relu, LRN, Feedforward, ZeroPad
from sklearn_theano.base import CaffePool
import warnings
def _get_caffe_dir():
    """Locate the Caffe installation directory.

    Prefers the location of an importable pycaffe; falls back to the
    $CAFFE_DIR environment variable.  Returns None if neither exists.
    """
    try:
        import caffe
    except ImportError:
        # No pycaffe available; defer to the environment.
        return os.environ.get("CAFFE_DIR", None)
    from os.path import dirname
    # caffe.__file__ lives three levels below the installation root.
    return dirname(dirname(dirname(caffe.__file__)))
def _compile_caffe_protobuf(caffe_proto=None,
                            proto_src_dir=None,
                            python_out_dir=None):
    """Compiles the caffe protocol buffer to python_out_dir.

    Args:
        caffe_proto: path to caffe.proto.  If None, it is looked up in
            the caffe installation (pycaffe or $CAFFE_DIR), then in the
            local dataset directory, and finally downloaded from the
            BVLC repository.
        proto_src_dir: protoc include directory; defaults to the
            directory containing caffe_proto.
        python_out_dir: output directory for the generated module;
            defaults to this file's directory.

    Raises:
        ValueError: if the resolved caffe.proto does not exist.
        Exception: if protoc exits with a non-zero status.
    """
    if caffe_proto is None:
        caffe_dir = _get_caffe_dir()
        if caffe_dir is None:
            # No CAFFE_DIR found, neither could pycaffe be imported.
            # Search for caffe.proto locally, downloading it if absent.
            caffe_dataset_dir = get_dataset_dir('caffe')
            caffe_proto = os.path.join(caffe_dataset_dir, 'caffe.proto')
            if not os.path.exists(caffe_proto):
                print("Downloading caffe.proto")
                url = ('https://raw.githubusercontent.com/'
                       'BVLC/caffe/master/src/caffe/proto/caffe.proto')
                download(url, caffe_proto, progress_update_percentage=1)
        else:
            caffe_proto = os.path.join(caffe_dir, "src", "caffe", "proto",
                                       "caffe.proto")
    if not os.path.exists(caffe_proto):
        raise ValueError(
            ("Could not find {pf}. Please specify the correct"
             " caffe.proto file in the caffe_proto kwarg"
             " e.g. /home/user/caffe/src/caffe/proto/caffe.proto").format(
                 pf=caffe_proto))
    if proto_src_dir is None:
        proto_src_dir = os.path.dirname(caffe_proto)
    if python_out_dir is None:
        python_out_dir = os.path.dirname(os.path.abspath(__file__))
    protoc_command = ("protoc -I={srcdir}"
                      " --python_out={outdir} {protofile}").format(
                          srcdir=proto_src_dir, outdir=python_out_dir,
                          protofile=caffe_proto)
    # The 'commands' module is Python-2 only; getstatusoutput moved to
    # subprocess in Python 3.  Use whichever is available.
    try:
        from subprocess import getstatusoutput
    except ImportError:  # Python 2
        from commands import getstatusoutput
    status, output = getstatusoutput(protoc_command)
    if status != 0:
        raise Exception(
            "Error executing protoc: code {c}, message {m}".format(
                c=status, m=output))
def _get_caffe_pb2():
    """Import and return the pre-compiled caffe_pb2 protobuf module.

    Temporarily puts the package's 'externals' directory on sys.path so
    the generated module can resolve its google.protobuf dependency.
    """
    import sys
    here = os.path.realpath(__file__)
    # .../<pkg>/<sub>/<this file> -> .../externals
    externals_dir = str(os.sep).join(
        here.split(os.sep)[:-3] + ['externals'])
    sys.path.append(externals_dir)
    from ...models.bvlc_googlenet import caffe_pb2
    sys.path.remove(externals_dir)
    return caffe_pb2
def _open_caffe_model(caffemodel_file):
    """Opens binary format .caffemodel files. Returns protobuf object.

    The file is read in binary mode and closed promptly via a context
    manager (the original leaked the file handle).
    """
    caffe_pb2 = _get_caffe_pb2()
    with open(caffemodel_file, 'rb') as f:
        binary_content = f.read()
    protobuf = caffe_pb2.NetParameter()
    protobuf.ParseFromString(binary_content)
    return protobuf
def _blob_to_ndarray(blob):
    """Convert a caffe protobuf blob into an ndarray.

    The flat blob data is reshaped to the blob's declared
    (num, channels, height, width) dimensions.
    """
    target_shape = (blob.num, blob.channels, blob.height, blob.width)
    return np.array(blob.data).reshape(target_shape)
# Map from caffe layer-type name to the extra fields _parse_caffe_model
# extracts for layers of that type.  Each value is either None (nothing
# beyond the standard name/blob bookkeeping), the string 'blobs' (pull
# the layer's weight blobs as ndarrays), or (message, field) path tuples
# resolved via _get_property and flattened with '__' in the descriptor.
LAYER_PROPERTIES = dict(
    DATA=None,
    CONVOLUTION=('blobs',
                 ('convolution_param', 'stride'),
                 ('convolution_param', 'stride_h'),
                 ('convolution_param', 'stride_w'),
                 ('convolution_param', 'pad'),
                 ('convolution_param', 'pad_h'),
                 ('convolution_param', 'pad_w')),
    RELU=None,
    POOLING=(('pooling_param', 'kernel_size'),
             ('pooling_param', 'kernel_h'),
             ('pooling_param', 'kernel_w'),
             ('pooling_param', 'stride'),
             ('pooling_param', 'stride_h'),
             ('pooling_param', 'stride_w'),
             ('pooling_param', 'pad'),
             ('pooling_param', 'pad_h'),
             ('pooling_param', 'pad_w'),
             ('pooling_param', 'pool')
             ),
    SPLIT=None,
    LRN=(('lrn_param', 'local_size'),
         ('lrn_param', 'alpha'),
         ('lrn_param', 'beta'),
         ('lrn_param', 'norm_region')),
    CONCAT=(('concat_param', 'concat_dim'),),
    INNER_PRODUCT=('blobs',),
    SOFTMAX_LOSS=None,
    SOFTMAX=None,
    DROPOUT=None
)
def _get_property(obj, property_path):
    """Fetch an attribute named by *property_path* from *obj*.

    property_path may be a plain attribute name, or a tuple of names
    naming a nested attribute chain (e.g. ('convolution_param',
    'stride')), which is followed one level at a time.
    """
    if not isinstance(property_path, tuple):
        return getattr(obj, property_path)
    head = getattr(obj, property_path[0])
    if len(property_path) == 1:
        return head
    return _get_property(head, property_path[1:])
def _parse_caffe_model(caffe_model):
    """Parse a caffemodel protobuf (or a path to one) into a list of
    plain-dict layer descriptors.

    Each descriptor carries 'type', 'name', 'top_blobs' and
    'bottom_blobs'; layer-type-specific fields listed in
    LAYER_PROPERTIES are added with nested property paths flattened
    using '__' (e.g. 'convolution_param__stride').  A synthetic DATA
    descriptor is prepended when the model does not start with one.
    """
    warnings.warn("Caching parse for caffemodel, this may take some time")
    caffe_pb2 = _get_caffe_pb2()  # need to remove this dependence on pb here
    # The layer-type enum lives in a different message depending on the
    # protobuf schema version shipped with caffe.
    try:
        _layer_types = caffe_pb2.LayerParameter.LayerType.items()
    except AttributeError:
        _layer_types = caffe_pb2.V1LayerParameter.LayerType.items()
    # create a dictionary that indexes both ways, number->name, name->number
    layer_types = dict(_layer_types)
    for v, k in _layer_types:
        layer_types[k] = v
    if not hasattr(caffe_model, "layers"):
        # Consider it a filename
        caffe_model = _open_caffe_model(caffe_model)
    layers_raw = caffe_model.layers
    parsed = []
    for n, layer in enumerate(layers_raw):
        # standard properties
        ltype = layer_types[layer.type]
        if n == 0 and ltype != 'DATA':
            warnings.warn("Caffemodel doesn't start with DATA - adding")
            first_layer_descriptor = dict(
                type='DATA',
                name='data',
                top_blobs=('data',),
                bottom_blobs=tuple())
            parsed.append(first_layer_descriptor)
        layer_descriptor = dict(type=ltype,
                                name=layer.name,
                                top_blobs=tuple(layer.top),
                                bottom_blobs=tuple(layer.bottom))
        parsed.append(layer_descriptor)
        # specific properties
        specifics = LAYER_PROPERTIES[ltype]
        if specifics is None:
            continue
        for param in specifics:
            if param == 'blobs':
                layer_descriptor['blobs'] = list(map(_blob_to_ndarray,
                                                     layer.blobs))
            else:
                # Flatten e.g. ('pooling_param', 'stride') to
                # 'pooling_param__stride'.
                param_name = '__'.join(param)
                param_value = _get_property(layer, param)
                layer_descriptor[param_name] = param_value
    return parsed
def parse_caffe_model(caffe_model, convert_fc_to_conv=True,
                      float_dtype='float32', verbose=0):
    """Build theano expressions for each layer of a caffemodel.

    Args:
        caffe_model: a filename, protobuf object, or pre-parsed list of
            layer descriptors (from _parse_caffe_model).
        convert_fc_to_conv: if True, INNER_PRODUCT layers are realized
            as 1x1 convolutions instead of dense Feedforward layers.
        float_dtype: dtype used for weights and input tensors.
        verbose: if > 0, print one line per layer while building.

    Returns:
        (layers, blobs, inputs, params) OrderedDicts: the layer objects
        by name, the symbolic blob expressions by blob name, the input
        variables, and the shared parameters by derived name.

    Raises:
        ValueError: for a layer type not handled here.
    """
    if isinstance(caffe_model, str) or not isinstance(caffe_model, list):
        parsed_caffe_model = _parse_caffe_model(caffe_model)
    else:
        parsed_caffe_model = caffe_model
    layers = OrderedDict()
    inputs = OrderedDict()
    blobs = OrderedDict()
    params = OrderedDict()
    for i, layer in enumerate(parsed_caffe_model):
        layer_type = layer['type']
        layer_name = layer['name']
        top_blobs = layer['top_blobs']
        bottom_blobs = layer['bottom_blobs']
        layer_blobs = layer.get('blobs', None)
        if verbose > 0:
            print("%d\t%s\t%s" % (i, layer_type, layer_name))
        if layer_type == 'DATA':
            # DATA layers contain input data in top_blobs, create input
            # variables, float for 'data' and int for 'label'
            for data_blob_name in top_blobs:
                if data_blob_name == 'label':
                    blobs['label'] = T.ivector()
                    inputs['label'] = blobs['label']
                else:
                    blobs[data_blob_name] = T.tensor4(dtype=float_dtype)
                    inputs[data_blob_name] = blobs[data_blob_name]
        elif layer_type == 'CONVOLUTION':
            # CONVOLUTION layers take input from bottom_blob, convolve with
            # layer_blobs[0], and add bias layer_blobs[1]
            stride = layer['convolution_param__stride']
            stride_h = max(layer['convolution_param__stride_h'], stride)
            stride_w = max(layer['convolution_param__stride_w'], stride)
            if stride_h > 1 or stride_w > 1:
                subsample = (stride_h, stride_w)
            else:
                subsample = None
            pad = layer['convolution_param__pad']
            pad_h = max(layer['convolution_param__pad_h'], pad)
            pad_w = max(layer['convolution_param__pad_w'], pad)
            # Flip the kernel: caffe correlates, theano convolves.
            conv_filter = layer_blobs[0].astype(float_dtype)[..., ::-1, ::-1]
            conv_bias = layer_blobs[1].astype(float_dtype).ravel()
            convolution_input = blobs[bottom_blobs[0]]
            convolution = Convolution(conv_filter, biases=conv_bias,
                                      activation=None, subsample=subsample,
                                      input_dtype=float_dtype)
            # If padding is specified, need to pad. In practice, I think
            # caffe prevents padding that would make the filter see only
            # zeros, so technically this can also be obtained by sensibly
            # cropping a border_mode=full convolution. However, subsampling
            # may then be off by 1 and would have to be done separately :/
            if pad_h > 0 or pad_w > 0:
                zp = ZeroPad((pad_h, pad_w))
                zp._build_expression(convolution_input)
                expression = zp.expression_
                layers[layer_name] = (zp, convolution)
            else:
                layers[layer_name] = convolution
                expression = convolution_input
            convolution._build_expression(expression)
            expression = convolution.expression_
            blobs[top_blobs[0]] = expression
            params[layer_name + '_conv_W'] = convolution.convolution_filter_
            params[layer_name + '_conv_b'] = convolution.biases_
        elif layer_type == "RELU":
            # RELU layers take input from bottom_blobs, set everything
            # negative to zero and write the result to top_blobs
            relu_input = blobs[bottom_blobs[0]]
            relu = Relu()
            relu._build_expression(relu_input)
            layers[layer_name] = relu
            blobs[top_blobs[0]] = relu.expression_
        elif layer_type == "POOLING":
            # POOLING layers take input from bottom_blobs, perform max
            # pooling according to stride and kernel size information
            # and write the result to top_blobs
            pooling_input = blobs[bottom_blobs[0]]
            kernel_size = layer['pooling_param__kernel_size']
            kernel_h = max(layer['pooling_param__kernel_h'], kernel_size)
            kernel_w = max(layer['pooling_param__kernel_w'], kernel_size)
            stride = layer['pooling_param__stride']
            stride_h = max(layer['pooling_param__stride_h'], stride)
            stride_w = max(layer['pooling_param__stride_w'], stride)
            pad = layer['pooling_param__pad']
            pad_h = max(layer['pooling_param__pad_h'], pad)
            pad_w = max(layer['pooling_param__pad_w'], pad)
            pool_types = {0: 'max', 1: 'avg'}
            pool_type = pool_types[layer['pooling_param__pool']]
            pooling = CaffePool((kernel_h, kernel_w),
                                (stride_h, stride_w),
                                (pad_h, pad_w),
                                pool_type=pool_type)
            pooling._build_expression(pooling_input)
            layers[layer_name] = pooling
            blobs[top_blobs[0]] = pooling.expression_
        elif layer_type == "DROPOUT":
            # DROPOUT may figure in some networks, but it is only relevant
            # at the learning stage, not at the prediction stage.
            pass
        elif layer_type in ["SOFTMAX_LOSS", "SOFTMAX"]:
            softmax_input = blobs[bottom_blobs[0]]
            # have to write our own softmax expression, because of shape
            # issues
            si = softmax_input.reshape((softmax_input.shape[0],
                                        softmax_input.shape[1], -1))
            shp = (si.shape[0], 1, si.shape[2])
            exp = T.exp(si - si.max(axis=1).reshape(shp))
            softmax_expression = (exp / exp.sum(axis=1).reshape(shp)
                                  ).reshape(softmax_input.shape)
            layers[layer_name] = "SOFTMAX"
            blobs[top_blobs[0]] = softmax_expression
        elif layer_type == "SPLIT":
            split_input = blobs[bottom_blobs[0]]
            for top_blob in top_blobs:
                blobs[top_blob] = split_input
            # Should probably make a class to be able to add to layers
            layers[layer_name] = "SPLIT"
        elif layer_type == "LRN":
            # Local normalization layer
            lrn_input = blobs[bottom_blobs[0]]
            lrn_factor = layer['lrn_param__alpha']
            lrn_exponent = layer['lrn_param__beta']
            axis = {0: 'channels'}[layer['lrn_param__norm_region']]
            nsize = layer['lrn_param__local_size']
            lrn = LRN(nsize, lrn_factor, lrn_exponent, axis=axis)
            lrn._build_expression(lrn_input)
            layers[layer_name] = lrn
            blobs[top_blobs[0]] = lrn.expression_
        elif layer_type == "CONCAT":
            input_expressions = [blobs[bottom_blob] for bottom_blob
                                 in bottom_blobs]
            axis = layer['concat_param__concat_dim']
            output_expression = T.concatenate(input_expressions, axis=axis)
            blobs[top_blobs[0]] = output_expression
            layers[layer_name] = "CONCAT"
        elif layer_type == "INNER_PRODUCT":
            weights = layer_blobs[0].astype(float_dtype)
            biases = layer_blobs[1].astype(float_dtype).squeeze()
            fully_connected_input = blobs[bottom_blobs[0]]
            if not convert_fc_to_conv:
                if fully_connected_input.ndim == 4:
                    m_, t_, x_, y_ = fully_connected_input.shape
                    fully_connected_input = fully_connected_input.reshape(
                        (m_, t_ * x_ * y_))
                fc_layer = Feedforward(weights.squeeze().T, biases,
                                       activation=None)
                params[layer_name + '_fc_W'] = fc_layer.weights
                if fc_layer.biases is not None:
                    params[layer_name + '_fc_b'] = fc_layer.biases
            else:
                fc_layer = Convolution(weights.transpose((2, 3, 0, 1)),
                                       biases, activation=None)
            fc_layer._build_expression(fully_connected_input)
            if convert_fc_to_conv:
                # BUG FIX: the original recorded
                # convolution.convolution_filter_ here, i.e. the
                # parameters of the most recent CONVOLUTION layer (and
                # a NameError when there was none).  Record this
                # layer's own filter and bias, after building, to
                # mirror the CONVOLUTION branch above.
                params[layer_name + '_conv_W'] = fc_layer.convolution_filter_
                params[layer_name + '_conv_b'] = fc_layer.biases_
            layers[layer_name] = fc_layer
            blobs[top_blobs[0]] = fc_layer.expression_
        else:
            raise ValueError('layer type %s is not known to sklearn-theano'
                             % layer_type)
    return layers, blobs, inputs, params
|
|
# -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; tab-width: 4; -*-
# vim: set shiftwidth=4 softtabstop=4 expandtab:
"""Support for reading meta-data and data NetCDF files, primarily
time-series data.
2014 Copyright University Corporation for Atmospheric Research
This file is part of the "django-ncharts" package.
The license and distribution terms for this file may be found in the
file LICENSE in this package.
"""
import os, sys, time
import netCDF4
from datetime import datetime
import pytz
import numpy as np
import logging
import threading
import operator
import hashlib
from functools import reduce as reduce_
from ncharts import exceptions as nc_exc
from ncharts import fileset as nc_fileset
# __name__ is ncharts.netcdf
_logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def get_file_modtime(path):
    """ Utility to get the modification time of a file.

    Returns a timezone-aware UTC datetime.  A missing or unreadable
    file is logged and the exception re-raised.
    """
    try:
        mtime = os.stat(path).st_mtime
    except (FileNotFoundError, PermissionError) as exc:
        _logger.error(exc)
        raise
    return datetime.fromtimestamp(mtime, tz=pytz.utc)
class NetCDFDataset(object):
    """A dataset consisting of NetCDF files, within a period of time.
    This is similar to netCDF4.MFDataset, but gets around some of its
    limitations.
    Supports reading a list of time-series variables from a
    collection of files concatenating the results over the
    time-dimension of the variables. If a variable is missing in
    a file, the values for those times will be NaN filled.
    Also handles other situations that may arise, such as if the
    non-time dimensions for a variable change from file to file, in
    which case the result will have the largest non-time dimensions,
    with the extra values filled in.
    Also attempts to handle the situation when the type of a variable
    is not consistent over the collection of files.
    Attributes:
        path: directory path and file name format
        fileset: The nc_fileset.FileSet encapsulating a set of files.
        start_time: start time of the dataset
        end_time: end time of the dataset
        cache_hash: a hash string, created from the path, start_time
            and end_time. The cache of NetCDF attributes is
            stored in the class under this hash code.
    These attributes of NetCDFDataset are cached:
        variables: Dict of dicts for all time-series variables found in
            dataset by their "exported" variable name:
            { 'shape': tuple of integer dimensions of the variable,
              'dimnames': tuple of str dimension names for variable,
              'units': str value of "units" attribute if found,
              'long_name': str value of "long_name" attribute if found,
              'dtype': numpy.dtype of the variable
            }.
        base_time: str name of base_time variable if found in the dataset.
        time_dim: str name of the time dimension in this dataset.
        time_name: str name of time variable.
        nstations: int length of NetCDF "station" dimension in this dataset,
            if found.
        station_dim: str name of NetCDF "station" dimension, currently always
            "station".
        station_names: If a NetCDF character variable called "station"
            is found, a list of str values of the variable.
    """
    # pylint thinks this class is too big.
    # pylint: disable=too-many-instance-attributes
    # Upper bound on the number of files opened by get_variables()
    # when pre-scanning the dataset for time-series variables.
    MAX_NUM_FILES_TO_PRESCAN = 50
    # Serializes access to __cached_dataset_info across request threads.
    __cache_lock = threading.Lock()
    # dictionary of attributes of a NetCDFDataset, keyed by cache_hash.
    __cached_dataset_info = {}
    def __init__(self, path, start_time, end_time):
        """Constructs NetCDFDataset with a path to a fileset.Fileset.
        Raises:
            none
        """
        self.path = path
        # Fileset for this path pattern.  NOTE(review): Fileset.get is
        # assumed to be a lookup/factory by path -- confirm against
        # ncharts.fileset.
        self.fileset = nc_fileset.Fileset.get(path)
        self.start_time = start_time
        self.end_time = end_time
        # Digest of path + time period; identifies this dataset in the
        # class-level metadata cache (see get_dataset_info).
        hasher = hashlib.md5()
        hasher.update(bytes(path, 'utf-8'))
        hasher.update(bytes(str(start_time), 'utf-8'))
        hasher.update(bytes(str(end_time), 'utf-8'))
        self.cache_hash = hasher.digest()
def get_dataset_info(self):
"""Fetch a copy of the cache of info for this dataset.
"""
with NetCDFDataset.__cache_lock:
if self.cache_hash in NetCDFDataset.__cached_dataset_info:
return NetCDFDataset.__cached_dataset_info[self.cache_hash].copy()
dsinfo = {
'file_mod_times': {},
'base_time': None,
'time_dim_name': None,
'time_name': None,
'nstations': None,
'station_dim': None,
'station_names': None,
'variables': {},
}
return dsinfo
    def save_dataset_info(self, dsinfo):
        """Save info for this dataset in the class-level cache.

        Note: stores a reference to dsinfo (not a copy); callers get
        copies back from get_dataset_info().
        """
        with NetCDFDataset.__cache_lock:
            NetCDFDataset.__cached_dataset_info[self.cache_hash] = dsinfo
def __str__(self):
return "NetCDFDataset, path=" + str(self.path)
    def get_files(
            self,
            start_time=pytz.utc.localize(datetime.min),
            end_time=pytz.utc.localize(datetime.max)):
        """Return the fileset.File objects matching a time period.
        Args:
            start_time: datetime.datetime of start of fileset scan.
            end_time: end of fileset scan.
        Returns:
            List of fileset.File objects matching the time period
            (each has .path and .time attributes; see get_filepaths
            and get_variables for usage).
        Raises:
            FileNotFoundError, PermissionError
        """
        return self.fileset.scan(start_time, end_time)
def get_filepaths(
self,
start_time=pytz.utc.localize(datetime.min),
end_time=pytz.utc.localize(datetime.max)):
"""Return the file path names matching the time period.
Args:
start_time: datetime.datetime of start of fileset scan.
end_time: end of fileset scan.
Returns:
List of file path names matching the time period.
Raises:
FileNotFoundError, PermissionError
"""
return [f.path for f in self.get_files(start_time, end_time)]
def get_variables(
self,
time_names=('time', 'Time', 'time_offset')):
""" Scan the set of files for time series variables, returning a dict
for information about the variables.
The names of the variables in the dataset are converted to an exported
form. If a variable has a 'short_name' attribute, it is used for the
variable name, otherwise the exported name is set to the NetCDF variable
name.
Note, we don't read every file. May want to have
MAX_NUM_FILES_TO_PRESCAN be an attribute of the dataset.
Even better, would be nice to know that one only
needs to read a reduced set of files, perhaps just one!
Args:
time_names: List of allowed names for time variable.
Returns:
A dict of variables, keyed by the exported variable name.
Each value is a dict, containing the following keys:
shape: tuple containing the shape of the variable
dimnames: list of dimension names
dtype: NetCDF data type
time_index: index of the time dimension
units: units attribute of the NetCDF variable
long_name: long_name attribute of the NetCDF variable
Raises:
nc_exc.NoDataFoundException
"""
dsinfo = self.get_dataset_info()
# Note: dsinfo_vars is a reference. Modificatons to it
# are also modifications to dsinfo.
dsinfo_vars = dsinfo['variables']
files = self.get_files(
start_time=self.start_time,
end_time=self.end_time)
# typically get_files() also returns the file before start_time
# We may want that in reading a period of data, but not
# in assembling the variables for the dataset
filepaths = [f.path for f in files if f.time >= self.start_time and f.time < self.end_time]
skip = 1
if len(filepaths) > NetCDFDataset.MAX_NUM_FILES_TO_PRESCAN:
skip = len(filepaths) / NetCDFDataset.MAX_NUM_FILES_TO_PRESCAN
# Read at most MAX_NUM_FILES_TO_PRESCAN, including latest file.
# Files are scanned in a backwards sequence
pindex = len(filepaths) - 1
n_files_read = 0
while pindex >= 0:
ncpath = filepaths[int(pindex)]
pindex -= skip
# The files might be in the process of being moved, deleted, etc,
# so if we get an exception in this open, try a few more times.
# Testing indicates that with a truncated file (artificially
# truncated with dd), the underlying C code will cause a crash
# of python from an assert() rather than raising an exception
# that could be caught.
# If the netcdf library is compiled with -DNDEBUG, then the
# the open and parse of the truncated header succeeds, but
# still no exception.
# If the file is artificially corrupted by removing an
# initial portion of the file:
# dd if=test.nc of=bad.nc bs=1014 count=100 skip=1
# then an exception is raised (this was with -DNDEBUG):
# RuntimeError bad.nc: NetCDF: Unknown file format
# To make this robust, it would be good to run a king's
# taster process on each file first to reduce the possibility
# of a server death. The king's taster would not use NDEBUG,
# but perhaps the python server would. Complicated.
fileok = False
skip_file = False
exc = None
for itry in range(0, 3):
try:
curr_mod_time = get_file_modtime(ncpath)
if ncpath in dsinfo['file_mod_times']:
prev_mod_time = dsinfo['file_mod_times'][ncpath]
if curr_mod_time <= prev_mod_time:
skip_file = True
fileok = True
break
dsinfo['file_mod_times'][ncpath] = curr_mod_time
# _logger.debug("ncpath=%s",ncpath)
ncfile = netCDF4.Dataset(ncpath)
fileok = True
break
except (OSError, RuntimeError) as exc:
time.sleep(itry)
if not fileok:
_logger.error("%s: %s", ncpath, exc)
continue
n_files_read += 1
if skip_file:
continue
try:
if not dsinfo['base_time'] and 'base_time' in ncfile.variables:
dsinfo['base_time'] = 'base_time'
tdim = None
# look for a time dimension
for tname in ['time', 'Time']:
if tname in ncfile.dimensions:
tdim = ncfile.dimensions[tname]
break
if not tdim:
continue
# check for tdim.is_unlimited?
if not dsinfo['time_dim_name']:
dsinfo['time_dim_name'] = tdim.name
if 'station' in ncfile.dimensions:
if not dsinfo['nstations']:
dsinfo['nstations'] = len(ncfile.dimensions["station"])
dsinfo['station_dim'] = "station"
elif not dsinfo['nstations'] == \
len(ncfile.dimensions["station"]):
_logger.warning(
"%s: station dimension (%d) is "
"different than that of other files (%d)",
ncpath,
len(ncfile.dimensions["station"]),
dsinfo['nstations'])
if not dsinfo['station_names'] and 'station' in ncfile.variables:
var = ncfile.variables["station"]
if var.datatype == np.dtype('S1'):
dsinfo['station_names'] = \
[str(netCDF4.chartostring(v)) for v in var]
# look for a time variable
if not dsinfo['time_name']:
for tname in time_names:
if tname in ncfile.variables:
if tdim.name in ncfile.variables[tname].dimensions:
dsinfo['time_name'] = tname
break
if not dsinfo['time_name'] or \
not dsinfo['time_name'] in ncfile.variables:
# time variable not yet found or not in this file
continue
if not tdim.name in ncfile.variables[dsinfo['time_name']].dimensions:
# time variable in this file doesn't have a time dimension
continue
# pylint: disable=no-member
for (nc_vname, var) in ncfile.variables.items():
# looking for time series variables
if not dsinfo['time_dim_name'] in var.dimensions:
continue
# time variable
if nc_vname == dsinfo['time_name']:
continue
# exported variable name
if hasattr(var, 'short_name'):
exp_vname = getattr(var, 'short_name')
else:
exp_vname = nc_vname
# var.dimensions is a tuple of dimension names
time_index = var.dimensions.index(dsinfo['time_dim_name'])
# Check if we have found this variable in a earlier file
if not exp_vname in dsinfo_vars:
dsinfo_vars[exp_vname] = {}
dsinfo_vars[exp_vname]['netcdf_name'] = nc_vname
dsinfo_vars[exp_vname]['shape'] = var.shape
dsinfo_vars[
exp_vname]['dimnames'] = var.dimensions
dsinfo_vars[exp_vname]['dtype'] = var.dtype
dsinfo_vars[exp_vname]['time_index'] = time_index
# Grab certain attributes
for att in ['units', 'long_name']:
if hasattr(var, att):
dsinfo_vars[exp_vname][att] = getattr(var, att)
# Set default units to ''
if not 'units' in dsinfo_vars[exp_vname]:
dsinfo_vars[exp_vname]['units'] = ''
continue
# variable has been found in an earlier ncfile
# check for consistancy across files
if dsinfo_vars[exp_vname]['shape'][1:] != var.shape[1:]:
# the above check works even if either shape
# has length 1
if len(dsinfo_vars[exp_vname]['shape']) != \
len(var.shape):
# changing number of dimensions, punt
_logger.error(
"%s: %s: number of "
"dimensions: %d and %d changes. "
"Skipping this variable.",
ncpath, nc_vname, len(var.shape),
len(dsinfo_vars[exp_vname]['shape']))
del dsinfo_vars[exp_vname]
continue
# here we know that shapes have same length and
# they must have len > 1. Allow final dimension
# to change.
ndim = len(var.shape)
if (dsinfo_vars[exp_vname]['shape'][1:(ndim-1)] !=
var.shape[1:(ndim-1)]):
_logger.error(
"%s: %s: incompatible shapes: "
"%s and %s. Skipping this variable.",
ncpath, nc_vname, repr(var.shape),
repr(dsinfo_vars[exp_vname]['shape']))
del dsinfo_vars[exp_vname]
continue
# set shape to max shape (leaving the problem
# for later...)
dsinfo_vars[exp_vname]['shape'] = tuple(
[max(i, j) for (i, j) in zip(
dsinfo_vars[exp_vname]['shape'], var.shape)])
if dsinfo_vars[exp_vname]['dtype'] != var.dtype:
_logger.error(
"%s: %s: type=%s is different than "
"in other files",
ncpath, nc_vname, repr(var.dtype))
if dsinfo_vars[exp_vname]['time_index'] != time_index:
_logger.error(
"%s: %s: time_index=%d is different than "
"in other files. Skipping this variable.",
ncpath, nc_vname, time_index)
del dsinfo_vars[exp_vname]
for att in ['units', 'long_name']:
if hasattr(var, att) and att in dsinfo_vars[exp_vname]:
if getattr(var, att) != dsinfo_vars[exp_vname][att]:
_logger.info(
"%s: %s: %s=%s is different than previous value=%s",
ncpath, nc_vname, att, getattr(var, att),
dsinfo_vars[exp_vname][att])
dsinfo_vars[exp_vname][att] = getattr(var, att)
finally:
ncfile.close()
if not n_files_read:
msg = "No variables found"
raise nc_exc.NoDataFoundException(msg)
# cache dsinfo
dsvars = dsinfo_vars.copy()
self.save_dataset_info(dsinfo)
return dsvars
    def resolve_variable_shapes(self, variables, selectdim):
        """Determine the shape of variables in this dataset.
        Args:
            variables: List of variable names.
            selectdim: A dict containing by dimension name,
                the indices of the dimension to be read.
                For example: {"station":[3,4,5]} to read indices 3,4 and 5
                (indexed from 0) of the station dimension for variables
                which have that dimension. A index of -1 indicates that
                variables which don't have the dimension are still to be read.
        Returns:
            Dict of resultant variable shapes, which may be different
            than the non-time dimensions of the variable in a file if
            the user has specified selectdim to sub-select over a dimension.
        """
        dsinfo = self.get_dataset_info()
        # Populate the cached variable info on first use.
        if len(dsinfo['variables']) == 0:
            self.get_variables()
            dsinfo = self.get_dataset_info()
        dsinfo_vars = dsinfo['variables']
        vshapes = {}
        for exp_vname in variables:
            if exp_vname in dsinfo_vars:
                # maximum shape of this variable in all files
                vshape = list(dsinfo_vars[exp_vname]["shape"])
                time_index = dsinfo_vars[exp_vname]["time_index"]
                vdims = dsinfo_vars[exp_vname]["dimnames"]
                dmatch = True
                for dim in selectdim:
                    # some dimensions selected
                    if not dim in vdims:
                        # This variable does not have the selected dimension
                        # If all selected indices for the dimension are >= 0
                        # then don't return any values for this variable.
                        # -1 for a selected dimension means return values
                        # for the variable even if it doesn't have the dimension
                        try:
                            if all(i >= 0 for i in selectdim[dim]):
                                dmatch = False
                        except TypeError:   # not iterable
                            if selectdim[dim] >= 0:
                                dmatch = False
                if not dmatch:
                    continue
                # determine selected shape for variable
                for idim, dim in enumerate(vdims):
                    if dim == dsinfo['time_dim_name']:
                        # time dimension: length resolved at read time
                        pass
                    elif dim == "sample":
                        # high rate files with a sample dimension
                        # Add support for this eventually. For now
                        # just grab first value
                        vshape[idim] = 1
                    elif dim in selectdim:
                        # variable has a selected dimension
                        try:
                            if not all(i < 0 for i in selectdim[dim]):
                                idx = [i for i in selectdim[dim] if i >= 0]
                                vshape[idim] = len(idx)
                        except TypeError:   # not iterable
                            if selectdim[dim] >= 0:
                                vshape[idim] = 1
                # remove non-time shape values of 1
                vshape = [dim for (idim, dim) in enumerate(vshape) \
                    if idim != time_index or dim > 1]
                vshapes[exp_vname] = vshape
        return vshapes
def read_times(self, ncfile, ncpath, start_time, end_time, times,
               size_limit):
    """Read values of the time variable from a NetCDF dataset.

    Args:
        ncfile: An opened netCDF4.Dataset.
        ncpath: Path to the dataset. netCDF4.Dataset.filepath() is only
            supported in netcdf version >= 4.1.2.
        start_time: A datetime.datetime. Times greater than or equal
            to start_time are read.
        end_time: A datetime.datetime. Times less than end_time are read.
        times: A list of UTC timestamps; the times read are
            appended to this list.
        size_limit: Raise TooMuchDataException if the size of the times
            read exceeds this limit.

    Returns:
        A built-in slice object, giving the start and stop indices of the
        requested time period in the file. The times list argument is
        also extended with the times read from the file.
        slice(0) (start=None) is returned when nothing matches.

    Raises:
        TODO: what exceptions can be raised when slicing a netcdf4 variable?
        nc_exc.TooMuchDataException
    """
    debug = False

    dsinfo = self.get_dataset_info()

    # A scalar "base_time" variable, if present, is an epoch offset
    # used when the time variable has no parseable "units" attribute.
    base_time = None
    if dsinfo['base_time'] and \
            dsinfo['base_time'] in ncfile.variables and \
            len(ncfile.variables[dsinfo['base_time']].dimensions) == 0:
        base_time = ncfile.variables[dsinfo['base_time']].getValue()
        # _logger.debug("base_time=%d",base_time)

    # NOTE(review): if the time variable is absent the method falls off
    # the end and returns None rather than slice(0) -- callers appear to
    # assume a slice; confirm the missing-time-variable path.
    if dsinfo['time_name'] in ncfile.variables:
        var = ncfile.variables[dsinfo['time_name']]

        if len(var) == 0:
            return slice(0)

        if hasattr(var, "units") and 'since' in var.units:
            try:
                # times from netCDF4.num2date are timezone naive.
                # Use replace(tzinfo=pytz.UTC) to assign a timezone.
                tvals = [
                    d.replace(tzinfo=pytz.UTC).timestamp() for d in
                    netCDF4.num2date(var[:], var.units, 'standard')]
            except IndexError as exc:
                # most likely has a dimension of 0
                _logger.error(
                    "%s: %s: cannot index variable %s",
                    os.path.split(ncpath)[1],
                    exc, dsinfo['time_name'])
                return slice(0)
            except TypeError:
                # units string exists but num2date could not parse it;
                # fall back to base_time + offset when possible.
                if base_time:
                    _logger.warning(
                        "%s: %s: cannot parse units: %s. "
                        "Using base_time instead",
                        os.path.split(ncpath)[1],
                        dsinfo['time_name'], var.units)
                    tvals = [base_time + val for val in var[:]]
                else:
                    # last resort: treat raw values as timestamps
                    _logger.error(
                        "%s: %s: cannot parse units: %s",
                        os.path.split(ncpath)[1],
                        dsinfo['time_name'], var.units)
                    tvals = [val for val in var[:]]
        else:
            # no "units ... since ..." attribute: values are offsets
            # from base_time
            try:
                tvals = [base_time + val for val in var[:]]
            except IndexError as exc:
                # most likely has a dimension of 0
                _logger.error(
                    "%s: %s: cannot index variable %s",
                    os.path.split(ncpath)[1],
                    exc, dsinfo['time_name'])
                return slice(0)

        # pylint: disable=pointless-string-statement
        """
        tvals = [
            d.timestamp() for d in
            netCDF4.num2date(var[:], var.units, 'standard')]
        """

        if len(tvals) == 0:
            return slice(0)

        # find first index >= start_time, and (scanning from the end)
        # the index just past the last value < end_time
        try:
            istart = next(idx for idx, tval in enumerate(tvals) \
                if tval >= start_time.timestamp())
            # _logger.debug("start_time=%s, file=%s,istart=%d",
            #     start_time,ncpath,istart)
            iend = next(idx for idx, tval in enumerate(reversed(tvals)) \
                if tval < end_time.timestamp())
            iend = len(tvals) - iend
            # _logger.debug("end_time=%s, file=%s,iend=%d",
            #     end_time,ncpath,iend)
        except StopIteration:
            return slice(0)

        if iend - istart == 0:
            return slice(0)
        elif iend - istart < 0:
            # negative span means the file's times are not monotonic
            _logger.warning(
                "%s: times in file are not ordered, start_time=%s,"
                "end_time=%s, file times=%s - %s, istart=%d, iend=%d",
                os.path.split(ncpath)[1],
                start_time.isoformat(), end_time.isoformat(),
                datetime.fromtimestamp(tvals[0], tz=pytz.utc).isoformat(),
                datetime.fromtimestamp(tvals[-1], tz=pytz.utc).isoformat(),
                istart, iend)
            return slice(0)
        elif debug:
            _logger.debug(
                "%s: tvals[%d]=%s, tvals[%d]=%s, "
                "start_time=%s, end_time=%s",
                os.path.split(ncpath)[1],
                istart,
                datetime.fromtimestamp(
                    tvals[istart], tz=pytz.utc).isoformat(),
                iend,
                datetime.fromtimestamp(
                    tvals[iend-1], tz=pytz.utc).isoformat(),
                start_time.isoformat(),
                end_time.isoformat())

        time_slice = slice(istart, iend, 1)
        tvals = tvals[time_slice]

        # enforce the caller's remaining size budget before extending
        tsize = sys.getsizeof(tvals)
        if tsize > size_limit:
            raise nc_exc.TooMuchDataException(
                "too many time values requested, size={0} MB".\
                format(tsize/(1000 * 1000)))

        times.extend(tvals)
        return time_slice
def read_time_series_data(
        self, ncfile, ncpath, exp_vname, time_slice, vshape,
        selectdim, dim2):
    """Read values of a time-series variable from a netCDF4 dataset.

    Args:
        ncfile: An opened netCDF4.Dataset.
        ncpath: Path to the dataset. netCDF4.Dataset.filepath() is only
            supported in netcdf version >= 4.1.2.
        exp_vname: Exported name of variable to read.
        time_slice: The slice() of time indices to read.
        vshape: Shape of the variable in case it isn't in the file
            and a filled array should be returned.
        selectdim: A dict containing for each dimension name of type
            string, the indices of the dimension to read.
            For example: {"station":[3,4,5]} to read indices 3,4 and 5
            (indexed from 0) of the station dimension for variables
            which have that dimension.
        dim2: Output dict; if empty, it is filled with 'data', 'name'
            and 'units' describing the second (non-time, non-selected)
            dimension of the variable, such as height.

    Returns:
        A numpy array containing the data read (masked values filled
        with 0 for integer dtypes, NaN otherwise).
    """
    dsinfo = self.get_dataset_info()
    dsinfo_vars = dsinfo['variables']

    debug = False

    # which dimension is time?
    time_index = dsinfo_vars[exp_vname]["time_index"]

    vdtype = dsinfo_vars[exp_vname]["dtype"]

    nc_vname = dsinfo_vars[exp_vname]['netcdf_name']

    if nc_vname in ncfile.variables:
        var = ncfile.variables[nc_vname]

        # Build the tuple of indices to read, one entry per dimension.
        idx = ()
        for idim, dim in enumerate(var.dimensions):
            if dim == dsinfo['time_dim_name']:
                idx += (time_slice,)
            elif dim == "sample":
                # high rate files with a sample dimension
                # Add support for this eventually. For now
                # just grab first value
                idx += (0,)
            elif dim in selectdim:
                # variable has a selected dimension
                try:
                    if all(i < 0 for i in selectdim[dim]):
                        # all indices negative: read the whole dimension
                        sized = len(ncfile.dimensions[dim])
                        idx += (slice(0, sized), )
                    else:
                        idx += \
                            (tuple([i for i in selectdim[dim] if i >= 0]),)
                except TypeError:  # not iterable
                    if selectdim[dim] >= 0:
                        # BUG FIX: append the scalar index; the original
                        # assigned idx = (selectdim[dim],), discarding the
                        # indices already built for earlier dimensions.
                        idx += (selectdim[dim],)
                    else:
                        sized = len(ncfile.dimensions[dim])
                        idx += (slice(0, sized), )
            else:
                # unselected extra dimension: read it whole, and describe
                # the first such dimension to the caller via dim2
                sized = len(ncfile.dimensions[dim])
                idx += (slice(0, sized), )
                if not dim2:
                    # dsinfo_vars[exp_vname]['shape'][idim] will
                    # be the largest value for this dimension
                    # in the set of files.
                    sized = dsinfo_vars[exp_vname]['shape'][idim]
                    dim2['data'] = [i for i in range(sized)]
                    dim2['name'] = dim
                    dim2['units'] = ''

        if debug and time_slice.stop - time_slice.start > 0:
            _logger.debug(
                "%s: %s: time_slice.start,"
                "time_slice.stop=%d,%d, idx[1:]=%s",
                os.path.split(ncpath)[1], nc_vname,
                time_slice.start, time_slice.stop,
                repr(idx[1:]))

        # extract the data from var
        vdata = var[idx]
        # fill value for missing data: 0 for integer kinds, NaN otherwise
        fill_val = (
            0 if vdata.dtype.kind == 'i' or
            vdata.dtype.kind == 'u' else float('nan'))
        if isinstance(vdata, np.ma.core.MaskedArray):
            vdata = vdata.filled(fill_value=fill_val)
        if vdata.dtype != vdtype:
            # BUG FIX: convert the data read from the file. The original
            # called the unbound np.ndarray.astype(vdtype), which passes
            # the dtype as "self" and raises TypeError, and in any case
            # did not use vdata.
            vdata = vdata.astype(vdtype)

        if len(vshape) > 0 and tuple(vshape[1:]) != vdata.shape[1:]:
            # _logger.debug("vshape[1:]=%d, vdata.shape[1:]=%d",
            #     vshape[1:], vdata.shape[1:])
            # changing shape. Add support for final dimension
            # increasing. vshape should be the largest expected shape
            shape = list(vdata.shape)
            # how much to grow it by (assumes vshape[-1] >= vdata.shape[-1])
            shape[-1] = vshape[-1] - vdata.shape[-1]
            # pad the last dimension with fill values up to vshape[-1]
            vdata = np.append(
                vdata, np.ma.array(
                    data=np.empty(
                        shape=shape, dtype=vdata.dtype),
                    mask=True, fill_value=fill_val).filled(),
                axis=-1)
    else:
        # variable is not in file, create NaN filled array
        # Determine shape of variable. Change the first, time dimension
        # to match the selected period. The remaining dimension
        # in dsinfo_vars[exp_vname]['shape'] is the largest of those
        # seen in the selected files.
        shape = vshape
        shape[time_index] = time_slice.stop - time_slice.start
        shape = tuple(shape)
        vdtype = dsinfo_vars[exp_vname]["dtype"]
        fill_val = (
            0 if vdtype.kind == 'i' or
            vdtype.kind == 'u' else float('nan'))
        vdata = np.ma.array(
            data=np.empty(
                shape=shape, dtype=vdtype),
            mask=True, fill_value=fill_val).filled()

    return vdata
def read_time_series(
        self,
        variables=(),
        start_time=pytz.utc.localize(datetime.min),
        end_time=pytz.utc.localize(datetime.max),
        selectdim=None,
        size_limit=1000 * 1000 * 1000,
        series=None,
        series_name_fmt=None):
    """Read a list of time-series variables from this fileset.

    Args:
        variables: A list of strs containing time series variable
            names to be read.
        start_time: A datetime, which is timezone aware, of the start
            time of the series to read.
        end_time: A datetime, timezone aware, end time of series to read.
        selectdim: A dict containing for each dimension name of type
            string, the indices of the dimension to read.
            For example: {"station":[3,4,5]} to read indices 3,4 and 5
            (indexed from 0) of the station dimension for variables
            which have that dimension.
        size_limit: Limit on the total size in bytes to read, used to
            screen huge requests.
        series: A list of series to be read by name.
        series_name_fmt: a datetime.strftime format to create a
            series name for the data found in each file, based
            on the time associated with the file.
            If series_name_fmt is None, all data is put in a dictionary
            element named ''.

    Returns:
        A dict containing, by series name:
            'time' : list of UTC timestamps,
            'data': list of numpy.ndarray containing the data for
                each variable,
            'vmap': dict by variable name,
                containing the index into the series data for the variable,
            'dim2': dict by variable name, of values for second dimension
                of the data, such as height.

    Raises:
        nc_exc.NoDataException

    The 'data' element in the returned dict is a list of numpy arrays,
    and not a dict by variable name. The 'vmap' element provides the
    mapping from a variable name to an index into 'data'. The data object
    is typically JSON-ified and sent to a browser. If it were a dict,
    the variable names may contain characters (such as single quotes)
    which cause headaches with JSON and javascript in django templates;
    it is cheaper to sanitize the small 'vmap' than the whole payload.
    The series names will not contain single quotes.
    """
    debug = False

    dsinfo = self.get_dataset_info()
    if not dsinfo['time_name']:
        # dataset not yet scanned; scan it to learn the variables
        self.get_variables()
        dsinfo = self.get_dataset_info()
    dsinfo_vars = dsinfo['variables']

    if not selectdim:
        selectdim = {}

    vshapes = self.resolve_variable_shapes(variables, selectdim)

    res_data = {}
    total_size = 0
    ntimes = 0

    files = self.get_files(start_time, end_time)
    if debug:
        _logger.debug(
            "len(files)=%d, series_name_fmt=%s",
            len(files), series_name_fmt)

    if series_name_fmt:
        file_tuples = [(f.time.strftime(series_name_fmt), f.path)
                       for f in files]
    else:
        file_tuples = [("", f.path) for f in files]

    for (series_name, ncpath) in file_tuples:
        if series and series_name not in series:
            continue
        if debug:
            _logger.debug("series=%s", str(series))
            _logger.debug("series_name=%s ,ncpath=%s", series_name, ncpath)

        # the files might be in the process of being moved, deleted, etc,
        # so retry a few times before giving up on a file
        fileok = False
        last_exc = None
        for itry in range(0, 3):
            try:
                ncfile = netCDF4.Dataset(ncpath)
                fileok = True
                break
            except (OSError, RuntimeError) as exc:
                # BUG FIX: the "as exc" binding is deleted when the
                # except clause exits (PEP 3110), so save it to a name
                # that survives until after the retry loop.
                last_exc = exc
                time.sleep(itry)

        if not fileok:
            _logger.error("%s: %s", ncpath, last_exc)
            continue

        if series_name not in res_data:
            res_data[series_name] = {
                'time': [],
                'data': [],
                'vmap': {},
                'dim2': {},
            }
        otime = res_data[series_name]['time']
        odata = res_data[series_name]['data']
        ovmap = res_data[series_name]['vmap']
        odim2 = res_data[series_name]['dim2']

        try:
            size1 = sys.getsizeof(otime)
            # times are appended to otime
            time_slice = self.read_times(
                ncfile, ncpath, start_time, end_time, otime,
                size_limit - total_size)

            # time_slice.start is None if nothing to read
            if time_slice.start is None or \
                    time_slice.stop <= time_slice.start:
                continue

            total_size += sys.getsizeof(otime) - size1

            for exp_vname in variables:
                # skip if variable is not a time series or
                # doesn't have a selected dimension
                if exp_vname not in dsinfo_vars or \
                        exp_vname not in vshapes:
                    continue

                # selected shape of this variable
                vshape = vshapes[exp_vname]
                # reduce_ is assumed to be functools.reduce imported at
                # module level -- TODO confirm against file header
                vsize = reduce_(
                    operator.mul, vshape, 1) * \
                    dsinfo_vars[exp_vname]["dtype"].itemsize
                if total_size + vsize > size_limit:
                    raise nc_exc.TooMuchDataException(
                        "too much data requested, will exceed {} mbytes".
                        format(size_limit/(1000 * 1000)))

                dim2 = {}
                vdata = self.read_time_series_data(
                    ncfile, ncpath, exp_vname, time_slice, vshape,
                    selectdim, dim2)
                if exp_vname not in odim2:
                    odim2[exp_vname] = dim2

                if exp_vname not in ovmap:
                    # first file containing this variable
                    size1 = 0
                    vindex = len(odata)
                    odata.append(vdata)
                    ovmap[exp_vname] = vindex
                else:
                    vindex = ovmap[exp_vname]
                    if debug:
                        # BUG FIX: odata is a list indexed by vindex,
                        # not a dict indexed by variable name.
                        _logger.debug(
                            "odata[%s].shape=%s, vdata.shape=%s",
                            exp_vname, odata[vindex].shape, vdata.shape)
                    size1 = sys.getsizeof(odata[vindex])
                    time_index = dsinfo_vars[exp_vname]["time_index"]
                    odata[vindex] = np.append(
                        odata[vindex], vdata, axis=time_index)

                total_size += sys.getsizeof(odata[vindex]) - size1
        finally:
            ncfile.close()
        ntimes += len(otime)

    if ntimes == 0:
        exc = nc_exc.NoDataException(
            "No data found between {} and {}".
            format(
                start_time.isoformat(),
                end_time.isoformat()))
        # _logger.warning("%s: %s", str(self), repr(exc))
        raise exc

    # BUG FIX: count the data arrays in each series, not the four fixed
    # keys of the series dict (which made this total always non-zero
    # once any series existed, so the exception below never fired).
    ncol_read = sum(len(cdata['data']) for cdata in res_data.values())
    if ncol_read == 0:
        exc = nc_exc.NoDataException(
            "No variables named {} found between {} and {}".
            format(
                repr(variables),
                start_time.isoformat(),
                end_time.isoformat()))
        # _logger.warning("%s: %s", str(self), repr(exc))
        raise exc

    if debug:
        for series_name in res_data.keys():
            for exp_vname in res_data[series_name]['vmap']:
                var_index = res_data[series_name]['vmap'][exp_vname]
                # BUG FIX: the arrays live under the 'data' key of the
                # series dict, not directly under the series dict.
                _logger.debug(
                    "res_data[%s][%d].shape=%s, exp_vname=%s",
                    series_name, var_index,
                    repr(res_data[series_name]['data'][var_index].shape),
                    exp_vname)
        _logger.debug(
            "total_size=%d", total_size)

    return res_data
|
|
#!/usr/bin/python
# copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example code to deploy a cluster by compass client api."""
import os
import re
import sys
import time
# from compass.apiclient.restful import Client
from restful import Client
# ---------------------------------------------------------------------------
# Deployment settings. NOTE(review): credentials and addresses are
# hard-coded example values for a lab environment; do not reuse in
# production.
# ---------------------------------------------------------------------------
COMPASS_SERVER_URL = 'http://localhost/api'
COMPASS_LOGIN_EMAIL = 'admin@huawei.com'
COMPASS_LOGIN_PASSWORD = 'admin'
SWITCH_IP = '172.29.8.40'
SWITCH_SNMP_VERSION = '2c'
SWITCH_SNMP_COMMUNITY = 'public'
CLUSTER_NAME = 'test_cluster'
HOST_NAME_PREFIX = 'host'
SERVICE_USERNAME = 'service'
SERVICE_PASSWORD = 'service'
CONSOLE_USERNAME = 'console'
CONSOLE_PASSWORD = 'console'
HA_VIP = ''

# Network plans for the four openstack networks.
MANAGEMENT_IP_START = '10.145.88.130'
MANAGEMENT_IP_END = '10.145.88.254'
MANAGEMENT_IP_GATEWAY = '10.145.88.1'
MANAGEMENT_NETMASK = '255.255.255.0'
MANAGEMENT_NIC = 'eth0'
MANAGEMENT_PROMISC = 0
TENANT_IP_START = '192.168.10.130'
TENANT_IP_END = '192.168.10.255'
TENANT_IP_GATEWAY = '192.168.10.1'
TENANT_NETMASK = '255.255.255.0'
TENANT_NIC = 'eth0'
TENANT_PROMISC = 0
PUBLIC_IP_START = '12.234.32.130'
PUBLIC_IP_END = '12.234.32.255'
PUBLIC_IP_GATEWAY = '12.234.32.1'
PUBLIC_NETMASK = '255.255.255.0'
PUBLIC_NIC = 'eth1'
PUBLIC_PROMISC = 1
STORAGE_IP_START = '172.16.100.130'
STORAGE_IP_END = '172.16.100.255'
STORAGE_NETMASK = '255.255.255.0'
STORAGE_IP_GATEWAY = '172.16.100.1'
STORAGE_NIC = 'eth0'
STORAGE_PROMISC = 0

# Partition sizes as a percentage of the disk.
HOME_PERCENTAGE = 5
TMP_PERCENTAGE = 5
VAR_PERCENTAGE = 10
HOST_OS = 'CentOS-6.5-x86_64'

# Defaults that may be overridden from the environment (see loop below).
PRESET_VALUES = {
    'LANGUAGE': 'EN',
    'TIMEZONE': 'GMT',
    'HTTPS_PROXY': 'http://10.145.89.100:3128',
    'NO_PROXY': ['127.0.0.1'],
    'DOMAIN': 'ods.com',
    'NAMESERVERS': ['10.145.89.100'],
    'NTP_SERVER': '10.145.89.100',
    'GATEWAY': '10.145.88.1',
    'PROXY': 'http://10.145.89.100:3128',
    'OS_NAME_PATTERN': 'CentOS.*',
    'DISTRIBUTED_SYSTEM_NAME_PATTERN': 'openstack.*',
    'FLAVOR_PATTERN': 'allinone.*',
    'ROLES_LIST': ['allinone-compute'],
    'MACHINES_TO_ADD': ['00:0c:29:a7:ea:4b'],
    'BUILD_TIMEOUT': 60,
    'SEARCH_PATH': ['ods.com'],
    'SERVER_USERNAME': 'root',
    'SERVER_PASSWORD': 'root'
}
# Override any preset from a same-named environment variable.
# NOTE(review): an env override always yields a str, turning list-valued
# settings (NO_PROXY, NAMESERVERS, ...) into strings -- confirm callers
# tolerate that.
for v in PRESET_VALUES:
    if v in os.environ.keys():
        PRESET_VALUES[v] = os.environ.get(v)
        print (v + PRESET_VALUES[v] + " is set by env variables")
    else:
        print (PRESET_VALUES[v])

# instantiate a client
client = Client(COMPASS_SERVER_URL)

# login
status, response = client.login(COMPASS_LOGIN_EMAIL, COMPASS_LOGIN_PASSWORD)
print '============================================================'
print 'login status: %s response: %s' % (status, response)
if status >= 400:
    sys.exit(1)

# list all switches
status, response = client.list_switches()
print '============================================================='
print 'get all switches status: %s response: %s' % (status, response)

# add a switch
status, response = client.add_switch(
    SWITCH_IP,
    SWITCH_SNMP_VERSION,
    SWITCH_SNMP_COMMUNITY
)
print '============================================'
print 'adding a switch..status: %s, response: %s' % (status, response)

# if switch already exists, get one from all switches
switch = None
if status < 400:
    switch = response
else:
    status, response = client.list_switches()
    print '========================================='
    print 'list switches status %s response %s' % (status, response)
    if status >= 400:
        sys.exit(1)
    for switch_ in response:
        if switch_['ip'] == SWITCH_IP:
            switch = switch_
            break

switch_id = switch['id']
switch_ip = switch['ip']
print '======================'
print 'switch has been set as %s' % switch_ip

# wait till switch state becomes under_monitoring,
# polling the switch every 5 seconds
while switch['state'] != 'under_monitoring':
    print 'waiting for state to become under_monitoring'
    client.poll_switch(switch_id)
    status, resp = client.get_switch(switch_id)
    print '====================================='
    print 'poll switch status %s response %s' % (status, resp)
    switch = resp
    print 'switch is in state: %s' % switch['state']
    time.sleep(5)

print '========================================='
print 'switch state now is %s' % (switch['state'])

# create a machine list from the MAC addresses to be added
machine_macs = {}
machines = {}
for machine in PRESET_VALUES['MACHINES_TO_ADD']:
    status, response = client.list_machines(mac=machine)
    print '============================================'
    print 'list machines status %s response %s' % (status, response)
    if status >= 400:
        sys.exit(1)
    if status == 200 and response != []:
        machine_id = response[0]['id']
        machine_macs[machine_id] = response[0]['mac']
        # NOTE(review): 'machines' is overwritten with the whole response
        # on every iteration rather than accumulated -- confirm intent
        # for multi-MAC deployments.
        machines = response

print '================================='
print 'found machines are : %s' % machines

# abort unless every requested MAC was discovered
machines_to_add = PRESET_VALUES['MACHINES_TO_ADD']
if set(machine_macs.values()) != set(machines_to_add):
    print 'only found macs %s while expected are %s' % (
        machine_macs.values(), machines_to_add)
    sys.exit(1)

# list all adapters
status, response = client.list_adapters()
print '==============================='
print 'all adapters are: %s' % response
if status >= 400:
    sys.exit(1)

# pick the first adapter/os/flavor combination matching the
# configured name patterns
adapters = response
adapter_id = None
os_id = None
flavor_id = None
adapter_pattern = re.compile(PRESET_VALUES['DISTRIBUTED_SYSTEM_NAME_PATTERN'])
os_pattern = re.compile(PRESET_VALUES['OS_NAME_PATTERN'])
flavor_pattern = re.compile(PRESET_VALUES['FLAVOR_PATTERN'])
for adapter in adapters:
    if (
        'distributed_system_name' in adapter and
        adapter_pattern.match(adapter['distributed_system_name'])
    ):
        adapter_id = adapter['id']
        for supported_os in adapter['supported_oses']:
            if os_pattern.match(supported_os['name']):
                os_id = supported_os['id']
                break
        for flavor in adapter['flavors']:
            if flavor_pattern.match(flavor['name']):
                flavor_id = flavor['id']
    if adapter_id and os_id and flavor_id:
        break

print '======================================================='
print 'using adapter %s os %s flavor %s to deploy cluster' % (
    adapter_id, os_id, flavor_id
)

# add a cluster
status, response = client.add_cluster(
    CLUSTER_NAME,
    adapter_id,
    os_id,
    flavor_id
)
print '==============================================================='
print 'add cluster %s status %s: %s' % (CLUSTER_NAME, status, response)
if status >= 400:
    sys.exit(1)

# re-read the cluster to obtain its id
status, response = client.list_clusters(name=CLUSTER_NAME)
print '================================================================'
print 'list clusters %s status %s: %s' % (CLUSTER_NAME, status, response)
if status >= 400:
    sys.exit(1)
cluster = response[0]
cluster_id = cluster['id']
print '=================='
print 'cluster is %s' % cluster

# Add hosts to the cluster
machines_dict = {}
machine_id_list = []
for machine in machines:
    id_mapping = {}
    id_mapping['machine_id'] = machine['id']
    machine_id_list.append(id_mapping)
machines_dict['machines'] = machine_id_list

status, response = client.add_hosts_to_cluster(
    cluster_id, machines_dict
)
print '==================================='
print 'add hosts %s to cluster status %s response %s' % (
    machines_dict, status, response)
if status >= 400:
    sys.exit(1)

# Add two subnets
subnet_1 = '10.145.89.0/24'
subnet_2 = '192.168.100.0/24'

status, response = client.add_subnet(subnet_1)
print '=================='
print 'add subnet %s status %s: %s' % (subnet_1, status, response)
if status >= 400:
    sys.exit(1)

status, response = client.add_subnet(subnet_2)
print '=================='
print 'add subnet %s status %s: %s' % (subnet_2, status, response)
if status >= 400:
    sys.exit(1)

# look up the ids of the subnets just added
status, subnet1 = client.list_subnets(subnet=subnet_1)
print '==========================================================='
print 'list subnet %s status %s: %s' % (subnet_1, status, subnet1)
if status >= 400:
    sys.exit(1)

status, subnet2 = client.list_subnets(subnet=subnet_2)
print '==========================================================='
print 'list subnet %s status %s: %s' % (subnet_2, status, subnet2)
if status >= 400:
    sys.exit(1)

subnet1_id = subnet1[0]['id']
subnet2_id = subnet2[0]['id']
print '========================'
print 'subnet1 has id: %s, subnet is %s' % (subnet1_id, subnet1)
print 'subnet2 has id: %s, subnet is %s' % (subnet2_id, subnet2)

# Add host network (only the first host of the cluster is configured)
status, response = client.list_cluster_hosts(cluster_id)
print '================================================'
print 'list cluster hosts status %s: %s' % (status, response)
if status >= 400:
    sys.exit(1)

host = response[0]
host_id = host['id']
print '=================='
print 'host is: %s' % host

# eth0: management network
status, response = client.add_host_network(
    host_id,
    'eth0',
    '10.145.89.200',
    subnet1_id,
    is_mgmt=True
)
print '======================='
print 'add eth0 network status %s: %s' % (status, response)
if status >= 400:
    sys.exit(1)

# eth1: promiscuous (public/tenant traffic) network
status, response = client.add_host_network(
    host_id,
    'eth1',
    '192.168.100.200',
    subnet2_id,
    is_promiscuous=True
)
print '======================='
print 'add eth1 network status %s: %s' % (status, response)
if status >= 400:
    sys.exit(1)

# Update os config to cluster
cluster_os_config = {
    'general': {
        'language': PRESET_VALUES['LANGUAGE'],
        'timezone': PRESET_VALUES['TIMEZONE'],
        'http_proxy': PRESET_VALUES['PROXY'],
        'https_proxy': PRESET_VALUES['HTTPS_PROXY'],
        'no_proxy': PRESET_VALUES['NO_PROXY'],
        'ntp_server': PRESET_VALUES['NTP_SERVER'],
        'dns_servers': PRESET_VALUES['NAMESERVERS'],
        'domain': PRESET_VALUES['DOMAIN'],
        'search_path': PRESET_VALUES['SEARCH_PATH'],
        'default_gateway': PRESET_VALUES['GATEWAY']
    },
    'server_credentials': {
        'username': PRESET_VALUES['SERVER_USERNAME'],
        'password': PRESET_VALUES['SERVER_PASSWORD']
    },
    'partition': {
        '/var': {
            'percentage': VAR_PERCENTAGE,
        },
        '/home': {
            'percentage': HOME_PERCENTAGE,
        }
    }
}

# Per-service and per-console credentials plus the NIC mapping for
# each openstack network.
cluster_package_config = {
    'security': {
        'service_credentials': {
            'image': {
                'username': SERVICE_USERNAME,
                'password': SERVICE_PASSWORD
            },
            'compute': {
                'username': SERVICE_USERNAME,
                'password': SERVICE_PASSWORD
            },
            'dashboard': {
                'username': SERVICE_USERNAME,
                'password': SERVICE_PASSWORD
            },
            'identity': {
                'username': SERVICE_USERNAME,
                'password': SERVICE_PASSWORD
            },
            'metering': {
                'username': SERVICE_USERNAME,
                'password': SERVICE_PASSWORD
            },
            'rabbitmq': {
                'username': SERVICE_USERNAME,
                'password': SERVICE_PASSWORD
            },
            'volume': {
                'username': SERVICE_USERNAME,
                'password': SERVICE_PASSWORD
            },
            'mysql': {
                'username': SERVICE_USERNAME,
                'password': SERVICE_PASSWORD
            }
        },
        'console_credentials': {
            'admin': {
                'username': CONSOLE_USERNAME,
                'password': CONSOLE_PASSWORD
            },
            'compute': {
                'username': CONSOLE_USERNAME,
                'password': CONSOLE_PASSWORD
            },
            'dashboard': {
                'username': CONSOLE_USERNAME,
                'password': CONSOLE_PASSWORD
            },
            'image': {
                'username': CONSOLE_USERNAME,
                'password': CONSOLE_PASSWORD
            },
            'metering': {
                'username': CONSOLE_USERNAME,
                'password': CONSOLE_PASSWORD
            },
            'network': {
                'username': CONSOLE_USERNAME,
                'password': CONSOLE_PASSWORD
            },
            'object-store': {
                'username': CONSOLE_USERNAME,
                'password': CONSOLE_PASSWORD
            },
            'volume': {
                'username': CONSOLE_USERNAME,
                'password': CONSOLE_PASSWORD
            }
        }
    },
    'network_mapping': {
        'management': MANAGEMENT_NIC,
        'tenant': TENANT_NIC,
        'storage': STORAGE_NIC,
        'public': PUBLIC_NIC
    }
}

status, response = client.update_cluster_config(
    cluster_id,
    cluster_os_config,
    cluster_package_config
)
print '======================================='
print 'cluster %s update status %s: %s' % (
    cluster_id, status, response)
if status >= 400:
    sys.exit(1)

# assign the configured roles to the host
status, response = client.update_cluster_host(
    cluster_id, host_id, roles=PRESET_VALUES['ROLES_LIST'])
print '================================================='
print 'update cluster host %s/%s status %s: %s' % (
    cluster_id, host_id, status, response)
if status >= 400:
    sys.exit(1)

# Review and deploy
status, response = client.review_cluster(
    cluster_id, review={'hosts': [host_id]})
print '======================================='
print 'reviewing cluster status %s: %s' % (status, response)
if status >= 400:
    sys.exit(1)

status, response = client.deploy_cluster(
    cluster_id, deploy={'hosts': [host_id]})
print '======================================='
print 'deploy cluster status %s: %s' % (status, response)
if status >= 400:
    sys.exit(1)
|
|
"""Let's Encrypt CLI."""
# TODO: Sanity check all input. Be sure to avoid shell code etc...
import argparse
import atexit
import functools
import logging
import logging.handlers
import os
import pkg_resources
import sys
import time
import traceback
import configargparse
import configobj
import OpenSSL
import zope.component
import zope.interface.exceptions
import zope.interface.verify
from acme import client as acme_client
from acme import jose
import letsencrypt
from letsencrypt import account
from letsencrypt import colored_logging
from letsencrypt import configuration
from letsencrypt import constants
from letsencrypt import client
from letsencrypt import crypto_util
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt import log
from letsencrypt import reporter
from letsencrypt import storage
from letsencrypt.display import util as display_util
from letsencrypt.display import ops as display_ops
from letsencrypt.plugins import disco as plugins_disco
logger = logging.getLogger(__name__)
# Argparse's help formatting has a lot of unhelpful peculiarities, so we want
# to replace as much of it as we can...
# This is the stub to include in help generated by argparse
SHORT_USAGE = """
letsencrypt [SUBCOMMAND] [options] [domains]
The Let's Encrypt agent can obtain and install HTTPS/TLS/SSL certificates. By
default, it will attempt to use a webserver both for obtaining and installing
the cert. """
# This is the short help for letsencrypt --help, where we disable argparse
# altogether
USAGE = SHORT_USAGE + """Major SUBCOMMANDS are:
(default) everything Obtain & install a cert in your current webserver
auth Authenticate & obtain cert, but do not install it
install Install a previously obtained cert in a server
revoke Revoke a previously obtained certificate
rollback Rollback server configuration changes made during install
config_changes Show changes made to server config during installation
Choice of server for authentication/installation:
--apache Use the Apache plugin for authentication & installation
--nginx Use the Nginx plugin for authentication & installation
--standalone Run a standalone HTTPS server (for authentication only)
OR:
--authenticator standalone --installer nginx
More detailed help:
-h, --help [topic] print this message, or detailed help on a topic;
the available topics are:
all, apache, automation, manual, nginx, paths, security, testing, or any of
the subcommands
"""
def _find_domains(args, installer):
if args.domains is None:
domains = display_ops.choose_names(installer)
else:
domains = args.domains
if not domains:
raise errors.Error("Please specify --domains, or --installer that "
"will help in domain names autodiscovery")
return domains
def _determine_account(args, config):
    """Determine which account to use.

    In order to make the renewer (configuration de/serialization) happy,
    if ``args.account`` is ``None``, it will be updated based on the
    user input. Same for ``args.email``.

    :param argparse.Namespace args: CLI arguments
    :param letsencrypt.interface.IConfig config: Configuration object
    :param .AccountStorage account_storage: Account storage.

    :returns: Account and optionally ACME client API (biproduct of new
        registration).
    :rtype: `tuple` of `letsencrypt.account.Account` and
        `acme.client.Client`

    """
    account_storage = account.AccountFileStorage(config)
    # acme stays None unless a brand-new registration happens below
    acme = None

    if args.account is not None:
        acc = account_storage.load(args.account)
    else:
        accounts = account_storage.find_all()
        if len(accounts) > 1:
            # multiple stored accounts: let the user pick one
            acc = display_ops.choose_account(accounts)
        elif len(accounts) == 1:
            acc = accounts[0]
        else:  # no account registered yet
            if args.email is None:
                args.email = display_ops.get_email()
            if not args.email:  # get_email might return ""
                args.email = None

            def _tos_cb(regr):
                # Terms-of-service callback invoked during registration;
                # auto-accept when --tos was given, otherwise prompt.
                if args.tos:
                    return True
                msg = ("Please read the Terms of Service at {0}. You "
                       "must agree in order to register with the ACME "
                       "server at {1}".format(
                           regr.terms_of_service, config.server))
                return zope.component.getUtility(interfaces.IDisplay).yesno(
                    msg, "Agree", "Cancel")

            try:
                acc, acme = client.register(
                    config, account_storage, tos_cb=_tos_cb)
            except errors.Error as error:
                logger.debug(error, exc_info=True)
                raise errors.Error(
                    "Unable to register an account with ACME server")

    # side effect documented above: record the chosen account on args
    args.account = acc.id
    return acc, acme
def _init_le_client(args, config, authenticator, installer):
    """Construct a letsencrypt ``client.Client``.

    When an authenticator plugin is supplied, an ACME account is
    resolved first (possibly registering a new one); otherwise the
    client is built without account or ACME API.
    """
    acc = None
    acme = None
    if authenticator is not None:
        # if authenticator was given, then we will need account...
        acc, acme = _determine_account(args, config)
        logger.debug("Picked account: %r", acc)
        # XXX
        #crypto_util.validate_key_csr(acc.key)
    return client.Client(config, acc, authenticator, installer, acme=acme)
def _find_duplicative_certs(domains, config, renew_config):
    """Find existing certs that duplicate the request.

    Scans the renewal configuration directory and returns a pair
    ``(identical_names_cert, subset_names_cert)``: the lineage whose
    names exactly equal the requested domains, and the lineage whose
    names are a strict subset of them (either may be ``None``).
    """
    requested_names = set(domains)
    exact_match = None
    subset_match = None

    configs_dir = renew_config.renewal_configs_dir
    # Verify the directory is there
    le_util.make_or_verify_dir(configs_dir, mode=0o755, uid=os.geteuid())

    cli_config = configuration.RenewerConfiguration(config)
    for renewal_file in os.listdir(configs_dir):
        full_path = os.path.join(configs_dir, renewal_file)
        try:
            # layer the per-cert file over the default renewer config
            candidate_config = configobj.ConfigObj(
                renew_config.renewer_config_file)
            candidate_config.merge(configobj.ConfigObj(full_path))
            candidate_config.filename = full_path
            candidate_lineage = storage.RenewableCert(
                candidate_config, config_opts=None, cli_config=cli_config)
        except (configobj.ConfigObjError, errors.CertStorageError, IOError):
            logger.warning("Renewal configuration file %s is broken. "
                           "Skipping.", full_path)
            continue
        # TODO: Handle these differently depending on whether they are
        # expired or still valid?
        candidate_names = set(candidate_lineage.names())
        if candidate_names == requested_names:
            exact_match = candidate_lineage
        elif candidate_names.issubset(requested_names):
            subset_match = candidate_lineage

    return exact_match, subset_match
def _treat_as_renewal(config, domains):
    """Determine whether or not the call should be treated as a renewal.

    Looks for existing certificates whose names match or are a superset
    of *domains* and, unless --renew-by-default was given, asks the user
    whether to renew.

    :returns: RenewableCert or None if renewal shouldn't occur.
    :rtype: :class:`.storage.RenewableCert`

    :raises .Error: If the user would like to rerun the client again.

    """
    renewal = False
    # Considering the possibility that the requested certificate is
    # related to an existing certificate.  (config.duplicate, which
    # is set with --duplicate, skips all of this logic and forces any
    # kind of certificate to be obtained with renewal = False.)
    if not config.duplicate:
        ident_names_cert, subset_names_cert = _find_duplicative_certs(
            domains, config, configuration.RenewerConfiguration(config))
        # I am not sure whether that correctly reads the systemwide
        # configuration file.
        question = None
        if ident_names_cert is not None:
            question = (
                "You have an existing certificate that contains exactly the "
                "same domains you requested (ref: {0}){br}{br}Do you want to "
                "renew and replace this certificate with a newly-issued one?"
            ).format(ident_names_cert.configfile.filename, br=os.linesep)
        elif subset_names_cert is not None:
            question = (
                "You have an existing certificate that contains a portion of "
                "the domains you requested (ref: {0}){br}{br}It contains these "
                "names: {1}{br}{br}You requested these names for the new "
                "certificate: {2}.{br}{br}Do you want to replace this existing "
                "certificate with the new certificate?"
            ).format(subset_names_cert.configfile.filename,
                     ", ".join(subset_names_cert.names()),
                     ", ".join(domains),
                     br=os.linesep)
        if question is None:
            # We aren't in a duplicative-names situation at all, so we don't
            # have to tell or ask the user anything about this.
            pass
        elif config.renew_by_default or zope.component.getUtility(
                interfaces.IDisplay).yesno(question, "Replace", "Cancel"):
            # --renew-by-default skips the prompt entirely.
            renewal = True
        else:
            # User declined the replacement: point them at --duplicate
            # and abort with an error.
            reporter_util = zope.component.getUtility(interfaces.IReporter)
            reporter_util.add_message(
                "To obtain a new certificate that {0} an existing certificate "
                "in its domain-name coverage, you must use the --duplicate "
                "option.{br}{br}For example:{br}{br}{1} --duplicate {2}".format(
                    "duplicates" if ident_names_cert is not None else
                    "overlaps with",
                    sys.argv[0], " ".join(sys.argv[1:]),
                    br=os.linesep
                ),
                reporter_util.HIGH_PRIORITY)
            raise errors.Error(
                "User did not use proper CLI and would like "
                "to reinvoke the client.")
    if renewal:
        # Prefer the exact-match lineage over the subset match.
        return ident_names_cert if ident_names_cert is not None else subset_names_cert
    return None
def _report_new_cert(cert_path):
    """Reports the creation of a new certificate to the user."""
    rep = zope.component.getUtility(interfaces.IReporter)
    rep.add_message(
        "Congratulations! Your certificate has been saved at {0}.".format(
            cert_path),
        rep.MEDIUM_PRIORITY)
def _auth_from_domains(le_client, config, domains, plugins):
    """Authenticate and enroll certificate.

    Renews an existing lineage when one matches *domains* (see
    :func:`_treat_as_renewal`); otherwise obtains and enrolls a new one.

    :returns: the certificate lineage that was created or renewed
    :rtype: :class:`.storage.RenewableCert`

    :raises .Error: if a new certificate could not be obtained

    """
    # Note: This can raise errors... caught above us though.
    lineage = _treat_as_renewal(config, domains)
    if lineage is not None:
        # Renewal path: obtain a fresh cert/key and graft it onto the
        # existing lineage as a successor version.
        # TODO: schoen wishes to reuse key - discussion
        # https://github.com/letsencrypt/letsencrypt/pull/777/files#r40498574
        new_certr, new_chain, new_key, _ = le_client.obtain_certificate(domains)
        # TODO: Check whether it worked! <- or make sure errors are thrown (jdk)
        lineage.save_successor(
            lineage.latest_common_version(), OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_PEM, new_certr.body),
            new_key.pem, crypto_util.dump_pyopenssl_chain(new_chain))
        lineage.update_all_links_to(lineage.latest_common_version())
        # TODO: Check return value of save_successor
        # TODO: Also update lineage renewal config with any relevant
        # configuration values from this attempt? <- Absolutely (jdkasten)
    else:
        # TREAT AS NEW REQUEST
        lineage = le_client.obtain_and_enroll_certificate(domains, plugins)
        if not lineage:
            raise errors.Error("Certificate could not be obtained")
    _report_new_cert(lineage.cert)
    return lineage
# TODO: Make run as close to auth + install as possible
# Possible difficulties: args.csr was hacked into auth
def run(args, config, plugins):  # pylint: disable=too-many-branches,too-many-locals
    """Obtain a certificate and install."""
    # Begin authenticator and installer setup
    if args.configurator is not None and (args.installer is not None or
                                          args.authenticator is not None):
        # BUG FIX: the adjacent literals used to concatenate into
        # "--installerpair"; the trailing space restores the intended text.
        return ("Either --configurator or --authenticator/--installer "
                "pair, but not both, is allowed")
    if args.authenticator is not None or args.installer is not None:
        installer = display_ops.pick_installer(
            config, args.installer, plugins)
        authenticator = display_ops.pick_authenticator(
            config, args.authenticator, plugins)
    else:
        # TODO: this assumes that user doesn't want to pick authenticator
        # and installer separately...
        authenticator = installer = display_ops.pick_configurator(
            config, args.configurator, plugins)
    if installer is None or authenticator is None:
        return "Configurator could not be determined"
    # End authenticator and installer setup
    domains = _find_domains(args, installer)
    # TODO: Handle errors from _init_le_client?
    le_client = _init_le_client(args, config, authenticator, installer)
    lineage = _auth_from_domains(le_client, config, domains, plugins)
    # TODO: We also need to pass the fullchain (for Nginx)
    le_client.deploy_certificate(
        domains, lineage.privkey, lineage.cert, lineage.chain)
    le_client.enhance_config(domains, args.redirect)
    # A lineage with a single cert version was just created; more than
    # one version means this run renewed an existing cert.
    if len(lineage.available_versions("cert")) == 1:
        display_ops.success_installation(domains)
    else:
        display_ops.success_renewal(domains)
def auth(args, config, plugins):
    """Authenticate & obtain cert, but do not install it."""
    # TODO: --csr could have a priority, when --domains is
    # supplied, check if CSR matches given domains?
    if args.domains is not None and args.csr is not None:
        return "--domains and --csr are mutually exclusive"

    authenticator = display_ops.pick_authenticator(
        config, args.authenticator, plugins)
    if authenticator is None:
        return "Authenticator could not be determined"

    installer = None
    if args.installer is not None:
        installer = display_ops.pick_installer(config, args.installer, plugins)

    # TODO: Handle errors from _init_le_client?
    le_client = _init_le_client(args, config, authenticator, installer)

    if args.csr is None:
        domains = _find_domains(args, installer)
        _auth_from_domains(le_client, config, domains, plugins)
        return

    # Special case when a CSR is supplied: cert and chain are simply
    # saved to disk rather than enrolled in a lineage.
    certr, chain = le_client.obtain_certificate_from_csr(le_util.CSR(
        file=args.csr[0], data=args.csr[1], form="der"))
    le_client.save_certificate(
        certr, chain, args.cert_path, args.chain_path)
    _report_new_cert(args.cert_path)
def install(args, config, plugins):
    """Install a previously obtained cert in a server."""
    # XXX: Update for renewer/RenewableCert
    chosen_installer = display_ops.pick_installer(config, args.installer, plugins)
    if chosen_installer is None:
        return "Installer could not be determined"

    domains = _find_domains(args, chosen_installer)
    le_client = _init_le_client(
        args, config, authenticator=None, installer=chosen_installer)
    # required=True in the subparser guarantees a cert path is present
    assert args.cert_path is not None
    le_client.deploy_certificate(domains, args.key_path, args.cert_path,
                                 args.chain_path)
    le_client.enhance_config(domains, args.redirect)
def revoke(args, config, unused_plugins):  # TODO: coop with renewal config
    """Revoke a previously obtained certificate."""
    # args.cert_path / args.key_path are (filename, contents) tuples
    # produced by the read_file argument type (see _paths_parser).
    if args.key_path is not None:  # revocation by cert key
        logger.debug("Revoking %s using cert key %s",
                     args.cert_path[0], args.key_path[0])
        acme = acme_client.Client(
            config.server, key=jose.JWK.load(args.key_path[1]))
    else:  # revocation by account key
        logger.debug("Revoking %s using Account Key", args.cert_path[0])
        acc, _ = _determine_account(args, config)
        # pylint: disable=protected-access
        acme = client._acme_from_config_key(config, acc.key)
    acme.revoke(jose.ComparableX509(crypto_util.pyopenssl_load_certificate(
        args.cert_path[1])[0]))
def rollback(args, config, plugins):
    """Rollback server configuration changes made during install."""
    # Thin wrapper: all checkpoint logic lives in client.rollback.
    client.rollback(args.installer, args.checkpoints, config, plugins)
def config_changes(unused_args, config, unused_plugins):
    """Show changes made to server config during installation

    View checkpoints and associated configuration changes.
    """
    # Delegates entirely to the client module.
    client.view_config_changes(config)
def plugins_cmd(args, config, plugins):  # TODO: Use IDisplay rather than print
    """List server software plugins."""
    logger.debug("Expected interfaces: %s", args.ifaces)
    ifaces = [] if args.ifaces is None else args.ifaces
    filtered = plugins.visible().ifaces(ifaces)
    logger.debug("Filtered plugins: %r", filtered)
    # Each stage below narrows the plugin set further; --init and
    # --prepare control how far we go before printing the result.
    if not args.init and not args.prepare:
        print str(filtered)
        return
    filtered.init(config)
    verified = filtered.verify(ifaces)
    logger.debug("Verified plugins: %r", verified)
    if not args.prepare:
        print str(verified)
        return
    verified.prepare()
    available = verified.available()
    logger.debug("Prepared plugins: %s", available)
    print str(available)
def read_file(filename, mode="rb"):
    """Returns the given file's contents.

    :param str filename: Filename
    :param str mode: open mode (see `open`)

    :returns: A tuple of filename and its contents
    :rtype: tuple

    :raises argparse.ArgumentTypeError: File does not exist or is not readable.

    """
    try:
        # "with" guarantees the descriptor is closed even if read()
        # fails; the original leaked the handle until garbage collection.
        with open(filename, mode) as a_file:
            return filename, a_file.read()
    except IOError as exc:
        raise argparse.ArgumentTypeError(exc.strerror)
def flag_default(name):
    """Default value for CLI flag.

    :param str name: key into ``constants.CLI_DEFAULTS``
    """
    return constants.CLI_DEFAULTS[name]
def config_help(name, hidden=False):
    """Help message for `.IConfig` attribute."""
    # Hidden flags are suppressed from --help; visible ones reuse the
    # interface attribute's docstring as their help text.
    return argparse.SUPPRESS if hidden else interfaces.IConfig[name].__doc__
class SilentParser(object):  # pylint: disable=too-few-public-methods
    """Silent wrapper around argparse.

    A mini parser wrapper that doesn't print help for its
    arguments. This is needed for the use of callbacks to define
    arguments within plugins.
    """
    def __init__(self, parser):
        self.parser = parser

    def add_argument(self, *args, **kwargs):
        """Forward to the wrapped parser with the help text suppressed."""
        silenced = dict(kwargs, help=argparse.SUPPRESS)
        self.parser.add_argument(*args, **silenced)
class HelpfulArgumentParser(object):
    """Argparse Wrapper.

    This class wraps argparse, adding the ability to make --help less
    verbose, and request help on specific subcategories at a time, eg
    'letsencrypt --help security' for security options.
    """
    def __init__(self, args, plugins):
        # Help topics: the static HELP_TOPICS, one per plugin, plus
        # None which stands for "always documented" arguments.
        plugin_names = [name for name, _p in plugins.iteritems()]
        self.help_topics = HELP_TOPICS + plugin_names + [None]
        self.parser = configargparse.ArgParser(
            usage=SHORT_USAGE,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            args_for_setting_config_path=["-c", "--config"],
            default_config_files=flag_default("config_files"))
        # This is the only way to turn off overly verbose config flag documentation
        self.parser._add_config_file_help = False # pylint: disable=protected-access
        self.silent_parser = SilentParser(self.parser)
        self.verb = None
        self.args = self.preprocess_args(args)
        help1 = self.prescan_for_flag("-h", self.help_topics)
        help2 = self.prescan_for_flag("--help", self.help_topics)
        # In Python 2 any str compares greater than True, so max()
        # below prefers a concrete topic string over a bare flag.
        assert max(True, "a") == "a", "Gravity changed direction"
        help_arg = max(help1, help2)
        if help_arg is True:
            # just --help with no topic; avoid argparse altogether
            print USAGE
            sys.exit(0)
        self.visible_topics = self.determine_help_topics(help_arg)
        #print self.visible_topics
        self.groups = {} # elements are added by .add_group()
    def preprocess_args(self, args):
        """Work around some limitations in argparse.

        Currently: add the default verb "run" as a default, and ensure that the
        subcommand / verb comes last.
        """
        if "-h" in args or "--help" in args:
            # all verbs double as help arguments; don't get them confused
            self.verb = "help"
            return args
        for i, token in enumerate(args):
            if token in VERBS:
                # Move the verb to the end so global flags are parsed first.
                reordered = args[:i] + args[i+1:] + [args[i]]
                self.verb = token
                return reordered
        # No verb given: default to "run".
        self.verb = "run"
        return args + ["run"]
    def prescan_for_flag(self, flag, possible_arguments):
        """Checks cli input for flags.

        Check for a flag, which accepts a fixed set of possible arguments, in
        the command line; we will use this information to configure argparse's
        help correctly.  Return the flag's argument, if it has one that matches
        the sequence @possible_arguments; otherwise return whether the flag is
        present.
        """
        if flag not in self.args:
            return False
        pos = self.args.index(flag)
        try:
            nxt = self.args[pos + 1]
            if nxt in possible_arguments:
                return nxt
        except IndexError:
            pass
        return True
    def add(self, topic, *args, **kwargs):
        """Add a new command line argument.

        @topic is required, to indicate which part of the help will document
        it, but can be None for `always documented'.
        """
        if self.visible_topics[topic]:
            if topic in self.groups:
                group = self.groups[topic]
                group.add_argument(*args, **kwargs)
            else:
                self.parser.add_argument(*args, **kwargs)
        else:
            # Argument still parses, but is hidden from --help output.
            kwargs["help"] = argparse.SUPPRESS
            self.parser.add_argument(*args, **kwargs)
    def add_group(self, topic, **kwargs):
        """Create an argument group for *topic* (or a silent stand-in).

        This has to be called once for every topic; but we leave those calls
        next to the argument definitions for clarity. Return something
        arguments can be added to if necessary, either the parser or an argument
        group.
        """
        if self.visible_topics[topic]:
            #print "Adding visible group " + topic
            group = self.parser.add_argument_group(topic, **kwargs)
            self.groups[topic] = group
            return group
        else:
            #print "Invisible group " + topic
            return self.silent_parser
    def add_plugin_args(self, plugins):
        """Add plugin-provided command line arguments.

        Let each of the plugins add its own command line arguments, which
        may or may not be displayed as help topics.
        """
        for name, plugin_ep in plugins.iteritems():
            parser_or_group = self.add_group(name, description=plugin_ep.description)
            #print parser_or_group
            plugin_ep.plugin_cls.inject_parser_options(parser_or_group, name)
    def determine_help_topics(self, chosen_topic):
        """Map each topic to whether it should be documented.

        The user may have requested help on a topic, return a dict of which
        topics to display. @chosen_topic has prescan_for_flag's return type

        :returns: dict
        """
        # topics maps each topic to whether it should be documented by
        # argparse on the command line
        if chosen_topic == "all":
            return dict([(t, True) for t in self.help_topics])
        elif not chosen_topic:
            return dict([(t, False) for t in self.help_topics])
        else:
            return dict([(t, t == chosen_topic) for t in self.help_topics])
def create_parser(plugins, args):
    """Create parser.

    :param plugins: available plugins registry
    :param args: raw command line arguments

    :returns: tuple of the configured parser and the preprocessed args
    :rtype: tuple

    """
    helpful = HelpfulArgumentParser(args, plugins)
    # --help is automatically provided by argparse
    helpful.add(
        None, "-v", "--verbose", dest="verbose_count", action="count",
        default=flag_default("verbose_count"), help="This flag can be used "
        "multiple times to incrementally increase the verbosity of output, "
        "e.g. -vvv.")
    helpful.add(
        None, "-t", "--text", dest="text_mode", action="store_true",
        help="Use the text output instead of the curses UI.")
    helpful.add(None, "-m", "--email", help=config_help("email"))
    # positional arg shadows --domains, instead of appending, and
    # --domains is useful, because it can be stored in config
    #for subparser in parser_run, parser_auth, parser_install:
    #    subparser.add_argument("domains", nargs="*", metavar="domain")
    helpful.add(None, "-d", "--domains", metavar="DOMAIN", action="append")
    helpful.add(
        None, "--duplicate", dest="duplicate", action="store_true",
        help="Allow getting a certificate that duplicates an existing one")
    helpful.add_group(
        "automation",
        description="Arguments for automating execution & other tweaks")
    helpful.add(
        "automation", "--version", action="version",
        version="%(prog)s {0}".format(letsencrypt.__version__),
        help="show program's version number and exit")
    helpful.add(
        # BUG FIX: the two literals used to read "...superset of a a
        # previously attained cert" (doubled article).
        "automation", "--renew-by-default", action="store_true",
        help="Select renewal by default when domains are a superset of "
             "a previously attained cert")
    helpful.add(
        "automation", "--agree-eula", dest="eula", action="store_true",
        help="Agree to the Let's Encrypt Developer Preview EULA")
    helpful.add(
        "automation", "--agree-tos", dest="tos", action="store_true",
        help="Agree to the Let's Encrypt Subscriber Agreement")
    helpful.add(
        "automation", "--account", metavar="ACCOUNT_ID",
        help="Account ID to use")
    helpful.add_group(
        "testing", description="The following flags are meant for "
        "testing purposes only! Do NOT change them, unless you "
        "really know what you're doing!")
    helpful.add(
        "testing", "--debug", action="store_true",
        help="Show tracebacks if the program exits abnormally")
    helpful.add(
        "testing", "--no-verify-ssl", action="store_true",
        help=config_help("no_verify_ssl"),
        default=flag_default("no_verify_ssl"))
    helpful.add(  # TODO: apache plugin does NOT respect it (#479)
        "testing", "--dvsni-port", type=int, default=flag_default("dvsni_port"),
        help=config_help("dvsni_port"))
    helpful.add("testing", "--simple-http-port", type=int,
                help=config_help("simple_http_port"))
    helpful.add("testing", "--no-simple-http-tls", action="store_true",
                help=config_help("no_simple_http_tls"))
    helpful.add_group(
        "security", description="Security parameters & server settings")
    helpful.add(
        "security", "-B", "--rsa-key-size", type=int, metavar="N",
        default=flag_default("rsa_key_size"), help=config_help("rsa_key_size"))
    # TODO: resolve - assumes binary logic while client.py assumes ternary.
    helpful.add(
        "security", "-r", "--redirect", action="store_true",
        help="Automatically redirect all HTTP traffic to HTTPS for the newly "
             "authenticated vhost.")
    helpful.add(
        "security", "--strict-permissions", action="store_true",
        help="Require that all configuration files are owned by the current "
             "user; only needed if your config is somewhere unsafe like /tmp/")
    _paths_parser(helpful)
    # _plugins_parsing should be the last thing to act upon the main
    # parser (--help should display plugin-specific options last)
    _plugins_parsing(helpful, plugins)
    _create_subparsers(helpful)
    return helpful.parser, helpful.args
# For now unfortunately this constant just needs to match the code below;
# there isn't an elegant way to autogenerate it in time.
VERBS = ["run", "auth", "install", "revoke", "rollback", "config_changes", "plugins"]
# Topics accepted by "--help <topic>"; plugin names are appended at runtime.
HELP_TOPICS = ["all", "security", "paths", "automation", "testing"] + VERBS
def _create_subparsers(helpful):
    """Add one subcommand (verb) parser per entry in VERBS."""
    subparsers = helpful.parser.add_subparsers(metavar="SUBCOMMAND")
    def add_subparser(name): # pylint: disable=missing-docstring
        if name == "plugins":
            func = plugins_cmd
        else:
            # name comes from the trusted VERBS list below, never from
            # user input; eval() resolves the module-level function.
            func = eval(name) # pylint: disable=eval-used
        # First docstring line is the one-line help; the full docstring
        # becomes the subcommand description.
        h = func.__doc__.splitlines()[0]
        subparser = subparsers.add_parser(name, help=h, description=func.__doc__)
        subparser.set_defaults(func=func)
        return subparser
    # the order of add_subparser() calls is important: it defines the
    # order in which subparser names will be displayed in --help
    # these add_subparser objects return objects to which arguments could be
    # attached, but they have annoying arg ordering constrains so we use
    # groups instead: https://github.com/letsencrypt/letsencrypt/issues/820
    for v in VERBS:
        add_subparser(v)
    helpful.add_group("auth", description="Options for modifying how a cert is obtained")
    helpful.add_group("install", description="Options for modifying how a cert is deployed")
    helpful.add_group("revoke", description="Options for revocation of certs")
    helpful.add_group("rollback", description="Options for reverting config changes")
    helpful.add_group("plugins", description="Plugin options")
    helpful.add("auth",
                "--csr", type=read_file, help="Path to a Certificate Signing Request (CSR) in DER format.")
    helpful.add("rollback",
                "--checkpoints", type=int, metavar="N",
                default=flag_default("rollback_checkpoints"),
                help="Revert configuration N number of checkpoints.")
    helpful.add("plugins",
                "--init", action="store_true", help="Initialize plugins.")
    helpful.add("plugins",
                "--prepare", action="store_true", help="Initialize and prepare plugins.")
    helpful.add("plugins",
                "--authenticators", action="append_const", dest="ifaces",
                const=interfaces.IAuthenticator, help="Limit to authenticator plugins only.")
    helpful.add("plugins",
                "--installers", action="append_const", dest="ifaces",
                const=interfaces.IInstaller, help="Limit to installer plugins only.")
def _paths_parser(helpful):
    """Define path- and server-related flags; defaults depend on the verb."""
    add = helpful.add
    verb = helpful.verb
    helpful.add_group(
        "paths", description="Arguments changing execution paths & servers")
    cph = "Path to where cert is saved (with auth), installed (with install --csr) or revoked."
    if verb == "auth":
        add("paths", "--cert-path", default=flag_default("auth_cert_path"), help=cph)
    elif verb == "revoke":
        # revoke needs the certificate contents, so the file is read eagerly
        add("paths", "--cert-path", type=read_file, required=True, help=cph)
    else:
        add("paths", "--cert-path", help=cph, required=(verb == "install"))
    # revoke --key-path reads a file, install --key-path takes a string
    add("paths", "--key-path", type=((verb == "revoke" and read_file) or str),
        required=(verb == "install"),
        help="Path to private key for cert creation or revocation (if account key is missing)")
    default_cp = None
    if verb == "auth":
        default_cp = flag_default("auth_chain_path")
    add("paths", "--chain-path", default=default_cp,
        help="Accompanying path to a certificate chain.")
    add("paths", "--config-dir", default=flag_default("config_dir"),
        help=config_help("config_dir"))
    add("paths", "--work-dir", default=flag_default("work_dir"),
        help=config_help("work_dir"))
    add("paths", "--logs-dir", default=flag_default("logs_dir"),
        help="Logs directory.")
    add("paths", "--server", default=flag_default("server"),
        help=config_help("server"))
def _plugins_parsing(helpful, plugins):
    """Define plugin-selection flags and per-plugin option groups."""
    helpful.add_group(
        "plugins", description="Let's Encrypt client supports an "
        "extensible plugins architecture. See '%(prog)s plugins' for a "
        "list of all available plugins and their names. You can force "
        "a particular plugin by setting options provided below. Further "
        "down this help message you will find plugin-specific options "
        "(prefixed by --{plugin_name}).")
    helpful.add(
        "plugins", "-a", "--authenticator", help="Authenticator plugin name.")
    helpful.add(
        "plugins", "-i", "--installer", help="Installer plugin name.")
    helpful.add(
        "plugins", "--configurator", help="Name of the plugin that is "
        "both an authenticator and an installer. Should not be used "
        "together with --authenticator or --installer.")
    # things should not be reorder past/pre this comment:
    # plugins_group should be displayed in --help before plugin
    # specific groups (so that plugins_group.description makes sense)
    helpful.add_plugin_args(plugins)
def _setup_logging(args):
    """Set up console/dialog logging and an always-on rotating debug log.

    Console verbosity is driven by the -v count; a DEBUG-level file log
    is written unconditionally under ``args.logs_dir``.
    """
    level = -args.verbose_count * 10
    # NOTE(review): level is negative for any positive verbose_count,
    # which lets every record through the console handler -- confirm
    # this is the intended verbosity mapping.
    fmt = "%(asctime)s:%(levelname)s:%(name)s:%(message)s"
    if args.text_mode:
        handler = colored_logging.StreamHandler()
        handler.setFormatter(logging.Formatter(fmt))
    else:
        handler = log.DialogHandler()
        # dialog box is small, display as less as possible
        handler.setFormatter(logging.Formatter("%(message)s"))
    handler.setLevel(level)
    # TODO: use fileConfig?
    # unconditionally log to file for debugging purposes
    # TODO: change before release?
    log_file_name = os.path.join(args.logs_dir, 'letsencrypt.log')
    file_handler = logging.handlers.RotatingFileHandler(
        log_file_name, maxBytes=2 ** 20, backupCount=10)
    # rotate on each invocation, rollover only possible when maxBytes
    # is nonzero and backupCount is nonzero, so we set maxBytes as big
    # as possible not to overrun in single CLI invocation (1MB).
    file_handler.doRollover()  # TODO: creates empty letsencrypt.log.1 file
    file_handler.setLevel(logging.DEBUG)
    file_handler_formatter = logging.Formatter(fmt=fmt)
    file_handler_formatter.converter = time.gmtime  # don't use localtime
    file_handler.setFormatter(file_handler_formatter)
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)  # send all records to handlers
    root_logger.addHandler(handler)
    root_logger.addHandler(file_handler)
    logger.debug("Root logging level set at %d", level)
    logger.info("Saving debug log to %s", log_file_name)
def _handle_exception(exc_type, exc_value, trace, args):
    """Logs exceptions and reports them to the user.

    Args is used to determine how to display exceptions to the user. In
    general, if args.debug is True, then the full exception and traceback is
    shown to the user, otherwise it is suppressed. If args itself is None,
    then the traceback and exception is attempted to be written to a logfile.
    If this is successful, the traceback is suppressed, otherwise it is shown
    to the user. sys.exit is always called with a nonzero status.

    """
    logger.debug(
        "Exiting abnormally:%s%s",
        os.linesep,
        "".join(traceback.format_exception(exc_type, exc_value, trace)))
    if issubclass(exc_type, Exception) and (args is None or not args.debug):
        if args is None:
            logfile = "letsencrypt.log"
            try:
                with open(logfile, "w") as logfd:
                    traceback.print_exception(
                        exc_type, exc_value, trace, file=logfd)
            except:  # pylint: disable=bare-except
                # Even the logfile is unwritable: dump the full
                # traceback to the user as a last resort.
                sys.exit("".join(
                    traceback.format_exception(exc_type, exc_value, trace)))
        if issubclass(exc_type, errors.Error):
            sys.exit(exc_value)
        else:
            # Tell the user a bit about what happened, without overwhelming
            # them with a full traceback
            msg = ("An unexpected error occurred.\n" +
                   traceback.format_exception_only(exc_type, exc_value)[0] +
                   "Please see the ")
            if args is None:
                # `logfile` was bound in the args-is-None branch above.
                msg += "logfile '{0}' for more details.".format(logfile)
            else:
                msg += "logfiles in {0} for more details.".format(args.logs_dir)
            sys.exit(msg)
    else:
        # Non-Exception (KeyboardInterrupt/SystemExit) or --debug:
        # show the full traceback.
        sys.exit("".join(
            traceback.format_exception(exc_type, exc_value, trace)))
def main(cli_args=None):
    """Command line argument parsing and main script execution.

    :param cli_args: arguments to parse; defaults to ``sys.argv[1:]``
        evaluated at call time.  (The old ``cli_args=sys.argv[1:]``
        default was sliced once at import time and went stale if
        ``sys.argv`` was modified afterwards.)

    :returns: value returned by the selected subcommand function
    """
    if cli_args is None:
        cli_args = sys.argv[1:]
    # Until args are parsed we have no --debug flag, so install a
    # logfile-only exception hook first.
    sys.excepthook = functools.partial(_handle_exception, args=None)
    # note: arg parser internally handles --help (and exits afterwards)
    plugins = plugins_disco.PluginsRegistry.find_all()
    parser, tweaked_cli_args = create_parser(plugins, cli_args)
    args = parser.parse_args(tweaked_cli_args)
    config = configuration.NamespaceConfig(args)
    zope.component.provideUtility(config)
    # Setup logging ASAP, otherwise "No handlers could be found for
    # logger ..." TODO: this should be done before plugins discovery
    for directory in config.config_dir, config.work_dir:
        le_util.make_or_verify_dir(
            directory, constants.CONFIG_DIRS_MODE, os.geteuid(),
            "--strict-permissions" in cli_args)
    # TODO: logs might contain sensitive data such as contents of the
    # private key! #525
    le_util.make_or_verify_dir(
        args.logs_dir, 0o700, os.geteuid(), "--strict-permissions" in cli_args)
    _setup_logging(args)
    # do not log `args`, as it contains sensitive data (e.g. revoke --key)!
    logger.debug("Arguments: %r", cli_args)
    logger.debug("Discovered plugins: %r", plugins)
    # Now that args are known, the exception hook can honor --debug.
    sys.excepthook = functools.partial(_handle_exception, args=args)
    # Displayer
    if args.text_mode:
        displayer = display_util.FileDisplay(sys.stdout)
    else:
        displayer = display_util.NcursesDisplay()
    zope.component.provideUtility(displayer)
    # Reporter
    report = reporter.Reporter()
    zope.component.provideUtility(report)
    atexit.register(report.atexit_print_messages)
    # TODO: remove developer EULA prompt for the launch
    if not config.eula:
        eula = pkg_resources.resource_string("letsencrypt", "EULA")
        if not zope.component.getUtility(interfaces.IDisplay).yesno(
                eula, "Agree", "Cancel"):
            raise errors.Error("Must agree to TOS")
    if not os.geteuid() == 0:
        logger.warning(
            "Root (sudo) is required to run most of letsencrypt functionality.")
        # check must be done after arg parsing as --help should work
        # w/o root; on the other hand, e.g. "letsencrypt run
        # --authenticator dns" or "letsencrypt plugins" does not
        # require root as well
        #return (
        #    "{0}Root is required to run letsencrypt. Please use sudo.{0}"
        #    .format(os.linesep))
    return args.func(args, config, plugins)
# Script entry point; the exit status is whatever main() returns.
if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
|
|
#
# CORE
# Copyright (c)2010-2014 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
utility.py: defines miscellaneous utility services.
'''
import os
from core.service import CoreService, addservice
from core.misc.ipaddr import IPv4Prefix, IPv6Prefix
from core.misc.utils import *
from core.constants import *
class UtilService(CoreService):
    ''' Parent class for utility services.
    '''
    # service name and GUI grouping
    _name = "UtilityProcess"
    _group = "Utility"
    # no dependencies on other services
    _depends = ()
    # directories to create and per-node config files to generate
    _dirs = ()
    _configs = ()
    # relative startup ordering (higher values start later)
    _startindex = 80
    # shell commands run at node startup/shutdown
    _startup = ()
    _shutdown = ()
    @classmethod
    def generateconfig(cls, node, filename, services):
        # Subclasses override this to return the contents of each file
        # listed in _configs; the base class generates nothing.
        return ""
class IPForwardService(UtilService):
    ''' Generate an ipforward.sh script that enables IP forwarding via
        sysctl, globally and per-interface.
    '''
    _name = "IPForward"
    _configs = ("ipforward.sh", )
    _startindex = 5
    _startup = ("sh ipforward.sh", )
    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Dispatch to the platform-specific generator. '''
        if os.uname()[0] == "Linux":
            return cls.generateconfiglinux(node, filename, services)
        elif os.uname()[0] == "FreeBSD":
            return cls.generateconfigbsd(node, filename, services)
        else:
            # BUG FIX: "raise Exception, msg" is a SyntaxError under
            # Python 3; the call form is valid in both Python 2 and 3.
            raise Exception("unknown platform")
    @classmethod
    def generateconfiglinux(cls, node, filename, services):
        ''' Linux: sysctl forwarding switches, plus per-interface
            forwarding/redirect/rp_filter settings.
        '''
        cfg = """\
#!/bin/sh
# auto-generated by IPForward service (utility.py)
%s -w net.ipv4.conf.all.forwarding=1
%s -w net.ipv6.conf.all.forwarding=1
%s -w net.ipv4.conf.all.send_redirects=0
%s -w net.ipv4.conf.all.rp_filter=0
%s -w net.ipv4.conf.default.rp_filter=0
""" % (SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN)
        for ifc in node.netifs():
            name = sysctldevname(ifc.name)
            cfg += "%s -w net.ipv4.conf.%s.forwarding=1\n" % (SYSCTL_BIN, name)
            cfg += "%s -w net.ipv4.conf.%s.send_redirects=0\n" % \
                (SYSCTL_BIN, name)
            cfg += "%s -w net.ipv4.conf.%s.rp_filter=0\n" % (SYSCTL_BIN, name)
        return cfg
    @classmethod
    def generateconfigbsd(cls, node, filename, services):
        ''' FreeBSD: inet/inet6 forwarding plus ICMP tweaks. '''
        return """\
#!/bin/sh
# auto-generated by IPForward service (utility.py)
%s -w net.inet.ip.forwarding=1
%s -w net.inet6.ip6.forwarding=1
%s -w net.inet.icmp.bmcastecho=1
%s -w net.inet.icmp.icmplim=0
""" % (SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN)
addservice(IPForwardService)
class DefaultRouteService(UtilService):
    ''' Generate a defaultroute.sh script adding a default route per
        non-control interface.
    '''
    _name = "DefaultRoute"
    _configs = ("defaultroute.sh",)
    _startup = ("sh defaultroute.sh",)
    @classmethod
    def generateconfig(cls, node, filename, services):
        cfg = "#!/bin/sh\n"
        cfg += "# auto-generated by DefaultRoute service (utility.py)\n"
        for ifc in node.netifs():
            if hasattr(ifc, 'control') and ifc.control == True:
                continue
            cfg += "\n".join(map(cls.addrstr, ifc.addrlist))
            cfg += "\n"
        return cfg
    @staticmethod
    def addrstr(x):
        ''' Build a default-route command for address string *x*.
            Returns "" for host-only prefixes (single address); the
            gateway is taken as the lowest address of the prefix
            (assumed convention -- TODO confirm).
        '''
        if x.find(":") >= 0:
            net = IPv6Prefix(x)
            fam = "inet6 ::"
        else:
            net = IPv4Prefix(x)
            fam = "inet 0.0.0.0"
        if net.maxaddr() == net.minaddr():
            return ""
        else:
            if os.uname()[0] == "Linux":
                rtcmd = "ip route add default via"
            elif os.uname()[0] == "FreeBSD":
                rtcmd = "route add -%s" % fam
            else:
                # BUG FIX: Python 3 compatible form of "raise Exception, msg"
                raise Exception("unknown platform")
            return "%s %s" % (rtcmd, net.minaddr())
addservice(DefaultRouteService)
class DefaultMulticastRouteService(UtilService):
    ''' Generate a defaultmroute.sh script adding a default multicast
        route (224.0.0.0/4) via the node's first non-control interface.
    '''
    _name = "DefaultMulticastRoute"
    _configs = ("defaultmroute.sh",)
    _startup = ("sh defaultmroute.sh",)
    @classmethod
    def generateconfig(cls, node, filename, services):
        cfg = "#!/bin/sh\n"
        cfg += "# auto-generated by DefaultMulticastRoute service (utility.py)\n"
        cfg += "# the first interface is chosen below; please change it "
        cfg += "as needed\n"
        for ifc in node.netifs():
            if hasattr(ifc, 'control') and ifc.control == True:
                continue
            if os.uname()[0] == "Linux":
                rtcmd = "ip route add 224.0.0.0/4 dev"
            elif os.uname()[0] == "FreeBSD":
                rtcmd = "route add 224.0.0.0/4 -iface"
            else:
                # BUG FIX: Python 3 compatible form of "raise Exception, msg"
                raise Exception("unknown platform")
            cfg += "%s %s\n" % (rtcmd, ifc.name)
            cfg += "\n"
            # only the first eligible interface is used
            break
        return cfg
addservice(DefaultMulticastRouteService)
class StaticRouteService(UtilService):
    ''' Generate a skeleton staticroute.sh of commented-out sample
        routes; must be customized by the user to be useful.
    '''
    _name = "StaticRoute"
    _configs = ("staticroute.sh",)
    _startup = ("sh staticroute.sh",)
    # force per-node customization before this service is usable
    _custom_needed = True
    @classmethod
    def generateconfig(cls, node, filename, services):
        cfg = "#!/bin/sh\n"
        cfg += "# auto-generated by StaticRoute service (utility.py)\n#\n"
        cfg += "# NOTE: this service must be customized to be of any use\n"
        cfg += "# Below are samples that you can uncomment and edit.\n#\n"
        for ifc in node.netifs():
            if hasattr(ifc, 'control') and ifc.control == True:
                continue
            cfg += "\n".join(map(cls.routestr, ifc.addrlist))
            cfg += "\n"
        return cfg
    @staticmethod
    def routestr(x):
        ''' Emit a commented-out sample route command for address *x*;
            returns "" for host-only (single address) prefixes.
        '''
        if x.find(":") >= 0:
            net = IPv6Prefix(x)
            fam = "inet6"
            dst = "3ffe:4::/64"
        else:
            net = IPv4Prefix(x)
            fam = "inet"
            dst = "10.9.8.0/24"
        if net.maxaddr() == net.minaddr():
            return ""
        else:
            if os.uname()[0] == "Linux":
                rtcmd = "#/sbin/ip route add %s via" % dst
            elif os.uname()[0] == "FreeBSD":
                rtcmd = "#/sbin/route add -%s %s" % (fam, dst)
            else:
                # BUG FIX: Python 3 compatible form of "raise Exception, msg"
                raise Exception("unknown platform")
            return "%s %s" % (rtcmd, net.minaddr())
addservice(StaticRouteService)
class SshService(UtilService):
    # Runs an OpenSSH server inside the node.  Config/state paths differ per
    # platform: FreeBSD keeps everything in the node directory, Linux uses
    # the usual /etc/ssh and /var/run/sshd locations (bind-mounted per node).
    _name = "SSH"
    if os.uname()[0] == "FreeBSD":
        _configs = ("startsshd.sh", "sshd_config",)
        _dirs = ()
    else:
        _configs = ("startsshd.sh", "/etc/ssh/sshd_config",)
        _dirs = ("/etc/ssh", "/var/run/sshd",)
    _startup = ("sh startsshd.sh",)
    _shutdown = ("killall sshd",)
    _validate = ()

    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Use a startup script for launching sshd in order to wait for host
        key generation.
        '''
        if os.uname()[0] == "FreeBSD":
            sshcfgdir = node.nodedir
            sshstatedir = node.nodedir
            sshlibdir = "/usr/libexec"
        else:
            # on Linux, _dirs[0] is the config dir and _dirs[1] the state dir
            sshcfgdir = cls._dirs[0]
            sshstatedir = cls._dirs[1]
            sshlibdir = "/usr/lib/openssh"
        if filename == "startsshd.sh":
            # startup script: generate the host key, then launch sshd
            return """\
#!/bin/sh
# auto-generated by SSH service (utility.py)
ssh-keygen -q -t rsa -N "" -f %s/ssh_host_rsa_key
chmod 655 %s
# wait until RSA host key has been generated to launch sshd
/usr/sbin/sshd -f %s/sshd_config
""" % (sshcfgdir, sshstatedir, sshcfgdir)
        else:
            # sshd_config contents (second entry of _configs)
            return """\
# auto-generated by SSH service (utility.py)
Port 22
Protocol 2
HostKey %s/ssh_host_rsa_key
UsePrivilegeSeparation yes
PidFile %s/sshd.pid
KeyRegenerationInterval 3600
ServerKeyBits 768
SyslogFacility AUTH
LogLevel INFO
LoginGraceTime 120
PermitRootLogin yes
StrictModes yes
RSAAuthentication yes
PubkeyAuthentication yes
IgnoreRhosts yes
RhostsRSAAuthentication no
HostbasedAuthentication no
PermitEmptyPasswords no
ChallengeResponseAuthentication no
X11Forwarding yes
X11DisplayOffset 10
PrintMotd no
PrintLastLog yes
TCPKeepAlive yes
AcceptEnv LANG LC_*
Subsystem sftp %s/sftp-server
UsePAM yes
UseDNS no
""" % (sshcfgdir, sshstatedir, sshlibdir)

addservice(SshService)
class DhcpService(UtilService):
    """ISC DHCP server; serves the upper half of each IPv4 interface subnet."""
    _name = "DHCP"
    _configs = ("/etc/dhcp/dhcpd.conf",)
    _dirs = ("/etc/dhcp",)
    _startup = ("dhcpd",)
    _shutdown = ("killall dhcpd",)
    _validate = ("pidof dhcpd",)

    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Generate a dhcpd config file using the network address of
        each interface.
        '''
        cfg = """\
# auto-generated by DHCP service (utility.py)
# NOTE: move these option lines into the desired pool { } block(s) below
#option domain-name "test.com";
#option domain-name-servers 10.0.0.1;
#option routers 10.0.0.1;
log-facility local6;
default-lease-time 600;
max-lease-time 7200;
ddns-update-style none;
"""
        for ifc in node.netifs():
            # skip control (management) interfaces
            if hasattr(ifc, 'control') and ifc.control == True:
                continue
            cfg += "\n".join(map(cls.subnetentry, ifc.addrlist))
            cfg += "\n"
        return cfg

    @staticmethod
    def subnetentry(x):
        ''' Generate a subnet declaration block given an IPv4 prefix string
        for inclusion in the dhcpd3 config file.
        '''
        if x.find(":") >= 0:
            # IPv6 prefixes are not served by this dhcpd config
            return ""
        else:
            addr = x.split("/")[0]
            net = IPv4Prefix(x)
            # divide the address space in half; floor division keeps the
            # index an integer under Python 3 ("/" would yield a float)
            rangelow = net.addr(net.numaddr() // 2)
            rangehigh = net.maxaddr()
            return """
subnet %s netmask %s {
pool {
range %s %s;
default-lease-time 600;
option routers %s;
}
}
""" % (net.prefixstr(), net.netmaskstr(), rangelow, rangehigh, addr)

addservice(DhcpService)
class DhcpClientService(UtilService):
    ''' Use a DHCP client for all interfaces for addressing.
    '''
    _name = "DHCPClient"
    _configs = ("startdhcpclient.sh",)
    _startup = ("sh startdhcpclient.sh",)
    _shutdown = ("killall dhclient",)
    _validate = ("pidof dhclient",)

    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Build a shell script that launches dhclient on every
        non-control interface of the node.
        '''
        parts = [
            "#!/bin/sh\n",
            "# auto-generated by DHCPClient service (utility.py)\n",
            "# uncomment this mkdir line and symlink line to enable client-",
            "side DNS\n# resolution based on the DHCP server response.\n",
            "#mkdir -p /var/run/resolvconf/interface\n",
        ]
        for netif in node.netifs():
            # skip control (management) interfaces
            if hasattr(netif, 'control') and netif.control == True:
                continue
            parts.append("#ln -s /var/run/resolvconf/interface/%s.dhclient" % netif.name)
            parts.append(" /var/run/resolvconf/resolv.conf\n")
            parts.append("/sbin/dhclient -nw -pf /var/run/dhclient-%s.pid" % netif.name)
            parts.append(" -lf /var/run/dhclient-%s.lease %s\n" % (netif.name, netif.name))
        return "".join(parts)

addservice(DhcpClientService)
class FtpService(UtilService):
    ''' Start a vsftpd server.
    '''
    _name = "FTP"
    _configs = ("vsftpd.conf",)
    _dirs = ("/var/run/vsftpd/empty", "/var/ftp",)
    _startup = ("vsftpd ./vsftpd.conf",)
    _shutdown = ("killall vsftpd",)
    _validate = ("pidof vsftpd",)

    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Generate a vsftpd.conf configuration file.
        '''
        # static configuration: anonymous + local FTP rooted at /var/ftp
        return """\
# vsftpd.conf auto-generated by FTP service (utility.py)
listen=YES
anonymous_enable=YES
local_enable=YES
dirmessage_enable=YES
use_localtime=YES
xferlog_enable=YES
connect_from_port_20=YES
xferlog_file=/var/log/vsftpd.log
ftpd_banner=Welcome to the CORE FTP service
secure_chroot_dir=/var/run/vsftpd/empty
anon_root=/var/ftp
"""

addservice(FtpService)
class HttpService(UtilService):
    ''' Start an apache server.
    '''
    _name = "HTTP"
    _configs = ("/etc/apache2/apache2.conf", "/etc/apache2/envvars",
                "/var/www/index.html",)
    _dirs = ("/etc/apache2", "/var/run/apache2", "/var/log/apache2",
             "/run/lock", "/var/lock/apache2", "/var/www", )
    _startup = ("chown www-data /var/lock/apache2", "apache2ctl start",)
    _shutdown = ("apache2ctl stop",)
    _validate = ("pidof apache2",)
    # version tags used to select config directives below
    APACHEVER22, APACHEVER24 = (22, 24)

    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Generate an apache2.conf configuration file.
        '''
        # dispatch on which of the three _configs files is requested
        if filename == cls._configs[0]:
            return cls.generateapache2conf(node, filename, services)
        elif filename == cls._configs[1]:
            return cls.generateenvvars(node, filename, services)
        elif filename == cls._configs[2]:
            return cls.generatehtml(node, filename, services)
        else:
            return ""

    @classmethod
    def detectversionfromcmd(cls):
        ''' Detect the apache2 version using the 'a2query' command.
        '''
        try:
            status, result = cmdresult(['a2query', '-v'])
        except Exception:
            # a2query missing or failed; fall through to the 2.2 default
            status = -1
        if status == 0 and result[:3] == '2.4':
            return cls.APACHEVER24
        # default when version cannot be determined
        return cls.APACHEVER22

    @classmethod
    def generateapache2conf(cls, node, filename, services):
        # per-version directive fragments, keyed by APACHEVER22/APACHEVER24
        lockstr = {cls.APACHEVER22:
                   'LockFile ${APACHE_LOCK_DIR}/accept.lock\n',
                   cls.APACHEVER24:
                   'Mutex file:${APACHE_LOCK_DIR} default\n', }
        mpmstr = {cls.APACHEVER22: '', cls.APACHEVER24:
                  'LoadModule mpm_worker_module /usr/lib/apache2/modules/mod_mpm_worker.so\n', }
        permstr = {cls.APACHEVER22:
                   ' Order allow,deny\n Deny from all\n Satisfy all\n',
                   cls.APACHEVER24:
                   ' Require all denied\n', }
        authstr = {cls.APACHEVER22:
                   'LoadModule authz_default_module /usr/lib/apache2/modules/mod_authz_default.so\n',
                   cls.APACHEVER24:
                   'LoadModule authz_core_module /usr/lib/apache2/modules/mod_authz_core.so\n', }
        permstr2 = {cls.APACHEVER22:
                    '\t\tOrder allow,deny\n\t\tallow from all\n',
                    cls.APACHEVER24:
                    '\t\tRequire all granted\n', }
        version = cls.detectversionfromcmd()
        cfg = "# apache2.conf generated by utility.py:HttpService\n"
        cfg += lockstr[version]
        cfg += """\
PidFile ${APACHE_PID_FILE}
Timeout 300
KeepAlive On
MaxKeepAliveRequests 100
KeepAliveTimeout 5
"""
        cfg += mpmstr[version]
        cfg += """\
<IfModule mpm_prefork_module>
StartServers 5
MinSpareServers 5
MaxSpareServers 10
MaxClients 150
MaxRequestsPerChild 0
</IfModule>
<IfModule mpm_worker_module>
StartServers 2
MinSpareThreads 25
MaxSpareThreads 75
ThreadLimit 64
ThreadsPerChild 25
MaxClients 150
MaxRequestsPerChild 0
</IfModule>
<IfModule mpm_event_module>
StartServers 2
MinSpareThreads 25
MaxSpareThreads 75
ThreadLimit 64
ThreadsPerChild 25
MaxClients 150
MaxRequestsPerChild 0
</IfModule>
User ${APACHE_RUN_USER}
Group ${APACHE_RUN_GROUP}
AccessFileName .htaccess
<Files ~ "^\.ht">
"""
        cfg += permstr[version]
        cfg += """\
</Files>
DefaultType None
HostnameLookups Off
ErrorLog ${APACHE_LOG_DIR}/error.log
LogLevel warn
#Include mods-enabled/*.load
#Include mods-enabled/*.conf
LoadModule alias_module /usr/lib/apache2/modules/mod_alias.so
LoadModule auth_basic_module /usr/lib/apache2/modules/mod_auth_basic.so
"""
        cfg += authstr[version]
        cfg += """\
LoadModule authz_host_module /usr/lib/apache2/modules/mod_authz_host.so
LoadModule authz_user_module /usr/lib/apache2/modules/mod_authz_user.so
LoadModule autoindex_module /usr/lib/apache2/modules/mod_autoindex.so
LoadModule dir_module /usr/lib/apache2/modules/mod_dir.so
LoadModule env_module /usr/lib/apache2/modules/mod_env.so
NameVirtualHost *:80
Listen 80
<IfModule mod_ssl.c>
Listen 443
</IfModule>
<IfModule mod_gnutls.c>
Listen 443
</IfModule>
LogFormat "%v:%p %h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" vhost_combined
LogFormat "%h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" combined
LogFormat "%h %l %u %t \\"%r\\" %>s %O" common
LogFormat "%{Referer}i -> %U" referer
LogFormat "%{User-agent}i" agent
ServerTokens OS
ServerSignature On
TraceEnable Off
<VirtualHost *:80>
ServerAdmin webmaster@localhost
DocumentRoot /var/www
<Directory />
Options FollowSymLinks
AllowOverride None
</Directory>
<Directory /var/www/>
Options Indexes FollowSymLinks MultiViews
AllowOverride None
"""
        cfg += permstr2[version]
        cfg += """\
</Directory>
ErrorLog ${APACHE_LOG_DIR}/error.log
LogLevel warn
CustomLog ${APACHE_LOG_DIR}/access.log combined
</VirtualHost>
"""
        return cfg

    @classmethod
    def generateenvvars(cls, node, filename, services):
        # environment read by apache2ctl; mirrors a stock Ubuntu install
        return """\
# this file is used by apache2ctl - generated by utility.py:HttpService
# these settings come from a default Ubuntu apache2 installation
export APACHE_RUN_USER=www-data
export APACHE_RUN_GROUP=www-data
export APACHE_PID_FILE=/var/run/apache2.pid
export APACHE_RUN_DIR=/var/run/apache2
export APACHE_LOCK_DIR=/var/lock/apache2
export APACHE_LOG_DIR=/var/log/apache2
export LANG=C
export LANG
"""

    @classmethod
    def generatehtml(cls, node, filename, services):
        # default landing page listing this node's interfaces/addresses
        body = """\
<!-- generated by utility.py:HttpService -->
<h1>%s web server</h1>
<p>This is the default web page for this server.</p>
<p>The web server software is running but no content has been added, yet.</p>
""" % node.name
        for ifc in node.netifs():
            # skip control (management) interfaces
            if hasattr(ifc, 'control') and ifc.control == True:
                continue
            body += "<li>%s - %s</li>\n" % (ifc.name, ifc.addrlist)
        return "<html><body>%s</body></html>" % body

addservice(HttpService)
class PcapService(UtilService):
    ''' Pcap service for logging packets.
    '''
    _name = "pcap"
    _configs = ("pcap.sh", )
    _dirs = ()
    _startindex = 1
    _startup = ("sh pcap.sh start",)
    _shutdown = ("sh pcap.sh stop",)
    _validate = ("pidof tcpdump",)
    _meta = "logs network traffic to pcap packet capture files"

    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Generate a startpcap.sh traffic logging script.

        The script starts one tcpdump per interface on "start" and moves
        the capture files into the session directory on "stop".
        '''
        cfg = """
#!/bin/sh
# set tcpdump options here (see 'man tcpdump' for help)
# (-s snap length, -C limit pcap file length, -n disable name resolution)
DUMPOPTS="-s 12288 -C 10 -n"
if [ "x$1" = "xstart" ]; then
"""
        for ifc in node.netifs():
            # control interfaces get a commented-out tcpdump line
            if hasattr(ifc, 'control') and ifc.control == True:
                cfg += '# '
            redir = "< /dev/null"
            cfg += "tcpdump ${DUMPOPTS} -w %s.%s.pcap -i %s %s &\n" % \
                (node.name, ifc.name, ifc.name, redir)
        cfg += """
elif [ "x$1" = "xstop" ]; then
mkdir -p ${SESSION_DIR}/pcap
mv *.pcap ${SESSION_DIR}/pcap
fi;
"""
        return cfg

addservice(PcapService)
class RadvdService(UtilService):
    """Router Advertisement daemon: advertise each IPv6 interface prefix."""
    _name = "radvd"
    _configs = ("/etc/radvd/radvd.conf",)
    _dirs = ("/etc/radvd",)
    _startup = ("radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log",)
    _shutdown = ("pkill radvd",)
    _validate = ("pidof radvd",)

    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Generate a RADVD router advertisement daemon config file
        using the network address of each interface.
        '''
        cfg = "# auto-generated by RADVD service (utility.py)\n"
        for ifc in node.netifs():
            # skip control (management) interfaces
            if hasattr(ifc, 'control') and ifc.control == True:
                continue
            # materialize the map so len() and re-iteration work under
            # Python 3, where map() returns a one-shot iterator
            prefixes = list(map(cls.subnetentry, ifc.addrlist))
            if len(prefixes) < 1:
                continue
            cfg += """\
interface %s
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 10;
AdvDefaultPreference low;
AdvHomeAgentFlag off;
""" % ifc.name
            for prefix in prefixes:
                # IPv4 addresses map to "" and are skipped
                if prefix == "":
                    continue
                cfg += """\
prefix %s
{
AdvOnLink on;
AdvAutonomous on;
AdvRouterAddr on;
};
""" % prefix
            cfg += "};\n"
        return cfg

    @staticmethod
    def subnetentry(x):
        ''' Generate a subnet declaration block given an IPv6 prefix string
        for inclusion in the RADVD config file.
        '''
        if x.find(":") >= 0:
            net = IPv6Prefix(x)
            return str(net)
        else:
            return ""

addservice(RadvdService)
class AtdService(UtilService):
    ''' Atd service for scheduling at jobs
    '''
    _name = "atd"
    _configs = ("startatd.sh",)
    _dirs = ("/var/spool/cron/atjobs", "/var/spool/cron/atspool")
    _startup = ("sh startatd.sh", )
    _shutdown = ("pkill atd", )

    @classmethod
    def generateconfig(cls, node, filename, services):
        # Seed the at(1) job sequence file and fix spool ownership and
        # permissions before starting the daemon.
        return """
#!/bin/sh
echo 00001 > /var/spool/cron/atjobs/.SEQ
chown -R daemon /var/spool/cron/*
chmod -R 700 /var/spool/cron/*
atd
"""

addservice(AtdService)
class UserDefinedService(UtilService):
    ''' Dummy service allowing customization of anything.
    '''
    # No configs/startup commands of its own; users customize via the GUI.
    _name = "UserDefined"
    _startindex = 50
    _meta = "Customize this service to do anything upon startup."

addservice(UserDefinedService)
|
|
<<<<<<< HEAD
<<<<<<< HEAD
from ctypes import *
from ctypes.test import need_symbol
import unittest
import sys
class Test(unittest.TestCase):
def test_array2pointer(self):
array = (c_int * 3)(42, 17, 2)
# casting an array to a pointer works.
ptr = cast(array, POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
if 2*sizeof(c_short) == sizeof(c_int):
ptr = cast(array, POINTER(c_short))
if sys.byteorder == "little":
self.assertEqual([ptr[i] for i in range(6)],
[42, 0, 17, 0, 2, 0])
else:
self.assertEqual([ptr[i] for i in range(6)],
[0, 42, 0, 17, 0, 2])
def test_address2pointer(self):
array = (c_int * 3)(42, 17, 2)
address = addressof(array)
ptr = cast(c_void_p(address), POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
ptr = cast(address, POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
def test_p2a_objects(self):
array = (c_char_p * 5)()
self.assertEqual(array._objects, None)
array[0] = b"foo bar"
self.assertEqual(array._objects, {'0': b"foo bar"})
p = cast(array, POINTER(c_char_p))
# array and p share a common _objects attribute
self.assertIs(p._objects, array._objects)
self.assertEqual(array._objects, {'0': b"foo bar", id(array): array})
p[0] = b"spam spam"
self.assertEqual(p._objects, {'0': b"spam spam", id(array): array})
self.assertIs(array._objects, p._objects)
p[1] = b"foo bar"
self.assertEqual(p._objects, {'1': b'foo bar', '0': b"spam spam", id(array): array})
self.assertIs(array._objects, p._objects)
def test_other(self):
p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int))
self.assertEqual(p[:4], [1,2, 3, 4])
self.assertEqual(p[:4:], [1, 2, 3, 4])
self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
c_int()
self.assertEqual(p[:4], [1, 2, 3, 4])
self.assertEqual(p[:4:], [1, 2, 3, 4])
self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
p[2] = 96
self.assertEqual(p[:4], [1, 2, 96, 4])
self.assertEqual(p[:4:], [1, 2, 96, 4])
self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
c_int()
self.assertEqual(p[:4], [1, 2, 96, 4])
self.assertEqual(p[:4:], [1, 2, 96, 4])
self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
def test_char_p(self):
# This didn't work: bad argument to internal function
s = c_char_p(b"hiho")
self.assertEqual(cast(cast(s, c_void_p), c_char_p).value,
b"hiho")
@need_symbol('c_wchar_p')
def test_wchar_p(self):
s = c_wchar_p("hiho")
self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value,
"hiho")
if __name__ == "__main__":
unittest.main()
=======
from ctypes import *
from ctypes.test import need_symbol
import unittest
import sys
class Test(unittest.TestCase):
def test_array2pointer(self):
array = (c_int * 3)(42, 17, 2)
# casting an array to a pointer works.
ptr = cast(array, POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
if 2*sizeof(c_short) == sizeof(c_int):
ptr = cast(array, POINTER(c_short))
if sys.byteorder == "little":
self.assertEqual([ptr[i] for i in range(6)],
[42, 0, 17, 0, 2, 0])
else:
self.assertEqual([ptr[i] for i in range(6)],
[0, 42, 0, 17, 0, 2])
def test_address2pointer(self):
array = (c_int * 3)(42, 17, 2)
address = addressof(array)
ptr = cast(c_void_p(address), POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
ptr = cast(address, POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
def test_p2a_objects(self):
array = (c_char_p * 5)()
self.assertEqual(array._objects, None)
array[0] = b"foo bar"
self.assertEqual(array._objects, {'0': b"foo bar"})
p = cast(array, POINTER(c_char_p))
# array and p share a common _objects attribute
self.assertIs(p._objects, array._objects)
self.assertEqual(array._objects, {'0': b"foo bar", id(array): array})
p[0] = b"spam spam"
self.assertEqual(p._objects, {'0': b"spam spam", id(array): array})
self.assertIs(array._objects, p._objects)
p[1] = b"foo bar"
self.assertEqual(p._objects, {'1': b'foo bar', '0': b"spam spam", id(array): array})
self.assertIs(array._objects, p._objects)
def test_other(self):
p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int))
self.assertEqual(p[:4], [1,2, 3, 4])
self.assertEqual(p[:4:], [1, 2, 3, 4])
self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
c_int()
self.assertEqual(p[:4], [1, 2, 3, 4])
self.assertEqual(p[:4:], [1, 2, 3, 4])
self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
p[2] = 96
self.assertEqual(p[:4], [1, 2, 96, 4])
self.assertEqual(p[:4:], [1, 2, 96, 4])
self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
c_int()
self.assertEqual(p[:4], [1, 2, 96, 4])
self.assertEqual(p[:4:], [1, 2, 96, 4])
self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
def test_char_p(self):
# This didn't work: bad argument to internal function
s = c_char_p(b"hiho")
self.assertEqual(cast(cast(s, c_void_p), c_char_p).value,
b"hiho")
@need_symbol('c_wchar_p')
def test_wchar_p(self):
s = c_wchar_p("hiho")
self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value,
"hiho")
if __name__ == "__main__":
unittest.main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
from ctypes import *
from ctypes.test import need_symbol
import unittest
import sys
class Test(unittest.TestCase):
def test_array2pointer(self):
array = (c_int * 3)(42, 17, 2)
# casting an array to a pointer works.
ptr = cast(array, POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
if 2*sizeof(c_short) == sizeof(c_int):
ptr = cast(array, POINTER(c_short))
if sys.byteorder == "little":
self.assertEqual([ptr[i] for i in range(6)],
[42, 0, 17, 0, 2, 0])
else:
self.assertEqual([ptr[i] for i in range(6)],
[0, 42, 0, 17, 0, 2])
def test_address2pointer(self):
array = (c_int * 3)(42, 17, 2)
address = addressof(array)
ptr = cast(c_void_p(address), POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
ptr = cast(address, POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
def test_p2a_objects(self):
array = (c_char_p * 5)()
self.assertEqual(array._objects, None)
array[0] = b"foo bar"
self.assertEqual(array._objects, {'0': b"foo bar"})
p = cast(array, POINTER(c_char_p))
# array and p share a common _objects attribute
self.assertIs(p._objects, array._objects)
self.assertEqual(array._objects, {'0': b"foo bar", id(array): array})
p[0] = b"spam spam"
self.assertEqual(p._objects, {'0': b"spam spam", id(array): array})
self.assertIs(array._objects, p._objects)
p[1] = b"foo bar"
self.assertEqual(p._objects, {'1': b'foo bar', '0': b"spam spam", id(array): array})
self.assertIs(array._objects, p._objects)
def test_other(self):
p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int))
self.assertEqual(p[:4], [1,2, 3, 4])
self.assertEqual(p[:4:], [1, 2, 3, 4])
self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
c_int()
self.assertEqual(p[:4], [1, 2, 3, 4])
self.assertEqual(p[:4:], [1, 2, 3, 4])
self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
p[2] = 96
self.assertEqual(p[:4], [1, 2, 96, 4])
self.assertEqual(p[:4:], [1, 2, 96, 4])
self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
c_int()
self.assertEqual(p[:4], [1, 2, 96, 4])
self.assertEqual(p[:4:], [1, 2, 96, 4])
self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
def test_char_p(self):
# This didn't work: bad argument to internal function
s = c_char_p(b"hiho")
self.assertEqual(cast(cast(s, c_void_p), c_char_p).value,
b"hiho")
@need_symbol('c_wchar_p')
def test_wchar_p(self):
s = c_wchar_p("hiho")
self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value,
"hiho")
if __name__ == "__main__":
unittest.main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
|
|
from __future__ import absolute_import
from __future__ import print_function
from contextlib import contextmanager
from typing import (cast, Any, Callable, Dict, Generator, Iterable, Iterator, List, Mapping,
Optional, Set, Sized, Tuple, Union, IO, Text)
from django.core import signing
from django.core.urlresolvers import LocaleRegexURLResolver
from django.conf import settings
from django.test import TestCase
from django.test.client import (
BOUNDARY, MULTIPART_CONTENT, encode_multipart,
)
from django.template import loader
from django.http import HttpResponse
from django.db.utils import IntegrityError
from zerver.lib.avatar import avatar_url
from zerver.lib.cache import get_cache_backend
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib.str_utils import force_text
from zerver.lib import cache
from zerver.tornado import event_queue
from zerver.tornado.handlers import allocate_handler_id
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, bulk_add_subscriptions,
get_display_recipient, bulk_remove_subscriptions
)
from zerver.models import (
get_recipient,
get_stream,
get_user,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
UserProfile,
)
from zerver.lib.request import JsonableError
import collections
import base64
import mock
import os
import re
import sys
import time
import ujson
import unittest
from six.moves import urllib
from six import binary_type
from zerver.lib.str_utils import NonBinaryStr
from contextlib import contextmanager
import six
import fakeldap
import ldap
class MockLDAP(fakeldap.MockLDAP):
    # Extends fakeldap's mock so that the exception classes it raises are
    # subclasses of the real python-ldap exceptions, letting production
    # except-clauses catch them in tests.
    class LDAPError(ldap.LDAPError):
        pass

    class INVALID_CREDENTIALS(ldap.INVALID_CREDENTIALS):
        pass

    class NO_SUCH_OBJECT(ldap.NO_SUCH_OBJECT):
        pass

    class ALREADY_EXISTS(ldap.ALREADY_EXISTS):
        pass
@contextmanager
def stub_event_queue_user_events(event_queue_return, user_events_return):
    # type: (Any, Any) -> Iterator[None]
    """Patch the event-queue helpers to return the given canned values."""
    queue_patch = mock.patch('zerver.lib.events.request_event_queue',
                             return_value=event_queue_return)
    events_patch = mock.patch('zerver.lib.events.get_user_events',
                              return_value=user_events_return)
    with queue_patch:
        with events_patch:
            yield
@contextmanager
def simulated_queue_client(client):
    # type: (Callable) -> Iterator[None]
    """Temporarily replace SimpleQueueClient with the given stub class.

    The original class is restored in a finally block, so an exception in
    the with-body cannot leave queue_processors patched for later tests
    (the previous implementation skipped restoration on error).
    """
    real_SimpleQueueClient = queue_processors.SimpleQueueClient
    queue_processors.SimpleQueueClient = client  # type: ignore # https://github.com/JukkaL/mypy/issues/1152
    try:
        yield
    finally:
        queue_processors.SimpleQueueClient = real_SimpleQueueClient  # type: ignore # https://github.com/JukkaL/mypy/issues/1152
@contextmanager
def tornado_redirected_to_list(lst):
    # type: (List[Mapping[str, Any]]) -> Iterator[None]
    """Capture Tornado event notifications into lst instead of processing them.

    The real process_notification is restored in a finally block, so an
    exception in the with-body cannot leave it patched for later tests.
    """
    real_event_queue_process_notification = event_queue.process_notification
    # process_notification takes a single parameter called 'notice'.
    # lst.append takes a single argument called 'object'.
    # Some code might call process_notification using keyword arguments,
    # so mypy doesn't allow assigning lst.append to process_notification
    # So explicitly change parameter name to 'notice' to work around this problem
    event_queue.process_notification = lambda notice: lst.append(notice)
    try:
        yield
    finally:
        event_queue.process_notification = real_event_queue_process_notification
@contextmanager
def simulated_empty_cache():
    # type: () -> Generator[List[Tuple[str, Union[Text, List[Text]], Text]], None, None]
    """Patch cache_get/cache_get_many to always miss, recording each query.

    The originals are restored in a finally block, so an exception in the
    with-body cannot leave the cache monkey-patched for later tests.
    """
    cache_queries = []  # type: List[Tuple[str, Union[Text, List[Text]], Text]]

    def my_cache_get(key, cache_name=None):
        # type: (Text, Optional[str]) -> Optional[Dict[Text, Any]]
        cache_queries.append(('get', key, cache_name))
        return None

    def my_cache_get_many(keys, cache_name=None):  # nocoverage -- simulated code doesn't use this
        # type: (List[Text], Optional[str]) -> Dict[Text, Any]
        cache_queries.append(('getmany', keys, cache_name))
        return {}

    old_get = cache.cache_get
    old_get_many = cache.cache_get_many
    cache.cache_get = my_cache_get
    cache.cache_get_many = my_cache_get_many
    try:
        yield cache_queries
    finally:
        cache.cache_get = old_get
        cache.cache_get_many = old_get_many
@contextmanager
def queries_captured(include_savepoints=False):
    # type: (Optional[bool]) -> Generator[List[Dict[str, Union[str, binary_type]]], None, None]
    '''
    Allow a user to capture just the queries executed during
    the with statement.

    The cursor methods are restored in a finally block, so a failure inside
    the with-body cannot leave TimeTrackingCursor patched for later tests.
    '''
    queries = []  # type: List[Dict[str, Union[str, binary_type]]]

    def wrapper_execute(self, action, sql, params=()):
        # type: (TimeTrackingCursor, Callable, NonBinaryStr, Iterable[Any]) -> None
        # clear the cache so every query actually hits the database
        cache = get_cache_backend(None)
        cache.clear()
        start = time.time()
        try:
            return action(sql, params)
        finally:
            stop = time.time()
            duration = stop - start
            if include_savepoints or ('SAVEPOINT' not in sql):
                queries.append({
                    'sql': self.mogrify(sql, params).decode('utf-8'),
                    'time': "%.3f" % duration,
                })

    old_execute = TimeTrackingCursor.execute
    old_executemany = TimeTrackingCursor.executemany

    def cursor_execute(self, sql, params=()):
        # type: (TimeTrackingCursor, NonBinaryStr, Iterable[Any]) -> None
        return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params)  # type: ignore # https://github.com/JukkaL/mypy/issues/1167

    TimeTrackingCursor.execute = cursor_execute  # type: ignore # https://github.com/JukkaL/mypy/issues/1167

    def cursor_executemany(self, sql, params=()):
        # type: (TimeTrackingCursor, NonBinaryStr, Iterable[Any]) -> None
        return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params)  # type: ignore # https://github.com/JukkaL/mypy/issues/1167 # nocoverage -- doesn't actually get used in tests

    TimeTrackingCursor.executemany = cursor_executemany  # type: ignore # https://github.com/JukkaL/mypy/issues/1167
    try:
        yield queries
    finally:
        TimeTrackingCursor.execute = old_execute  # type: ignore # https://github.com/JukkaL/mypy/issues/1167
        TimeTrackingCursor.executemany = old_executemany  # type: ignore # https://github.com/JukkaL/mypy/issues/1167
@contextmanager
def stdout_suppressed():
    # type: () -> Iterator[IO[str]]
    """Redirect stdout to /dev/null.

    Yields the original stdout and restores it in a finally block, so an
    exception in the with-body cannot leave sys.stdout pointing at the
    (closed) devnull handle, as the previous implementation could.
    """
    with open(os.devnull, 'a') as devnull:
        stdout, sys.stdout = sys.stdout, devnull  # type: ignore # monkey-patching
        try:
            yield stdout
        finally:
            sys.stdout = stdout
def get_test_image_file(filename):
    # type: (str) -> IO[Any]
    """Open one of the image fixtures from zerver/tests/images in binary mode."""
    images_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '../tests/images'))
    return open(os.path.join(images_dir, filename), 'rb')
def avatar_disk_path(user_profile, medium=False):
    # type: (UserProfile, bool) -> Text
    """Local filesystem path where this user's avatar image is stored."""
    url_path = avatar_url(user_profile, medium)
    pieces = url_path.split("/")
    # last URL component minus any query string is the file name
    file_name = pieces[-1].split("?")[0]
    return os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars",
                        pieces[-2], file_name)
def make_client(name):
    # type: (str) -> Client
    # Fetch-or-create a Client row by name; idempotent across test runs.
    client, _ = Client.objects.get_or_create(name=name)
    return client
def find_key_by_email(address):
    # type: (Text) -> Optional[Text]
    """Return the confirmation key from the newest outgoing email to address."""
    from django.core.mail import outbox
    key_regex = re.compile("accounts/do_confirm/([a-z0-9]{24})>")
    for message in reversed(outbox):
        if address not in message.to:
            continue
        return key_regex.search(message.body).groups()[0]
    return None  # nocoverage -- in theory a test might want this case, but none do
def find_pattern_in_email(address, pattern):
    # type: (Text, Text) -> Optional[Text]
    """Return the first match of pattern in the newest outgoing email to address."""
    compiled = re.compile(pattern)
    from django.core.mail import outbox
    for message in reversed(outbox):
        if address not in message.to:
            continue
        return compiled.search(message.body).group(0)
    return None  # nocoverage -- in theory a test might want this case, but none do
def message_stream_count(user_profile):
    # type: (UserProfile) -> int
    """Number of UserMessage rows belonging to the given user."""
    qs = UserMessage.objects.select_related("message")
    return qs.filter(user_profile=user_profile).count()
def most_recent_usermessage(user_profile):
    # type: (UserProfile) -> UserMessage
    """The UserMessage row for the user's newest message."""
    rows = (UserMessage.objects
            .select_related("message")
            .filter(user_profile=user_profile)
            .order_by('-message'))
    return rows[0]  # Django does LIMIT here
def most_recent_message(user_profile):
    # type: (UserProfile) -> Message
    """The newest Message received by the given user."""
    return most_recent_usermessage(user_profile).message
def get_subscription(stream_name, user_profile):
    # type: (Text, UserProfile) -> Subscription
    """The user's active Subscription to the named stream in their realm."""
    target_stream = get_stream(stream_name, user_profile.realm)
    target_recipient = get_recipient(Recipient.STREAM, target_stream.id)
    return Subscription.objects.get(user_profile=user_profile,
                                    recipient=target_recipient, active=True)
def get_user_messages(user_profile):
    # type: (UserProfile) -> List[Message]
    """All messages for the user, oldest first."""
    rows = (UserMessage.objects
            .select_related("message")
            .filter(user_profile=user_profile)
            .order_by('message'))
    return [row.message for row in rows]
class DummyHandler(object):
    # Minimal stand-in for a Tornado handler; registering it on construction
    # assigns it a handler id so request mocks can reference it.
    def __init__(self):
        # type: () -> None
        allocate_handler_id(self)  # type: ignore # this is a testing mock
class POSTRequestMock(object):
    """Minimal stand-in for a Django POST request used in view tests."""
    method = "POST"

    def __init__(self, post_data, user_profile):
        # type: (Dict[str, Any], Optional[UserProfile]) -> None
        self.POST = post_data
        self.GET = {}  # type: Dict[str, Any]
        self.META = {'PATH_INFO': 'test'}
        self.path = ''
        self.user = user_profile
        # internal bookkeeping expected by middleware/views under test
        self._tornado_handler = DummyHandler()
        self._log_data = {}  # type: Dict[str, Any]
class HostRequestMock(object):
    """A mock request object where get_host() works. Useful for testing
    routes that use Zulip's subdomains feature"""

    def __init__(self, user_profile=None, host=settings.EXTERNAL_HOST):
        # type: (UserProfile, Text) -> None
        self.host = host
        self.user = user_profile
        self.method = ''
        self.path = ''
        self.META = {'PATH_INFO': 'test'}
        self.GET = {}  # type: Dict[str, Any]
        self.POST = {}  # type: Dict[str, Any]

    def get_host(self):
        # type: () -> Text
        return self.host
class MockPythonResponse(object):
    """Tiny stub mimicking a requests-style response (text, status_code, ok)."""

    def __init__(self, text, status_code):
        # type: (Text, int) -> None
        self.status_code = status_code
        self.text = text

    @property
    def ok(self):
        # type: () -> bool
        # success means exactly HTTP 200 for this stub
        return self.status_code == 200
INSTRUMENTING = os.environ.get('TEST_INSTRUMENT_URL_COVERAGE', '') == 'TRUE'
INSTRUMENTED_CALLS = [] # type: List[Dict[str, Any]]
UrlFuncT = Callable[..., HttpResponse] # TODO: make more specific
def append_instrumentation_data(data):
    # type: (Dict[str, Any]) -> None
    # Record one instrumented URL call in the module-level list.
    INSTRUMENTED_CALLS.append(data)
def instrument_url(f):
    # type: (UrlFuncT) -> UrlFuncT
    """Decorator recording URL, timing and status data for each test-client
    request when TEST_INSTRUMENT_URL_COVERAGE is enabled."""
    if not INSTRUMENTING:  # nocoverage -- option is always enabled; should we remove?
        return f
    else:
        # NOTE(review): info={} is a mutable default argument; it is only read
        # and passed through here, so it appears harmless, but confirm before
        # relying on that.
        def wrapper(self, url, info={}, **kwargs):
            # type: (Any, Text, Dict[str, Any], **Any) -> HttpResponse
            start = time.time()
            result = f(self, url, info, **kwargs)
            delay = time.time() - start
            test_name = self.id()
            # split any query string off the URL so patterns match cleanly
            if '?' in url:
                url, extra_info = url.split('?', 1)
            else:
                extra_info = ''
            append_instrumentation_data(dict(
                url=url,
                status_code=result.status_code,
                method=f.__name__,
                delay=delay,
                extra_info=extra_info,
                info=info,
                test_name=test_name,
                kwargs=kwargs))
            return result
        return wrapper
def write_instrumentation_reports(full_suite):
    # type: (bool) -> None
    """Write the URL-coverage report collected via instrument_url() and,
    when running the full suite, fail the run if any URL pattern was
    never exercised with a successful status code."""
    if INSTRUMENTING:
        calls = INSTRUMENTED_CALLS
        from zproject.urls import urlpatterns, v1_api_and_json_patterns
        # Find our untested urls.
        # Maps canonical pattern string -> number of successful hits.
        pattern_cnt = collections.defaultdict(int)  # type: Dict[str, int]
        def re_strip(r):
            # type: (Any) -> str
            # Normalize a route regex into a canonical key (drop anchors).
            return str(r).lstrip('^').rstrip('$')
        def find_patterns(patterns, prefixes):
            # type: (List[Any], List[str]) -> None
            for pattern in patterns:
                find_pattern(pattern, prefixes)
        def cleanup_url(url):
            # type: (str) -> str
            # Strip the leading slash and any test-server host prefix so
            # recorded URLs can be matched against route regexes.
            if url.startswith('/'):
                url = url[1:]
            if url.startswith('http://testserver/'):
                url = url[len('http://testserver/'):]
            if url.startswith('http://zulip.testserver/'):
                url = url[len('http://zulip.testserver/'):]
            if url.startswith('http://testserver:9080/'):
                url = url[len('http://testserver:9080/'):]
            return url
        def find_pattern(pattern, prefixes):
            # type: (Any, List[str]) -> None
            # NOTE(review): isinstance against type(LocaleRegexURLResolver)
            # tests against the metaclass, so this branch matches classes,
            # not resolver instances; likely meant
            # isinstance(pattern, LocaleRegexURLResolver) -- confirm.
            if isinstance(pattern, type(LocaleRegexURLResolver)):
                return  # nocoverage -- shouldn't actually happen
            # Resolvers (anything with sub-patterns) are skipped; only leaf
            # URL patterns are counted.
            if hasattr(pattern, 'url_patterns'):
                return
            canon_pattern = prefixes[0] + re_strip(pattern.regex.pattern)
            cnt = 0
            for call in calls:
                # Each call is attributed to at most one pattern.
                if 'pattern' in call:
                    continue
                url = cleanup_url(call['url'])
                for prefix in prefixes:
                    if url.startswith(prefix):
                        match_url = url[len(prefix):]
                        if pattern.regex.match(match_url):
                            # Only success/redirect responses count as coverage.
                            if call['status_code'] in [200, 204, 301, 302]:
                                cnt += 1
                            call['pattern'] = canon_pattern
            pattern_cnt[canon_pattern] += cnt
        find_patterns(urlpatterns, ['', 'en/', 'de/'])
        find_patterns(v1_api_and_json_patterns, ['api/v1/', 'json/'])
        # Sanity check that URL discovery actually worked.
        assert len(pattern_cnt) > 100
        untested_patterns = set([p for p in pattern_cnt if pattern_cnt[p] == 0])
        exempt_patterns = set([
            # We exempt some patterns that are called via Tornado.
            'api/v1/events',
            'api/v1/register',
            # We also exempt some development environment debugging
            # static content URLs, since the content they point to may
            # or may not exist.
            'coverage/(?P<path>.*)',
            'node-coverage/(?P<path>.*)',
            'docs/(?P<path>.*)',
        ])
        untested_patterns -= exempt_patterns
        var_dir = 'var'  # TODO make sure path is robust here
        fn = os.path.join(var_dir, 'url_coverage.txt')
        # Dump the raw per-call data for offline analysis.
        with open(fn, 'w') as f:
            for call in calls:
                try:
                    line = ujson.dumps(call)
                    f.write(line + '\n')
                except OverflowError:  # nocoverage -- test suite error handling
                    print('''
A JSON overflow error was encountered while
producing the URL coverage report. Sometimes
this indicates that a test is passing objects
into methods like client_post(), which is
unnecessary and leads to false positives.
''')
                    print(call)
        if full_suite:
            print('INFO: URL coverage report is in %s' % (fn,))
            print('INFO: Try running: ./tools/create-test-api-docs')
        if full_suite and len(untested_patterns):  # nocoverage -- test suite error handling
            print("\nERROR: Some URLs are untested! Here's the list of untested URLs:")
            for untested_pattern in sorted(untested_patterns):
                print(" %s" % (untested_pattern,))
            sys.exit(1)
def get_all_templates():
    # type: () -> List[str]
    """Collect the relative paths of all Django templates worth checking,
    skipping webhook fixtures, hidden files, package markers and markdown."""
    found = []

    def usable(full_path, fname):
        # type: (Text, Text) -> bool
        if 'webhooks' in full_path:
            return False
        if fname.startswith(('.', '__init__')):
            return False
        if fname.endswith('.md'):
            return False
        return os.path.isfile(full_path)

    for engine in loader.engines.all():
        for template_dir in engine.template_dirs:
            if not os.path.exists(template_dir):
                continue
            template_dir = os.path.normpath(template_dir)
            for dirpath, dirnames, fnames in os.walk(template_dir):
                for fname in fnames:
                    full_path = os.path.join(dirpath, fname)
                    if usable(full_path, fname):
                        found.append(os.path.relpath(full_path, template_dir))
    return found
def unsign_subdomain_cookie(result):
    # type: (HttpResponse) -> Dict[str, Any]
    """Decode the signed subdomain cookie set by zerver.views.auth."""
    key = 'subdomain.signature'
    signer = signing.get_cookie_signer(salt=key + 'zerver.views.auth')
    cookie = result.cookies.get(key)
    # The signature is short-lived; 15 seconds is plenty for a test.
    return ujson.loads(signer.unsign(cookie.value, max_age=15))
|
|
from pytest import raises
from graphql_relay.connection.arrayconnection import (
connectionFromArray, cursorForObjectInConnection)
letters = ['A', 'B', 'C', 'D', 'E']
def test_returns_all_elements_without_filters():
    """With no pagination arguments the whole array is returned."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, {})
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(0, 5)],
        'pageInfo': {
            'startCursor': cursors[0],
            'endCursor': cursors[4],
            'hasPreviousPage': False,
            'hasNextPage': False,
        },
    }
    assert conn.to_dict() == expected
def test_respects_a_smaller_first():
    """first=2 returns only the first two letters and reports a next page."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, first=2)
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(0, 2)],
        'pageInfo': {
            'startCursor': cursors[0],
            'endCursor': cursors[1],
            'hasPreviousPage': False,
            'hasNextPage': True,
        },
    }
    assert conn.to_dict() == expected
def test_respects_an_overly_large_first():
    """A first larger than the array yields everything with no next page."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, first=10)
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(0, 5)],
        'pageInfo': {
            'startCursor': cursors[0],
            'endCursor': cursors[4],
            'hasPreviousPage': False,
            'hasNextPage': False,
        },
    }
    assert conn.to_dict() == expected
def test_respects_a_smaller_last():
    """last=2 returns only the final two letters and reports a previous page."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, last=2)
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(3, 5)],
        'pageInfo': {
            'startCursor': cursors[3],
            'endCursor': cursors[4],
            'hasPreviousPage': True,
            'hasNextPage': False,
        },
    }
    assert conn.to_dict() == expected
def test_respects_an_overly_large_last():
    """A last larger than the array yields everything with no previous page."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, last=10)
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(0, 5)],
        'pageInfo': {
            'startCursor': cursors[0],
            'endCursor': cursors[4],
            'hasPreviousPage': False,
            'hasNextPage': False,
        },
    }
    assert conn.to_dict() == expected
def test_pagination_respects_first_after():
    """first=2 after the second cursor yields C and D with a next page."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, first=2, after='YXJyYXljb25uZWN0aW9uOjE=')
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(2, 4)],
        'pageInfo': {
            'startCursor': cursors[2],
            'endCursor': cursors[3],
            'hasPreviousPage': False,
            'hasNextPage': True,
        },
    }
    assert conn.to_dict() == expected
def test_pagination_respects_longfirst_after():
    """An oversized first after the second cursor yields the whole tail."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, first=10, after='YXJyYXljb25uZWN0aW9uOjE=')
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(2, 5)],
        'pageInfo': {
            'startCursor': cursors[2],
            'endCursor': cursors[4],
            'hasPreviousPage': False,
            'hasNextPage': False,
        },
    }
    assert conn.to_dict() == expected
def test_pagination_respects_last_before():
    """last=2 before the fourth cursor yields B and C with a previous page."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, last=2, before='YXJyYXljb25uZWN0aW9uOjM=')
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(1, 3)],
        'pageInfo': {
            'startCursor': cursors[1],
            'endCursor': cursors[2],
            'hasPreviousPage': True,
            'hasNextPage': False,
        },
    }
    assert conn.to_dict() == expected
def test_pagination_respects_longlast_before():
    """An oversized last before the fourth cursor yields the whole head."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, last=10, before='YXJyYXljb25uZWN0aW9uOjM=')
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(0, 3)],
        'pageInfo': {
            'startCursor': cursors[0],
            'endCursor': cursors[2],
            'hasPreviousPage': False,
            'hasNextPage': False,
        },
    }
    assert conn.to_dict() == expected
def test_first_after_before_few():
    """first smaller than the after/before window truncates and flags next."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, first=2,
                               after='YXJyYXljb25uZWN0aW9uOjA=',
                               before='YXJyYXljb25uZWN0aW9uOjQ=',
                               )
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(1, 3)],
        'pageInfo': {
            'startCursor': cursors[1],
            'endCursor': cursors[2],
            'hasPreviousPage': False,
            'hasNextPage': True,
        },
    }
    assert conn.to_dict() == expected
def test_first_after_before_many():
    """first larger than the after/before window yields the whole window."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, first=4,
                               after='YXJyYXljb25uZWN0aW9uOjA=',
                               before='YXJyYXljb25uZWN0aW9uOjQ=',
                               )
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(1, 4)],
        'pageInfo': {
            'startCursor': cursors[1],
            'endCursor': cursors[3],
            'hasPreviousPage': False,
            'hasNextPage': False,
        },
    }
    assert conn.to_dict() == expected
def test_first_after_before_exact():
    """first exactly matching the after/before window yields it fully."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, first=3,
                               after='YXJyYXljb25uZWN0aW9uOjA=',
                               before='YXJyYXljb25uZWN0aW9uOjQ=',
                               )
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(1, 4)],
        'pageInfo': {
            'startCursor': cursors[1],
            'endCursor': cursors[3],
            'hasPreviousPage': False,
            'hasNextPage': False,
        },
    }
    assert conn.to_dict() == expected
def test_last_after_before_few():
    """last smaller than the after/before window truncates and flags previous."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, last=2,
                               after='YXJyYXljb25uZWN0aW9uOjA=',
                               before='YXJyYXljb25uZWN0aW9uOjQ=',
                               )
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(2, 4)],
        'pageInfo': {
            'startCursor': cursors[2],
            'endCursor': cursors[3],
            'hasPreviousPage': True,
            'hasNextPage': False,
        },
    }
    assert conn.to_dict() == expected
def test_last_after_before_many():
    """last larger than the after/before window yields the whole window."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, last=4,
                               after='YXJyYXljb25uZWN0aW9uOjA=',
                               before='YXJyYXljb25uZWN0aW9uOjQ=',
                               )
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(1, 4)],
        'pageInfo': {
            'startCursor': cursors[1],
            'endCursor': cursors[3],
            'hasPreviousPage': False,
            'hasNextPage': False,
        },
    }
    assert conn.to_dict() == expected
def test_last_after_before_exact():
    """last exactly matching the after/before window yields it fully."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, last=3,
                               after='YXJyYXljb25uZWN0aW9uOjA=',
                               before='YXJyYXljb25uZWN0aW9uOjQ=',
                               )
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(1, 4)],
        'pageInfo': {
            'startCursor': cursors[1],
            'endCursor': cursors[3],
            'hasPreviousPage': False,
            'hasNextPage': False,
        },
    }
    assert conn.to_dict() == expected
def test_no_elements_first_0():
    """first=0 yields no edges and null start/end cursors."""
    conn = connectionFromArray(letters, first=0)
    assert conn.to_dict() == {
        'edges': [],
        'pageInfo': {
            'startCursor': None,
            'endCursor': None,
            'hasPreviousPage': False,
            'hasNextPage': False,
        },
    }
def test_all_elements_invalid_cursors():
    """Unparseable cursors are ignored, so the full array comes back."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters, before='invalid', after='invalid')
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(0, 5)],
        'pageInfo': {
            'startCursor': cursors[0],
            'endCursor': cursors[4],
            'hasPreviousPage': False,
            'hasNextPage': False,
        },
    }
    assert conn.to_dict() == expected
def test_all_elements_cursor_outside():
    """Cursors pointing outside the array do not restrict the slice."""
    cursors = ['YXJyYXljb25uZWN0aW9uOjA=',
               'YXJyYXljb25uZWN0aW9uOjE=',
               'YXJyYXljb25uZWN0aW9uOjI=',
               'YXJyYXljb25uZWN0aW9uOjM=',
               'YXJyYXljb25uZWN0aW9uOjQ=']
    conn = connectionFromArray(letters,
                               before='YXJyYXljb25uZWN0aW9uOjYK',
                               after='YXJyYXljb25uZWN0aW9uOi0xCg==')
    expected = {
        'edges': [{'node': letters[i], 'cursor': cursors[i]} for i in range(0, 5)],
        'pageInfo': {
            'startCursor': cursors[0],
            'endCursor': cursors[4],
            'hasPreviousPage': False,
            'hasNextPage': False,
        },
    }
    assert conn.to_dict() == expected
def test_no_elements_cursors_cross():
    """Crossed before/after cursors produce an empty connection."""
    conn = connectionFromArray(letters,
                               before='YXJyYXljb25uZWN0aW9uOjI=',
                               after='YXJyYXljb25uZWN0aW9uOjQ=')
    assert conn.to_dict() == {
        'edges': [],
        'pageInfo': {
            'startCursor': None,
            'endCursor': None,
            'hasPreviousPage': False,
            'hasNextPage': False,
        },
    }
def test_cursorForObjectInConnection_member_object():
    """A member of the array maps to the cursor at its index."""
    assert cursorForObjectInConnection(letters, 'B') == 'YXJyYXljb25uZWN0aW9uOjE='
def test_cursorForObjectInConnection_non_member_object():
    """An object absent from the array has no cursor."""
    # Fix: use an accurate local name (the old one claimed to hold B's
    # cursor) and idiomatic `is None` instead of `== None` (PEP 8).
    missing_cursor = cursorForObjectInConnection(letters, 'F')
    assert missing_cursor is None
|
|
import operator
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
)
import pandas._testing as tm
from pandas.tests.apply.common import frame_transform_kernels
from pandas.tests.frame.common import zip_frames
def unpack_obj(obj, klass, axis):
    """
    Helper to ensure we have the right type of object for a test parametrized
    over frame_or_series.

    DataFrame passes through untouched; anything else is narrowed to the
    "A" column, and non-default axes are skipped for Series.
    """
    if klass is DataFrame:
        return obj
    series = obj["A"]
    if axis != 0:
        pytest.skip(f"Test is only for DataFrame with axis={axis}")
    return series
def test_transform_ufunc(axis, float_frame, frame_or_series):
    # GH 35964
    """Passing a raw ufunc to transform matches calling the ufunc directly."""
    obj = unpack_obj(float_frame, frame_or_series, axis)
    with np.errstate(all="ignore"):
        expected = np.sqrt(obj)
    # ufunc
    result = obj.transform(np.sqrt, axis=axis)
    tm.assert_equal(result, expected)
@pytest.mark.parametrize("op", frame_transform_kernels)
def test_transform_groupby_kernel(axis, float_frame, op, request):
    # GH 35964
    """transform with a groupby kernel name matches groupby().transform.

    Checked twice: once on the plain frame, and once after adding a
    duplicate column so the frame is backed by multiple blocks.
    """
    args = [0.0] if op == "fillna" else []

    def _check(frame):
        # Put every row/column into a single group along the requested axis.
        if axis == 0 or axis == "index":
            ones = np.ones(frame.shape[0])
        else:
            ones = np.ones(frame.shape[1])
        expected = frame.groupby(ones, axis=axis).transform(op, *args)
        result = frame.transform(op, axis, *args)
        tm.assert_frame_equal(result, expected)

    _check(float_frame)

    # same thing, but ensuring we have multiple blocks
    assert "E" not in float_frame.columns
    float_frame["E"] = float_frame["A"].copy()
    assert len(float_frame._mgr.arrays) > 1
    _check(float_frame)
@pytest.mark.parametrize(
    "ops, names",
    [
        ([np.sqrt], ["sqrt"]),
        ([np.abs, np.sqrt], ["absolute", "sqrt"]),
        (np.array([np.sqrt]), ["sqrt"]),
        (np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),
    ],
)
def test_transform_listlike(axis, float_frame, ops, names):
    # GH 35964
    """List-like ops produce a MultiIndex on the transformed axis."""
    if axis in {0, "index"}:
        other_axis = 1
    else:
        other_axis = 0
    with np.errstate(all="ignore"):
        expected = zip_frames([op(float_frame) for op in ops], axis=other_axis)
    if other_axis == 1:
        expected.columns = MultiIndex.from_product([float_frame.columns, names])
    else:
        expected.index = MultiIndex.from_product([float_frame.index, names])
    result = float_frame.transform(ops, axis=axis)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ops", [[], np.array([])])
def test_transform_empty_listlike(float_frame, ops, frame_or_series):
    """An empty list-like of functions is rejected."""
    obj = unpack_obj(float_frame, frame_or_series, 0)
    msg = "No transform functions were provided"
    with pytest.raises(ValueError, match=msg):
        obj.transform(ops)
@pytest.mark.parametrize("box", [dict, Series])
def test_transform_dictlike(axis, float_frame, box):
    # GH 35964
    """Dict-like ops only transform the requested label."""
    if axis in (0, "index"):
        label = float_frame.columns[0]
        expected = float_frame[[label]].transform(np.abs)
    else:
        label = float_frame.index[0]
        expected = float_frame.iloc[[0]].transform(np.abs)
    result = float_frame.transform(box({label: np.abs}), axis=axis)
    tm.assert_frame_equal(result, expected)
def test_transform_dictlike_mixed():
    # GH 40018 - mix of lists and non-lists in values of a dictionary
    """Dict values may mix a list of ops for one column with a bare op."""
    frame = DataFrame({"a": [1, 2], "b": [1, 4], "c": [1, 4]})
    result = frame.transform({"b": ["sqrt", "abs"], "c": "sqrt"})
    expected_columns = MultiIndex(
        [("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]
    )
    expected = DataFrame([[1.0, 1, 1.0], [2.0, 4, 2.0]], columns=expected_columns)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "ops",
    [
        {},
        {"A": []},
        {"A": [], "B": "cumsum"},
        {"A": "cumsum", "B": []},
        {"A": [], "B": ["cumsum"]},
        {"A": ["cumsum"], "B": []},
    ],
)
def test_transform_empty_dictlike(float_frame, ops, frame_or_series):
    """Dict-likes with no usable functions anywhere are rejected."""
    obj = unpack_obj(float_frame, frame_or_series, 0)
    msg = "No transform functions were provided"
    with pytest.raises(ValueError, match=msg):
        obj.transform(ops)
@pytest.mark.parametrize("use_apply", [True, False])
def test_transform_udf(axis, float_frame, use_apply, frame_or_series):
    # GH 35964
    """transform falls back between the apply path and the whole-object path."""
    obj = unpack_obj(float_frame, frame_or_series, axis)

    # transform uses UDF either via apply or passing the entire DataFrame
    def func(x):
        # transform is using apply iff x is not a DataFrame
        if use_apply == isinstance(x, frame_or_series):
            # Force transform to fallback
            raise ValueError
        return x + 1

    result = obj.transform(func, axis=axis)
    tm.assert_equal(result, obj + 1)
@pytest.mark.parametrize("method", ["abs", "shift", "pct_change", "cumsum", "rank"])
def test_transform_method_name(method):
    # GH 19760
    """Passing a method name string is equivalent to calling that method."""
    df = DataFrame({"A": [-1, 2]})
    expected = operator.methodcaller(method)(df)
    tm.assert_frame_equal(df.transform(method), expected)
# Kernels that tolerate object dtype; every other transform kernel is
# expected to raise on an all-object frame in the tests below.
wont_fail = ["ffill", "bfill", "fillna", "pad", "backfill", "shift"]
frame_kernels_raise = [x for x in frame_transform_kernels if x not in wont_fail]
@pytest.mark.parametrize("op", [*frame_kernels_raise, lambda x: x + 1])
def test_transform_bad_dtype(op, frame_or_series, request):
    # GH 35964
    """Transform kernels raise TypeError on all-object input."""
    if op == "rank":
        request.node.add_marker(
            pytest.mark.xfail(
                raises=ValueError, reason="GH 40418: rank does not raise a TypeError"
            )
        )

    # DataFrame that will fail on most transforms
    obj = DataFrame({"A": 3 * [object]})
    if frame_or_series is not DataFrame:
        obj = obj["A"]

    # tshift is deprecated
    warn = FutureWarning if op == "tshift" else None
    with tm.assert_produces_warning(warn):
        with pytest.raises(TypeError, match="unsupported operand|not supported"):
            obj.transform(op)

    # Wrapped forms route through the "Transform function failed" path.
    for wrapped in ([op], {"A": op}, {"A": [op]}):
        with pytest.raises(TypeError, match="Transform function failed"):
            obj.transform(wrapped)
@pytest.mark.parametrize("op", frame_kernels_raise)
def test_transform_partial_failure_typeerror(op):
    # GH 35964
    """Columns whose kernel raises TypeError are dropped; the rest survive."""
    if op == "rank":
        pytest.skip("GH 40418: rank does not raise a TypeError")

    # Using object makes most transform kernels fail
    df = DataFrame({"A": 3 * [object], "B": [1, 2, 3]})

    cases = [
        ([op], df[["B"]].transform([op])),
        ({"A": op, "B": op}, df[["B"]].transform({"B": op})),
        ({"A": [op], "B": [op]}, df[["B"]].transform({"B": [op]})),
        ({"A": [op, "shift"], "B": [op]}, df.transform({"A": ["shift"], "B": [op]})),
    ]
    for func, expected in cases:
        tm.assert_equal(df.transform(func), expected)
def test_transform_partial_failure_valueerror():
    # GH 40211
    """Non-TypeError failures drop the column but emit a FutureWarning."""
    match = ".*did not transform successfully and did not raise a TypeError"

    def raiser(x):
        # Fails only for the small-sum column ("A"); "B" survives.
        if np.sum(np.sum(x)) < 10:
            raise ValueError
        return x

    df = DataFrame({"A": [1, 2, 3], "B": [400, 500, 600]})

    expected = df[["B"]].transform([raiser])
    with tm.assert_produces_warning(FutureWarning, match=match):
        result = df.transform([raiser])
    tm.assert_equal(result, expected)

    expected = df[["B"]].transform({"B": raiser})
    with tm.assert_produces_warning(FutureWarning, match=match):
        result = df.transform({"A": raiser, "B": raiser})
    tm.assert_equal(result, expected)

    expected = df[["B"]].transform({"B": [raiser]})
    with tm.assert_produces_warning(FutureWarning, match=match):
        result = df.transform({"A": [raiser], "B": [raiser]})
    tm.assert_equal(result, expected)

    expected = df.transform({"A": ["shift"], "B": [raiser]})
    with tm.assert_produces_warning(FutureWarning, match=match, check_stacklevel=False):
        result = df.transform({"A": [raiser, "shift"], "B": [raiser]})
    tm.assert_equal(result, expected)
@pytest.mark.parametrize("use_apply", [True, False])
def test_transform_passes_args(use_apply, frame_or_series):
    # GH 35964
    """Positional and keyword arguments reach the UDF unchanged."""
    # transform uses UDF either via apply or passing the entire DataFrame
    expected_args = [1, 2]
    expected_kwargs = {"c": 3}

    def checker(x, a, b, c):
        # transform is using apply iff x is not a DataFrame
        if use_apply == isinstance(x, frame_or_series):
            # Force transform to fallback
            raise ValueError
        assert [a, b] == expected_args
        assert c == expected_kwargs["c"]
        return x

    frame_or_series([1]).transform(checker, 0, *expected_args, **expected_kwargs)
def test_transform_empty_dataframe():
    # https://github.com/pandas-dev/pandas/issues/39636
    """transform on an empty frame/column returns an equally empty result."""
    empty = DataFrame([], columns=["col1", "col2"])
    tm.assert_frame_equal(empty.transform(lambda x: x + 10), empty)
    tm.assert_series_equal(empty["col1"].transform(lambda x: x + 10), empty["col1"])
|
|
import numpy as np
import tensorflow as tf
from collections import OrderedDict, defaultdict
from bgan_util import AttributeDict
#### Bayesian DCGAN
from dcgan_ops import *
class BDCGAN(object):
def __init__(self, x_dim, z_dim, dataset_size, batch_size=64, gf_dim=64, df_dim=64,
prior_std=1.0, J=1, M=1, num_classes=1, eta=2e-4,
alpha=0.01, lr=0.0002, optimizer='adam', wasserstein=False,
ml=False, gen_observed=1000):
assert len(x_dim) == 3, "invalid image dims"
c_dim = x_dim[2]
self.is_grayscale = (c_dim == 1)
self.optimizer = optimizer.lower()
self.dataset_size = dataset_size
self.batch_size = batch_size
self.gen_observed = gen_observed
self.x_dim = x_dim
self.z_dim = z_dim
self.gf_dim = gf_dim
self.df_dim = df_dim
self.c_dim = c_dim
self.lr = lr
self.wasserstein = wasserstein
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
self.d_bn3 = batch_norm(name='d_bn3')
self.sd_bn1 = batch_norm(name='sd_bn1')
self.sd_bn2 = batch_norm(name='sd_bn2')
self.sd_bn3 = batch_norm(name='sd_bn3')
self.g_bn0 = batch_norm(name='g_bn0')
self.g_bn1 = batch_norm(name='g_bn1')
self.g_bn2 = batch_norm(name='g_bn2')
self.g_bn3 = batch_norm(name='g_bn3')
self.wasserstein = wasserstein
# Bayes
self.prior_std = prior_std
self.num_gen = J
self.num_mcmc = M
self.eta = eta
self.alpha = alpha
# ML
self.ml = ml
if self.ml:
assert self.num_gen == 1, "cannot have >1 generator for ml"
self.output_height = x_dim[0]
self.output_width = x_dim[1]
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
self.gen_params = AttributeDict()
self.bgen_params = AttributeDict()
self.weight_dims = OrderedDict([("g_h0_lin_W", (self.z_dim, self.gf_dim * 8 * s_h16 * s_w16)),
("g_h0_lin_b", (self.gf_dim * 8 * s_h16 * s_w16,)),
("g_h1_W", (5, 5, self.gf_dim*4, self.gf_dim*8)),
("g_h1_b", (self.gf_dim*4,)),
("g_h2_W", (5, 5, self.gf_dim*2, self.gf_dim*4)),
("g_h2_b", (self.gf_dim*2,)),
("g_h3_W", (5, 5, self.gf_dim*1, self.gf_dim*2)),
("g_h3_b", (self.gf_dim*1,)),
("g_h4_W", (5, 5, self.c_dim, self.gf_dim*1)),
("g_h4_b", (self.c_dim,))])
self.sghmc_noise = {}
self.noise_std = np.sqrt(2 * self.alpha * self.eta)
for name, dim in self.weight_dims.iteritems():
self.sghmc_noise[name] = tf.contrib.distributions.Normal(mu=0., sigma=self.noise_std*tf.ones(self.weight_dims[name]))
self.K = num_classes # 1 means unsupervised, label == 0 always reserved for fake
self.build_bgan_graph()
if self.K > 1:
self.build_test_graph()
def discriminator(self, image, K, reuse=False):
with tf.variable_scope("discriminator") as scope:
if reuse:
scope.reuse_variables()
h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
h1 = lrelu(self.d_bn1(conv2d(h0,
self.df_dim * 2,
name='d_h1_conv')))
h2 = lrelu(self.d_bn2(conv2d(h1,
self.df_dim * 4,
name='d_h2_conv')))
h3 = lrelu(self.d_bn3(conv2d(h2,
self.df_dim * 8,
name='d_h3_conv')))
h4 = linear(tf.reshape(h3, [self.batch_size, -1]), K, 'd_h3_lin')
return tf.nn.softmax(h4), h4
def sup_discriminator(self, image, K, reuse=False):
with tf.variable_scope("sup_discriminator") as scope:
if reuse:
scope.reuse_variables()
h0 = lrelu(conv2d(image, self.df_dim, name='sup_h0_conv'))
h1 = lrelu(self.sd_bn1(conv2d(h0,
self.df_dim * 2,
name='sup_h1_conv')))
h2 = lrelu(self.sd_bn2(conv2d(h1,
self.df_dim * 4,
name='sup_h2_conv')))
h3 = lrelu(self.sd_bn3(conv2d(h2,
self.df_dim * 8,
name='sup_h3_conv')))
h4 = linear(tf.reshape(h3, [self.batch_size, -1]), K, 'sup_h3_lin')
return tf.nn.softmax(h4), h4
def generator(self, z, gen_params):
with tf.variable_scope("generator") as scope:
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
# project `z` and reshape
self.z_, self.h0_w, self.h0_b = linear(z, self.gf_dim * 8 * s_h16 * s_w16, 'g_h0_lin', with_w=True,
matrix=gen_params.g_h0_lin_W, bias=gen_params.g_h0_lin_b)
self.h0 = tf.reshape(self.z_, [-1, s_h16, s_w16, self.gf_dim * 8])
h0 = tf.nn.relu(self.g_bn0(self.h0))
self.h1, self.h1_w, self.h1_b = deconv2d(h0,
[self.batch_size, s_h8, s_w8, self.gf_dim * 4], name='g_h1', with_w=True,
w=gen_params.g_h1_W, biases=gen_params.g_h1_b)
h1 = tf.nn.relu(self.g_bn1(self.h1))
h2, self.h2_w, self.h2_b = deconv2d(h1,
[self.batch_size, s_h4, s_w4, self.gf_dim * 2], name='g_h2', with_w=True,
w=gen_params.g_h2_W, biases=gen_params.g_h2_b)
h2 = tf.nn.relu(self.g_bn2(h2))
h3, self.h3_w, self.h3_b = deconv2d(h2,
[self.batch_size, s_h2, s_w2, self.gf_dim * 1], name='g_h3', with_w=True,
w=gen_params.g_h3_W, biases=gen_params.g_h3_b)
h3 = tf.nn.relu(self.g_bn3(h3))
h4, self.h4_w, self.h4_b = deconv2d(h3,
[self.batch_size, s_h, s_w, self.c_dim], name='g_h4', with_w=True,
w=gen_params.g_h4_W, biases=gen_params.g_h4_b)
return tf.nn.tanh(h4)
def sampler(self, z, gen_params):
with tf.variable_scope("generator") as scope:
scope.reuse_variables()
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
# project `z` and reshape
z_ = linear(z, self.gf_dim * 8 * s_h16 * s_w16, 'g_h0_lin',
matrix=gen_params.g_h0_lin_W, bias=gen_params.g_h0_lin_b)
h0 = tf.reshape(z_, [-1, s_h16, s_w16, self.gf_dim * 8])
h0 = tf.nn.relu(self.g_bn0(h0, train=False))
h1 = deconv2d(h0,
[self.batch_size, s_h8, s_w8, self.gf_dim * 4], name='g_h1',
w=gen_params.g_h1_W, biases=gen_params.g_h1_b)
h1 = tf.nn.relu(self.g_bn1(h1, train=False))
h2 = deconv2d(h1,
[self.batch_size, s_h4, s_w4, self.gf_dim * 2], name='g_h2',
w=gen_params.g_h2_W, biases=gen_params.g_h2_b)
h2 = tf.nn.relu(self.g_bn2(h2, train=False))
h3 = deconv2d(h2,
[self.batch_size, s_h2, s_w2, self.gf_dim * 1], name='g_h3',
w=gen_params.g_h3_W, biases=gen_params.g_h3_b)
h3 = tf.nn.relu(self.g_bn3(h3, train=False))
h4 = deconv2d(h3,
[self.batch_size, s_h, s_w, self.c_dim], name='g_h4',
w=gen_params.g_h4_W, biases=gen_params.g_h4_b)
return tf.nn.tanh(h4)
def _get_optimizer(self, lr):
if self.optimizer == 'adam':
return tf.train.AdamOptimizer(learning_rate=lr, beta1=0.5)
elif self.optimizer == 'sgd':
return tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.5)
else:
raise ValueError("Optimizer must be either 'adam' or 'sgd'")
def build_test_graph(self):
self.test_inputs = tf.placeholder(tf.float32,
[self.batch_size] + self.x_dim, name='real_test_images')
self.lbls = tf.placeholder(tf.float32,
[self.batch_size, self.K], name='real_sup_targets')
self.S, self.S_logits = self.sup_discriminator(self.inputs, self.K)
self.test_D, self.test_D_logits = self.discriminator(self.test_inputs, self.K+1, reuse=True)
self.test_S, self.test_S_logits = self.sup_discriminator(self.test_inputs, self.K, reuse=True)
self.s_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.S_logits,
labels=self.lbls))
t_vars = tf.trainable_variables()
self.sup_vars = [var for var in t_vars if 'sup_' in var.name]
# this is purely supervised
supervised_lr = 0.05 * self.lr
s_opt = self._get_optimizer(supervised_lr)
self.s_optim = s_opt.minimize(self.s_loss, var_list=self.sup_vars)
s_opt_adam = tf.train.AdamOptimizer(learning_rate=supervised_lr, beta1=0.5)
self.s_optim_adam = s_opt_adam.minimize(self.s_loss, var_list=self.sup_vars)
def build_bgan_graph(self):
self.inputs = tf.placeholder(tf.float32,
[self.batch_size] + self.x_dim, name='real_images')
self.labeled_inputs = tf.placeholder(tf.float32,
[self.batch_size] + self.x_dim, name='real_images_w_labels')
self.labels = tf.placeholder(tf.float32,
[self.batch_size, self.K+1], name='real_targets')
self.z = tf.placeholder(tf.float32, [None, self.z_dim], name='z')
#self.z_sum = histogram_summary("z", self.z) TODO looks cool
self.gen_param_list = []
with tf.variable_scope("generator") as scope:
for gi in xrange(self.num_gen):
for m in xrange(self.num_mcmc):
gen_params = AttributeDict()
for name, shape in self.weight_dims.iteritems():
gen_params[name] = tf.get_variable("%s_%04d_%04d" % (name, gi, m),
shape, initializer=tf.random_normal_initializer(stddev=0.02))
self.gen_param_list.append(gen_params)
self.D, self.D_logits = self.discriminator(self.inputs, self.K+1)
self.Dsup, self.Dsup_logits = self.discriminator(self.labeled_inputs, self.K+1, reuse=True)
if self.K == 1:
if self.wasserstein:
self.d_loss_real = tf.reduce_mean(self.D_logits)
else:
# regular GAN
constant_labels = np.zeros((self.batch_size, 2))
constant_labels[:, 1] = 1.0
self.d_loss_real = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.D_logits,
labels=tf.constant(constant_labels)))
else:
self.d_loss_sup = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.Dsup_logits,
labels=self.labels))
self.d_loss_real = -tf.reduce_mean(tf.log((1.0 - self.D[:, 0]) + 1e-8))
self.generation = defaultdict(list)
for gen_params in self.gen_param_list:
self.generation["g_prior"].append(self.gen_prior(gen_params))
self.generation["g_noise"].append(self.gen_noise(gen_params))
self.generation["generators"].append(self.generator(self.z, gen_params))
self.generation["gen_samplers"].append(self.sampler(self.z, gen_params))
D_, D_logits_ = self.discriminator(self.generator(self.z, gen_params), self.K+1, reuse=True)
self.generation["d_logits"].append(D_logits_)
self.generation["d_probs"].append(D_)
all_d_logits = tf.concat(self.generation["d_logits"], 0)
if self.wasserstein:
self.d_loss_fake = -tf.reduce_mean(all_d_logits)
else:
constant_labels = np.zeros((self.batch_size*self.num_gen*self.num_mcmc, self.K+1))
constant_labels[:, 0] = 1.0 # class label indicating it came from generator, aka fake
self.d_loss_fake = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=all_d_logits,
labels=tf.constant(constant_labels)))
t_vars = tf.trainable_variables()
self.d_vars = [var for var in t_vars if 'd_' in var.name]
self.d_loss = self.d_loss_real + self.d_loss_fake
if not self.ml:
self.d_loss += self.disc_prior() + self.disc_noise()
if self.K > 1:
self.d_loss_semi = self.d_loss_sup + self.d_loss_real + self.d_loss_fake
if not self.ml:
self.d_loss_semi += self.disc_prior() + self.disc_noise()
self.g_vars = []
for gi in xrange(self.num_gen):
for m in xrange(self.num_mcmc):
self.g_vars.append([var for var in t_vars if 'g_' in var.name and "_%04d_%04d" % (gi, m) in var.name])
self.d_learning_rate = tf.placeholder(tf.float32, shape=[])
d_opt = self._get_optimizer(self.d_learning_rate)
self.d_optim = d_opt.minimize(self.d_loss, var_list=self.d_vars)
d_opt_adam = tf.train.AdamOptimizer(learning_rate=self.d_learning_rate, beta1=0.5)
self.d_optim_adam = d_opt_adam.minimize(self.d_loss, var_list=self.d_vars)
clip_d = [w.assign(tf.clip_by_value(w, -0.01, 0.01)) for w in self.d_vars]
self.clip_d = clip_d
if self.K > 1:
self.d_semi_learning_rate = tf.placeholder(tf.float32, shape=[])
d_opt_semi = self._get_optimizer(self.d_semi_learning_rate)
self.d_optim_semi = d_opt_semi.minimize(self.d_loss_semi, var_list=self.d_vars)
d_opt_semi_adam = tf.train.AdamOptimizer(learning_rate=self.d_semi_learning_rate, beta1=0.5)
self.d_optim_semi_adam = d_opt_semi_adam.minimize(self.d_loss_semi, var_list=self.d_vars)
self.g_optims, self.g_optims_adam = [], []
self.g_learning_rate = tf.placeholder(tf.float32, shape=[])
for gi in xrange(self.num_gen*self.num_mcmc):
if self.wasserstein:
g_loss = tf.reduce_mean(self.generation["d_logits"][gi])
else:
g_loss = -tf.reduce_mean(tf.log((1.0 - self.generation["d_probs"][gi][:, 0]) + 1e-8))
if not self.ml:
g_loss += self.generation["g_prior"][gi] + self.generation["g_noise"][gi]
self.generation["g_losses"].append(g_loss)
g_opt = self._get_optimizer(self.g_learning_rate)
self.g_optims.append(g_opt.minimize(g_loss, var_list=self.g_vars[gi]))
g_opt_adam = tf.train.AdamOptimizer(learning_rate=self.g_learning_rate, beta1=0.5)
self.g_optims_adam.append(g_opt_adam.minimize(g_loss, var_list=self.g_vars[gi]))
def gen_prior(self, gen_params):
    """Gaussian prior term over the generator weights.

    Sums the mean of (w / prior_std)^2 over every parameter tensor, then
    normalises by the number of observed generator examples.
    """
    with tf.variable_scope("generator") as scope:
        total = 0.0
        for param in gen_params.values():
            scaled = tf.divide(param, self.prior_std)
            total += tf.reduce_mean(tf.multiply(scaled, scaled))
    return total / self.gen_observed
def gen_noise(self, gen_params):  # for SGHMC
    """SGHMC noise term for the generator.

    Inner product of each generator parameter with a fresh sample from its
    matching noise distribution, normalised by the observed-example count.
    """
    with tf.variable_scope("generator") as scope:
        total = 0.0
        for name, param in gen_params.iteritems():
            total += tf.reduce_sum(param * self.sghmc_noise[name].sample())
    return total / self.gen_observed
def disc_prior(self):
    """Gaussian prior term over the discriminator weights, normalised by
    the dataset size."""
    with tf.variable_scope("discriminator") as scope:
        total = 0.0
        for param in self.d_vars:
            scaled = tf.divide(param, self.prior_std)
            total += tf.reduce_mean(tf.multiply(scaled, scaled))
    return total / self.dataset_size
def disc_noise(self):  # for SGHMC
    """SGHMC noise term for the discriminator weights, normalised by the
    dataset size."""
    with tf.variable_scope("discriminator") as scope:
        total = 0.0
        for param in self.d_vars:
            dist = tf.contrib.distributions.Normal(mu=0., sigma=self.noise_std * tf.ones(param.get_shape()))
            total += tf.reduce_sum(param * dist.sample())
    return total / self.dataset_size
|
|
import os
from MuseParse.tests.testUsingXML.xmlSet import xmlSet, parsePiece
from MuseParse.classes.ObjectHierarchy.TreeClasses.PartNode import PartNode
from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode
from MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode import NoteNode
from MuseParse.classes.ObjectHierarchy.TreeClasses.BaseTree import Search
# Sample score exercised by every test class in this module.
partname = "duration_and_stem_direction.xml"
from MuseParse.SampleMusicXML import testcases
# NOTE(review): __path__._path is an implementation detail of namespace
# packages — confirm it is stable across the supported Python versions.
directory = testcases.__path__._path[0]
# Parsed once at import time; shared by all test classes below.
piece = parsePiece(os.path.join(directory, partname))
class testFile(xmlSet):
    """Smoke tests: the sample piece parses into the expected part/measure tree."""

    def setUp(self):
        xmlSet.setUp(self)
        self.m_num = 32
        self.p_id = "P1"
        self.p_name = "Flute"

    def testParts(self):
        part = piece.getPart(self.p_id)
        self.assertIsInstance(part, PartNode)
        self.assertEqual(self.p_name, part.GetItem().name)

    def testMeasures(self):
        measure = piece.getPart(self.p_id).getMeasure(measure=self.m_num, staff=1)
        self.assertIsInstance(measure, MeasureNode)
class testNoteDurations(xmlSet):
    """Note durations in the sample piece, measure by measure."""

    def setUp(self):
        xmlSet.setUp(self)
        self.m_num = 32
        self.p_id = "P1"
        self.p_name = "Flute"

    def _node(self, measure_num, position):
        # Locate the <position>-th NoteNode in staff 1 of the given measure.
        measure = piece.getPart("P1").getMeasure(measure_num, 1)
        return Search(NoteNode, measure, position)

    def _duration(self, measure_num, position):
        # Duration value carried by the note item at that position.
        return self._node(measure_num, position).GetItem().duration

    def testMeasure1Note1(self):
        self.assertIsInstance(self._node(1, 1), NoteNode)

    def testMeasure1Note1Duration(self):
        self.assertEqual(1, self._duration(1, 1))

    def testMeasure2Notes(self):
        self.assertIsInstance(self._node(2, 3), NoteNode)

    def testMeasure2Note1(self):
        self.assertEqual(2, self._duration(2, 1))

    def testMeasure2Note2(self):
        self.assertEqual(4, self._duration(2, 2))

    def testMeasure2Note3(self):
        self.assertEqual(4, self._duration(2, 3))

    def testMeasure3Notes(self):
        self.assertIsInstance(self._node(3, 7), NoteNode)

    def testMeasure3Note1(self):
        self.assertEqual(8, self._duration(3, 1))

    def testMeasure3Note2(self):
        self.assertEqual(16, self._duration(3, 2))

    def testMeasure3Note3(self):
        self.assertEqual(32, self._duration(3, 3))

    def testMeasure3Note4(self):
        self.assertEqual(64, self._duration(3, 4))

    def testMeasure3Note5(self):
        self.assertEqual(64, self._duration(3, 5))

    def testMeasure3Note6(self):
        self.assertEqual(4, self._duration(3, 6))

    def testMeasure3Note7(self):
        self.assertEqual(2, self._duration(3, 7))
class testStems(xmlSet):
    """Stem presence and direction for notes in the sample piece."""

    def setUp(self):
        xmlSet.setUp(self)
        self.m_num = 32
        self.p_id = "P1"
        self.p_name = "Flute"

    def _note(self, measure_num, position):
        # The note item at <position> in staff 1 of the given measure.
        measure = piece.getPart("P1").getMeasure(measure_num, 1)
        return Search(NoteNode, measure, position).GetItem()

    def testMeasure1(self):
        self.assertFalse(hasattr(self._note(1, 1), "stem"))

    def testMeasure2Note1(self):
        self.assertTrue(hasattr(self._note(2, 1), "stem"))

    def testMeasure2Note1Direction(self):
        self.assertEqual("up", self._note(2, 1).stem.type)

    def testMeasure2Note2(self):
        self.assertTrue(hasattr(self._note(2, 2), "stem"))

    def testMeasure2Note2Direction(self):
        self.assertEqual("up", self._note(2, 2).stem.type)

    def testMeasure2Note3(self):
        self.assertTrue(hasattr(self._note(2, 3), "stem"))

    def testMeasure2Note3Direction(self):
        self.assertEqual("up", self._note(2, 3).stem.type)

    def testMeasure3Note1(self):
        self.assertTrue(hasattr(self._note(3, 1), "stem"))

    def testMeasure3Note1Direction(self):
        self.assertEqual("down", self._note(3, 1).stem.type)

    def testMeasure3Note2(self):
        self.assertTrue(hasattr(self._note(3, 2), "stem"))

    def testMeasure3Note2Direction(self):
        self.assertEqual("down", self._note(3, 2).stem.type)

    def testMeasure3Note3(self):
        self.assertTrue(hasattr(self._note(3, 3), "stem"))

    def testMeasure3Note3Direction(self):
        self.assertEqual("down", self._note(3, 3).stem.type)

    def testMeasure3Note4(self):
        self.assertTrue(hasattr(self._note(3, 4), "stem"))

    def testMeasure3Note4Direction(self):
        self.assertEqual("down", self._note(3, 4).stem.type)
|
|
import codecs
import copy
from decimal import Decimal
from django.apps.registry import Apps
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.utils import six
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    """Schema editor for SQLite.

    SQLite implements very little of ALTER TABLE, so column changes are
    applied by building a replacement table (``_remake_table``), copying the
    rows across, dropping the old table and renaming the new one into place.
    """

    sql_delete_table = "DROP TABLE %(table)s"
    sql_create_inline_fk = "REFERENCES %(to_table)s (%(to_column)s)"

    def quote_value(self, value):
        """Return *value* rendered as an inline SQL literal.

        Needed because table remakes embed default values directly in the
        generated INSERT ... SELECT instead of using query parameters.
        """
        # The backend "mostly works" without this function and there are use
        # cases for compiling Python without the sqlite3 libraries (e.g.
        # security hardening).
        import _sqlite3
        try:
            value = _sqlite3.adapt(value)
        except _sqlite3.ProgrammingError:
            pass
        # Manual emulation of SQLite parameter quoting
        if isinstance(value, type(True)):
            # Checked before the integer branch: bool is a subclass of int.
            return str(int(value))
        elif isinstance(value, (Decimal, float)):
            return str(value)
        elif isinstance(value, six.integer_types):
            return str(value)
        elif isinstance(value, six.string_types):
            # Double single quotes to escape them, per SQL string syntax.
            return "'%s'" % six.text_type(value).replace("\'", "\'\'")
        elif value is None:
            return "NULL"
        elif isinstance(value, (bytes, bytearray, six.memoryview)):
            # Bytes are only allowed for BLOB fields, encoded as string
            # literals containing hexadecimal data and preceded by a single "X"
            # character:
            # value = b'\x01\x02' => value_hex = b'0102' => return X'0102'
            value = bytes(value)
            hex_encoder = codecs.getencoder('hex_codec')
            value_hex, _length = hex_encoder(value)
            # Use 'ascii' encoding for b'01' => '01', no need to use force_text here.
            return "X'%s'" % value_hex.decode('ascii')
        else:
            raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value)))

    def _remake_table(self, model, create_fields=[], delete_fields=[], alter_fields=[], override_uniques=None,
                      override_indexes=None):
        """
        Shortcut to transform a model from old_model into new_model
        """
        # NOTE(review): mutable default arguments ([]) — harmless here since
        # the lists are only iterated, never mutated, but fragile if that
        # ever changes.
        # Work out the new fields dict / mapping
        body = {f.name: f for f in model._meta.local_fields}
        # Since mapping might mix column names and default values,
        # its values must be already quoted.
        mapping = {f.column: self.quote_name(f.column) for f in model._meta.local_fields}
        # This maps field names (not columns) for things like unique_together
        rename_mapping = {}
        # If any of the new or altered fields is introducing a new PK,
        # remove the old one
        restore_pk_field = None
        if any(f.primary_key for f in create_fields) or any(n.primary_key for o, n in alter_fields):
            for name, field in list(body.items()):
                if field.primary_key:
                    # Temporarily demote the old PK; restored at the end so
                    # the in-memory model object is left unchanged.
                    field.primary_key = False
                    restore_pk_field = field
                    if field.auto_created:
                        del body[name]
                        del mapping[field.column]
        # Add in any created fields
        for field in create_fields:
            body[field.name] = field
            # Choose a default and insert it into the copy map
            if not field.many_to_many:
                mapping[field.column] = self.quote_value(
                    self.effective_default(field)
                )
        # Add in any altered fields
        for (old_field, new_field) in alter_fields:
            body.pop(old_field.name, None)
            mapping.pop(old_field.column, None)
            body[new_field.name] = new_field
            if old_field.null and not new_field.null:
                # Becoming NOT NULL: replace existing NULLs with the default
                # while copying the data across.
                case_sql = "coalesce(%(col)s, %(default)s)" % {
                    'col': self.quote_name(old_field.column),
                    'default': self.quote_value(self.effective_default(new_field))
                }
                mapping[new_field.column] = case_sql
            else:
                mapping[new_field.column] = self.quote_name(old_field.column)
            rename_mapping[old_field.name] = new_field.name
        # Remove any deleted fields
        for field in delete_fields:
            del body[field.name]
            del mapping[field.column]
            # Remove any implicit M2M tables
            if field.many_to_many and field.rel.through._meta.auto_created:
                return self.delete_model(field.rel.through)
        # Work inside a new app registry
        apps = Apps()
        # Provide isolated instances of the fields to the new model body
        # Instantiating the new model with an alternate db_table will alter
        # the internal references of some of the provided fields.
        body = copy.deepcopy(body)
        # Work out the new value of unique_together, taking renames into
        # account
        if override_uniques is None:
            override_uniques = [
                [rename_mapping.get(n, n) for n in unique]
                for unique in model._meta.unique_together
            ]
        # Work out the new value for index_together, taking renames into
        # account
        if override_indexes is None:
            override_indexes = [
                [rename_mapping.get(n, n) for n in index]
                for index in model._meta.index_together
            ]
        # Construct a new model for the new state
        meta_contents = {
            'app_label': model._meta.app_label,
            'db_table': model._meta.db_table + "__new",
            'unique_together': override_uniques,
            'index_together': override_indexes,
            'apps': apps,
        }
        meta = type("Meta", tuple(), meta_contents)
        body['Meta'] = meta
        body['__module__'] = model.__module__
        temp_model = type(model._meta.object_name, model.__bases__, body)
        # Create a new table with that format. We remove things from the
        # deferred SQL that match our table name, too
        self.deferred_sql = [x for x in self.deferred_sql if model._meta.db_table not in x]
        self.create_model(temp_model)
        # Copy data from the old table
        field_maps = list(mapping.items())
        self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
            self.quote_name(temp_model._meta.db_table),
            ', '.join(self.quote_name(x) for x, y in field_maps),
            ', '.join(y for x, y in field_maps),
            self.quote_name(model._meta.db_table),
        ))
        # Delete the old table
        self.delete_model(model, handle_autom2m=False)
        # Rename the new to the old
        self.alter_db_table(temp_model, temp_model._meta.db_table, model._meta.db_table)
        # Run deferred SQL on correct table
        for sql in self.deferred_sql:
            self.execute(sql.replace(temp_model._meta.db_table, model._meta.db_table))
        self.deferred_sql = []
        # Fix any PK-removed field
        if restore_pk_field:
            restore_pk_field.primary_key = True

    def delete_model(self, model, handle_autom2m=True):
        """Drop the model's table.

        With handle_autom2m the base implementation also drops any
        auto-created M2M through tables; _remake_table passes False so the
        replacement's through tables survive.
        """
        if handle_autom2m:
            super(DatabaseSchemaEditor, self).delete_model(model)
        else:
            # Delete the table (and only that)
            self.execute(self.sql_delete_table % {
                "table": self.quote_name(model._meta.db_table),
            })

    def add_field(self, model, field):
        """
        Creates a field on a model.
        Usually involves adding a column, but may involve adding a
        table instead (for M2M fields)
        """
        # Special-case implicit M2M tables
        if field.many_to_many and field.rel.through._meta.auto_created:
            return self.create_model(field.rel.through)
        self._remake_table(model, create_fields=[field])

    def remove_field(self, model, field):
        """
        Removes a field from a model. Usually involves deleting a column,
        but for M2Ms may involve deleting a table.
        """
        # M2M fields are a special case
        if field.many_to_many:
            # For implicit M2M tables, delete the auto-created table
            if field.rel.through._meta.auto_created:
                self.delete_model(field.rel.through)
            # For explicit "through" M2M fields, do nothing
        # For everything else, remake.
        else:
            # It might not actually have a column behind it
            if field.db_parameters(connection=self.connection)['type'] is None:
                return
            self._remake_table(model, delete_fields=[field])

    def _alter_field(self, model, old_field, new_field, old_type, new_type,
                     old_db_params, new_db_params, strict=False):
        """Actually perform a "physical" (non-ManyToMany) field update."""
        # Alter by remaking table
        self._remake_table(model, alter_fields=[(old_field, new_field)])

    def alter_index_together(self, model, old_index_together, new_index_together):
        """
        Deals with a model changing its index_together.
        Note: The input index_togethers must be doubly-nested, not the single-
        nested ["foo", "bar"] format.
        """
        self._remake_table(model, override_indexes=new_index_together)

    def alter_unique_together(self, model, old_unique_together, new_unique_together):
        """
        Deals with a model changing its unique_together.
        Note: The input unique_togethers must be doubly-nested, not the single-
        nested ["foo", "bar"] format.
        """
        self._remake_table(model, override_uniques=new_unique_together)

    def _alter_many_to_many(self, model, old_field, new_field, strict):
        """
        Alters M2Ms to repoint their to= endpoints.
        """
        if old_field.rel.through._meta.db_table == new_field.rel.through._meta.db_table:
            # The field name didn't change, but some options did; we have to propagate this altering.
            # NOTE(review): override_uniques here is a single tuple of names,
            # not the doubly-nested form documented on
            # alter_unique_together — confirm _remake_table accepts both.
            self._remake_table(
                old_field.rel.through,
                alter_fields=[(
                    # We need the field that points to the target model, so we can tell alter_field to change it -
                    # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
                    old_field.rel.through._meta.get_field(old_field.m2m_reverse_field_name()),
                    new_field.rel.through._meta.get_field(new_field.m2m_reverse_field_name()),
                )],
                override_uniques=(new_field.m2m_field_name(), new_field.m2m_reverse_field_name()),
            )
            return
        # Make a new through table
        self.create_model(new_field.rel.through)
        # Copy the data across
        self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
            self.quote_name(new_field.rel.through._meta.db_table),
            ', '.join([
                "id",
                new_field.m2m_column_name(),
                new_field.m2m_reverse_name(),
            ]),
            ', '.join([
                "id",
                old_field.m2m_column_name(),
                old_field.m2m_reverse_name(),
            ]),
            self.quote_name(old_field.rel.through._meta.db_table),
        ))
        # Delete the old through table
        self.delete_model(old_field.rel.through)
|
|
import collections
import itertools
import datetime
from rdflib.namespace import NamespaceManager
from rdflib import Variable, BNode, Graph, ConjunctiveGraph, URIRef, Literal
from rdflib.term import Node
from parserutils import CompValue
import rdflib.plugins.sparql
from rdflib.plugins.sparql.compat import Mapping, MutableMapping
class SPARQLError(Exception):
    """Base class for errors raised during SPARQL evaluation."""

    def __init__(self, msg=None):
        super(SPARQLError, self).__init__(msg)
class NotBoundError(SPARQLError):
    """Raised when a variable is looked up but carries no binding."""

    def __init__(self, msg=None):
        super(NotBoundError, self).__init__(msg)
class AlreadyBound(SPARQLError):
    """Raised when trying to bind a variable that is already bound!"""

    def __init__(self):
        super(AlreadyBound, self).__init__()
class SPARQLTypeError(SPARQLError):
    """Raised when an operation is applied to a value of the wrong type."""

    def __init__(self, msg):
        super(SPARQLTypeError, self).__init__(msg)
class Bindings(MutableMapping):
    """
    A single level of a stack of variable-value bindings.
    Each dict keeps a reference to the dict below it,
    any failed lookup is propagated back
    In python 3.3 this could be a collections.ChainMap
    """

    def __init__(self, outer=None, d=()):
        # d=() instead of the previous mutable [] default: dict(()) == {},
        # so behaviour is identical without the shared-default pitfall.
        self._d = dict(d)
        self.outer = outer

    def __getitem__(self, key):
        try:
            return self._d[key]
        except KeyError:
            # Fall through to the enclosing level, if any.
            if not self.outer:
                raise
            return self.outer[key]

    def __contains__(self, key):
        try:
            self[key]
            return True
        except KeyError:
            return False

    def __setitem__(self, key, value):
        # Always binds at the innermost level; outer levels are read-only.
        self._d[key] = value

    def __delitem__(self, key):
        raise Exception("DelItem is not implemented!")

    def __len__(self):
        # Walks the whole stack, so keys shadowed at several levels are
        # counted once per level (original semantics, kept).
        i = 0
        for x in self:
            i += 1
        return i

    def __iter__(self):
        # Innermost level first, then each enclosing level in turn.
        d = self
        while d is not None:
            for i in dict.__iter__(d._d):
                yield i
            d = d.outer

    def __str__(self):
        # BUG FIX: str.join requires strings; the previous version passed
        # (key, value) tuples and raised TypeError whenever it was called.
        return "Bindings({" + ", ".join(
            "%s: %s" % (k, self[k]) for k in self) + "})"

    def __repr__(self):
        # str(self) instead of unicode(self): equivalent on Python 2 for
        # this ASCII format string, and works on Python 3.
        return str(self)
class FrozenDict(Mapping):
    """
    An immutable hashable dict
    Taken from http://stackoverflow.com/a/2704866/81121
    """

    def __init__(self, *args, **kwargs):
        self._d = dict(*args, **kwargs)
        self._hash = None  # computed lazily by __hash__, then cached

    def __iter__(self):
        return iter(self._d)

    def __len__(self):
        return len(self._d)

    def __getitem__(self, key):
        return self._d[key]

    def __hash__(self):
        # It would have been simpler and maybe more obvious to
        # use hash(tuple(sorted(self._d.iteritems()))) from this discussion
        # so far, but this solution is O(n). I don't know what kind of
        # n we are going to run into, but sometimes it's hard to resist the
        # urge to optimize when it will gain improved algorithmic performance.
        if self._hash is None:
            self._hash = 0
            # PORTABILITY FIX: .iteritems() only exists via the Python-2
            # Mapping mixin; .items() is provided by the ABC on both 2 and 3
            # and yields the same pairs.
            for key, value in self.items():
                self._hash ^= hash(key)
                self._hash ^= hash(value)
        return self._hash

    def project(self, vars):
        """Return a new FrozenDict restricted to the keys in *vars*."""
        return FrozenDict(
            (x for x in self.items() if x[0] in vars))

    def disjointDomain(self, other):
        """True if this dict and *other* share no keys."""
        return not bool(set(self).intersection(other))

    def compatible(self, other):
        """True if every key common to both maps to an equal value."""
        for k in self:
            try:
                if self[k] != other[k]:
                    return False
            except KeyError:
                pass
        return True

    def merge(self, other):
        """Return the union of this dict and *other* as a new FrozenDict."""
        res = FrozenDict(
            itertools.chain(self.items(), other.items()))
        return res

    def __str__(self):
        return str(self._d)

    def __repr__(self):
        return repr(self._d)
class FrozenBindings(FrozenDict):
    """
    An immutable variable->value mapping that also carries the query
    context it was created from (giving access to now/bnodes/prologue).
    """

    def __init__(self, ctx, *args, **kwargs):
        FrozenDict.__init__(self, *args, **kwargs)
        self.ctx = ctx

    def __getitem__(self, key):
        # Plain strings are promoted to Variables; any term that is not a
        # variable or bnode simply evaluates to itself.
        if not isinstance(key, Node):
            key = Variable(key)
        if not type(key) in (BNode, Variable):
            return key
        return self._d[key]

    def project(self, vars):
        """Return a copy restricted to the keys in *vars*."""
        # PORTABILITY FIX (as in FrozenDict): .items() instead of the
        # Python-2-only .iteritems() mixin method.
        return FrozenBindings(
            self.ctx, (x for x in self.items() if x[0] in vars))

    def merge(self, other):
        """Return the union of these bindings and *other*."""
        res = FrozenBindings(
            self.ctx, itertools.chain(self.items(), other.items()))
        return res

    def _now(self):
        return self.ctx.now

    def _bnodes(self):
        return self.ctx.bnodes

    def _prologue(self):
        return self.ctx.prologue

    prologue = property(_prologue)
    bnodes = property(_bnodes)
    now = property(_now)

    def forget(self, before):
        """
        return a frozen dict only of bindings made in self
        since before
        """
        return FrozenBindings(self.ctx, (x for x in self.items() if before[x[0]] is None))

    def remember(self, these):
        """
        return a frozen dict only of bindings in these
        """
        return FrozenBindings(self.ctx, (x for x in self.items() if x[0] in these))
class QueryContext(object):
    """
    Query context - passed along when evaluating the query
    """

    def __init__(self, graph=None, bindings=None):
        self.bindings = bindings or Bindings()
        if isinstance(graph, ConjunctiveGraph):
            self._dataset = graph
            if rdflib.plugins.sparql.SPARQL_DEFAULT_GRAPH_UNION:
                # Query the union of all named graphs as the default graph.
                self.graph = self.dataset
            else:
                self.graph = self.dataset.default_context
        else:
            self._dataset = None
            self.graph = graph
        self.prologue = None
        # Fixed once per context so NOW() is stable during evaluation.
        self.now = datetime.datetime.now()
        # Shared label table: the same bnode label always maps to one BNode.
        self.bnodes = collections.defaultdict(BNode)

    def clone(self, bindings=None):
        """Copy this context (graph, prologue and bnodes are shared),
        optionally seeding it with *bindings*."""
        r = QueryContext(
            self._dataset if self._dataset is not None else self.graph)
        r.prologue = self.prologue
        # NOTE(review): `bindings or self.bindings` means an *empty* bindings
        # argument (e.g. the [] passed by clean()) falls back to copying
        # self.bindings — confirm that is the intended behaviour.
        r.bindings.update(bindings or self.bindings)
        r.graph = self.graph
        r.bnodes = self.bnodes
        return r

    def _get_dataset(self):
        if self._dataset is None:
            raise Exception(
                'You performed a query operation requiring ' +
                'a dataset (i.e. ConjunctiveGraph), but ' +
                'operating currently on a single graph.')
        return self._dataset

    dataset = property(_get_dataset, doc="current dataset")

    def load(self, source, default=False, **kwargs):
        """Load *source* into the default graph or the dataset, probing
        RDF/XML, then N3, then NTriples."""

        def _load(graph, source):
            # Narrowed from bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed while probing formats.
            try:
                return graph.load(source, **kwargs)
            except Exception:
                pass
            try:
                return graph.load(source, format='n3', **kwargs)
            except Exception:
                pass
            try:
                return graph.load(source, format='nt', **kwargs)
            except Exception:
                raise Exception(
                    "Could not load %s as either RDF/XML, N3 or NTriples" % (
                        source))

        if not rdflib.plugins.sparql.SPARQL_LOAD_GRAPHS:
            # we are not loading - if we already know the graph
            # being "loaded", just add it to the default-graph
            if default:
                self.graph += self.dataset.get_context(source)
        else:
            if default:
                _load(self.graph, source)
            else:
                _load(self.dataset, source)

    def __getitem__(self, key):
        # in SPARQL BNodes are just labels
        if not type(key) in (BNode, Variable):
            return key
        try:
            return self.bindings[key]
        except KeyError:
            return None

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def solution(self, vars=None):
        """
        Return a static copy of the current variable bindings as dict
        """
        # .items() instead of the Python-2-only .iteritems() mixin method;
        # same pairs on both Python 2 and 3.
        if vars:
            return FrozenBindings(
                self, ((k, v)
                       for k, v in self.bindings.items()
                       if k in vars))
        else:
            return FrozenBindings(self, self.bindings.items())

    def __setitem__(self, key, value):
        # SPARQL variables are single-assignment: rebinding to a different
        # value is an error.
        if key in self.bindings and self.bindings[key] != value:
            raise AlreadyBound()
        self.bindings[key] = value

    def pushGraph(self, graph):
        """Return a copy of this context evaluating against *graph*."""
        r = self.clone()
        r.graph = graph
        return r

    def push(self):
        """Return a copy with a fresh binding level stacked on top."""
        r = self.clone(Bindings(self.bindings))
        return r

    def clean(self):
        # NOTE(review): clone([]) hits the falsy-fallback documented in
        # clone(), so this currently *copies* the bindings — verify intent.
        return self.clone([])

    def thaw(self, frozenbindings):
        """
        Create a new read/write query context from the given solution
        """
        c = self.clone(frozenbindings)
        return c
class Prologue:
    """
    A class for holding prefixing bindings and base URI information
    """

    def __init__(self):
        self.base = None
        # The namespace manager needs a store behind it, hence the Graph().
        self.namespace_manager = NamespaceManager(Graph())

    def resolvePName(self, prefix, localname):
        """Expand ``prefix:localname`` using the registered namespaces."""
        ns = self.namespace_manager.store.namespace(prefix or "")
        if ns is None:
            raise Exception('Unknown namespace prefix : %s' % prefix)
        return URIRef(ns + (localname or ""))

    def bind(self, prefix, uri):
        """Register (or replace) a namespace prefix."""
        self.namespace_manager.bind(prefix, uri, replace=True)

    def absolutize(self, iri):
        """
        Apply BASE / PREFIXes to URIs
        (and to datatypes in Literals)
        TODO: Move resolving URIs to pre-processing
        """
        if isinstance(iri, CompValue):
            if iri.name == 'pname':
                return self.resolvePName(iri.prefix, iri.localname)
            if iri.name == 'literal':
                return Literal(
                    iri.string, lang=iri.lang,
                    datatype=self.absolutize(iri.datatype))
        elif isinstance(iri, URIRef) and ':' not in iri:
            # Relative reference: resolve it against the BASE.
            return URIRef(iri, base=self.base)
        return iri
class Query:
    """
    A parsed and translated query
    """

    def __init__(self, prologue, algebra):
        # Keep both halves of the translation result together.
        self.prologue, self.algebra = prologue, algebra
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServerSecurityAlertPoliciesOperations(object):
"""ServerSecurityAlertPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.mariadb.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    # Thin container wiring the shared pipeline client, configuration and
    # (de)serializers into this operation group; no I/O happens here.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
def get(
    self,
    resource_group_name,  # type: str
    server_name,  # type: str
    security_alert_policy_name,  # type: Union[str, "_models.SecurityAlertPolicyName"]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ServerSecurityAlertPolicy"
    """Get a server's security alert policy.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param security_alert_policy_name: The name of the security alert policy.
    :type security_alert_policy_name: str or ~azure.mgmt.rdbms.mariadb.models.SecurityAlertPolicyName
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ServerSecurityAlertPolicy, or the result of cls(response)
    :rtype: ~azure.mgmt.rdbms.mariadb.models.ServerSecurityAlertPolicy
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServerSecurityAlertPolicy"]
    # Map auth/not-found/conflict statuses to specific exception types;
    # callers may extend the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-06-01"
    accept = "application/json"
    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
        'securityAlertPolicyName': self._serialize.url("security_alert_policy_name", security_alert_policy_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # Only 200 is a success for this GET; everything else raises.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = self._deserialize('ServerSecurityAlertPolicy', pipeline_response)
    if cls:
        # Custom response hook takes precedence over returning the model.
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/securityAlertPolicies/{securityAlertPolicyName}'}  # type: ignore
def _create_or_update_initial(
    self,
    resource_group_name,  # type: str
    server_name,  # type: str
    security_alert_policy_name,  # type: Union[str, "_models.SecurityAlertPolicyName"]
    parameters,  # type: "_models.ServerSecurityAlertPolicy"
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.ServerSecurityAlertPolicy"]
    """Issue the initial PUT of the create-or-update long-running operation.

    Returns the deserialized policy on 200; on 202 the body is not
    deserialized and None is returned (polling is handled by the caller).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ServerSecurityAlertPolicy"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-06-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
        'securityAlertPolicyName': self._serialize.url("security_alert_policy_name", security_alert_policy_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'ServerSecurityAlertPolicy')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ServerSecurityAlertPolicy', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/securityAlertPolicies/{securityAlertPolicyName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    server_name,  # type: str
    security_alert_policy_name,  # type: Union[str, "_models.SecurityAlertPolicyName"]
    parameters,  # type: "_models.ServerSecurityAlertPolicy"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.ServerSecurityAlertPolicy"]
    """Creates or updates a threat detection policy.

    Long-running operation: issues the initial PUT (unless resuming from a
    continuation token) and wraps the response in an LROPoller that drives
    the operation to completion.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param security_alert_policy_name: The name of the threat detection policy.
    :type security_alert_policy_name: str or ~azure.mgmt.rdbms.mariadb.models.SecurityAlertPolicyName
    :param parameters: The server security alert policy.
    :type parameters: ~azure.mgmt.rdbms.mariadb.models.ServerSecurityAlertPolicy
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either ServerSecurityAlertPolicy or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.mariadb.models.ServerSecurityAlertPolicy]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServerSecurityAlertPolicy"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: perform the initial PUT.  The cls lambda makes the
        # helper hand back the raw pipeline response so the poller can read
        # status codes and headers itself.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            server_name=server_name,
            security_alert_policy_name=security_alert_policy_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # Consumed by the initial call above; drop them so they are not forwarded
    # to the polling method below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response once polling finishes.
        deserialized = self._deserialize('ServerSecurityAlertPolicy', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
        'securityAlertPolicyName': self._serialize.url("security_alert_policy_name", security_alert_policy_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its serialized state.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/securityAlertPolicies/{securityAlertPolicyName}'}  # type: ignore
def list_by_server(
    self,
    resource_group_name,  # type: str
    server_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.ServerSecurityAlertPolicyListResult"]
    """Get the server's threat detection policies.

    Returns a lazy pager: each page is requested from the service only as
    the returned iterator is advanced.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ServerSecurityAlertPolicyListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.rdbms.mariadb.models.ServerSecurityAlertPolicyListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServerSecurityAlertPolicyListResult"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-06-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        headers = {}  # type: Dict[str, Any]
        headers['Accept'] = self._serialize.header("accept", accept, 'str')
        if next_link:
            # Follow-up pages: the service supplies a complete URL.
            return self._client.get(next_link, {}, headers)
        # First page: expand the parameterized route and attach the API version.
        url = self.list_by_server.metadata['url']  # type: ignore
        route_args = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **route_args)
        query = {}  # type: Dict[str, Any]
        query['api-version'] = self._serialize.query("api_version", api_version, 'str')
        return self._client.get(url, query, headers)

    def extract_data(pipeline_response):
        page = self._deserialize('ServerSecurityAlertPolicyListResult', pipeline_response)
        items = page.value
        if cls:
            items = cls(items)
        return page.next_link or None, iter(items)

    def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(get_next, extract_data)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/securityAlertPolicies'}  # type: ignore
|
|
# -*- test-case-name: twisted.test.test_failure -*-
# See also test suite twisted.test.test_pbfailure
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Asynchronous-friendly error mechanism.
See L{Failure}.
"""
# System Imports
import sys
import linecache
import inspect
import opcode
from cStringIO import StringIO
from inspect import getmro
from twisted.python import reflect
# Module-level ordinal: incremented once per Failure constructed so each
# instance gets a unique Failure.count tag (see Failure.__init__).
count = 0
# Number of caller-stack frames shown above the catch-point marker in
# Failure.printTraceback() when framework code is not elided.
traceupLength = 4
class DefaultException(Exception):
    """
    Substitute exception wrapped around a plain string when a string is
    (deprecatedly) passed to L{Failure} - see C{Failure.__init__}.
    """
    pass
def format_frames(frames, write, detail="default"):
    """
    Render a sequence of Failure-style frames through C{write}.

    @param frames: a list of frames as used by C{Failure.frames}: each one is
        a tuple of (funcName, fileName, lineNumber, locals.items(),
        globals.items()).
    @type frames: list
    @param write: callable invoked with each formatted string.
    @type write: callable
    @param detail: one of C{'default'}, C{'brief'}, C{'verbose'} or
        C{'verbose-vars-not-captured'}; the last is what
        C{Failure.printDetailedTraceback} uses when verbose output was
        requested but no variables were captured, so the output explicitly
        warns about the missing data.
    @type detail: string
    @raise ValueError: if C{detail} is not one of the four recognized levels.
    """
    if detail not in ('default', 'brief', 'verbose',
                      'verbose-vars-not-captured'):
        raise ValueError(
            "Detail must be default, brief, verbose, or "
            "verbose-vars-not-captured. (not %r)" % (detail,))
    emit = write
    if detail == "brief":
        # One compact "file:line:function" per frame.
        for funcName, fileName, lineNumber, frameLocals, frameGlobals in frames:
            emit('%s:%s:%s\n' % (fileName, lineNumber, funcName))
        return
    if detail == "default":
        # Mimic the standard interpreter traceback, source line included.
        for funcName, fileName, lineNumber, frameLocals, frameGlobals in frames:
            emit('  File "%s", line %s, in %s\n' % (fileName, lineNumber, funcName))
            emit('    %s\n' % linecache.getline(fileName, lineNumber).strip())
        return
    if detail == "verbose-vars-not-captured":
        for funcName, fileName, lineNumber, frameLocals, frameGlobals in frames:
            emit("%s:%d: %s(...)\n" % (fileName, lineNumber, funcName))
        emit(' [Capture of Locals and Globals disabled (use captureVars=True)]\n')
        return
    # detail == "verbose": dump the captured locals and globals per frame.
    for funcName, fileName, lineNumber, frameLocals, frameGlobals in frames:
        emit("%s:%d: %s(...)\n" % (fileName, lineNumber, funcName))
        emit(' [ Locals ]\n')
        # Note: the repr(val) was (self.pickled and val) or repr(val)))
        for name, val in frameLocals:
            emit("  %s : %s\n" % (name, repr(val)))
        emit(' ( Globals )\n')
        for name, val in frameGlobals:
            emit("  %s : %s\n" % (name, repr(val)))
# slyphon: i have a need to check for this value in trial
# so I made it a module-level constant
# Marker line emitted by Failure.printTraceback() between the caller's stack
# and the frames below the point where the exception was caught.
EXCEPTION_CAUGHT_HERE = "--- <exception caught here> ---"
class NoCurrentExceptionError(Exception):
    """
    Raised when trying to create a Failure from the current interpreter
    exception state and there is no current exception state.
    """
    # Raised from Failure.__init__ when sys.exc_info() reports no exception.
class _Traceback(object):
    """
    Imitation traceback object, suitable for handing to functions in the
    standard library L{traceback} module.

    Locals and globals carried by the input frames are intentionally
    discarded: standard tracebacks never consult them.
    """
    def __init__(self, frames):
        """
        Build a linked chain of fake tracebacks from a list of frames.

        @param frames: [(methodname, filename, lineno, locals, globals), ...]
        """
        assert len(frames) > 0, "Must pass some frames"
        innermost = frames[0]
        remainder = frames[1:]
        funcName, fileName, lineNumber = innermost[0], innermost[1], innermost[2]
        self.tb_frame = _Frame(funcName, fileName)
        self.tb_lineno = lineNumber
        # Recursively chain the remaining frames, mirroring tb_next on a
        # real traceback; the chain ends with None.
        self.tb_next = _Traceback(remainder) if remainder else None
class _Frame(object):
    """
    An imitation stack-frame object, used by L{_Traceback}.

    @ivar f_code: fake L{code<types.CodeType>} object
    @ivar f_globals: fake f_globals dictionary (usually empty)
    @ivar f_locals: fake f_locals dictionary (usually empty)
    """
    def __init__(self, name, filename):
        """
        @param name: method/function name for this frame.
        @type name: C{str}
        @param filename: filename for this frame.
        @type filename: C{str}
        """
        # Empty variable dicts keep the standard traceback machinery happy.
        self.f_globals = {}
        self.f_locals = {}
        self.f_code = _Code(name, filename)
class _Code(object):
"""
A fake code object, used by L{_Traceback} via L{_Frame}.
"""
def __init__(self, name, filename):
self.co_name = name
self.co_filename = filename
class Failure:
    """
    A basic abstraction for an error that has occurred.

    This is necessary because Python's built-in error mechanisms are
    inconvenient for asynchronous communication.

    The C{stack} and C{frame} attributes contain frames. Each frame is a tuple
    of (funcName, fileName, lineNumber, localsItems, globalsItems), where
    localsItems and globalsItems are the contents of
    C{locals().items()}/C{globals().items()} for that frame, or an empty tuple
    if those details were not captured.

    @ivar value: The exception instance responsible for this failure.
    @ivar type: The exception's class.
    @ivar stack: list of frames, innermost last, excluding C{Failure.__init__}.
    @ivar frames: list of frames, innermost first.
    """
    # Flipped to 1 by __getstate__/cleanFailure once captured variables have
    # been replaced with safe string reprs.
    pickled = 0
    stack = None

    # The opcode of "yield" in Python bytecode. We need this in _findFailure in
    # order to identify whether an exception was thrown by a
    # throwExceptionIntoGenerator.
    _yieldOpcode = chr(opcode.opmap["YIELD_VALUE"])

    def __init__(self, exc_value=None, exc_type=None, exc_tb=None,
                 captureVars=False):
        """
        Initialize me with an explanation of the error.

        By default, this will use the current C{exception}
        (L{sys.exc_info}()). However, if you want to specify a
        particular kind of failure, you can pass an exception as an
        argument.

        If no C{exc_value} is passed, then an "original" C{Failure} will
        be searched for. If the current exception handler that this
        C{Failure} is being constructed in is handling an exception
        raised by L{raiseException}, then this C{Failure} will act like
        the original C{Failure}.

        For C{exc_tb} only L{traceback} instances or C{None} are allowed.
        If C{None} is supplied for C{exc_value}, the value of C{exc_tb} is
        ignored, otherwise if C{exc_tb} is C{None}, it will be found from
        execution context (ie, L{sys.exc_info}).

        @param captureVars: if set, capture locals and globals of stack
            frames. This is pretty slow, and makes no difference unless you
            are going to use L{printDetailedTraceback}.
        """
        global count
        count = count + 1
        self.count = count
        self.type = self.value = tb = None
        self.captureVars = captureVars

        #strings Exceptions/Failures are bad, mmkay?
        if isinstance(exc_value, (str, unicode)) and exc_type is None:
            import warnings
            warnings.warn(
                "Don't pass strings (like %r) to failure.Failure (replacing with a DefaultException)." %
                exc_value, DeprecationWarning, stacklevel=2)
            exc_value = DefaultException(exc_value)

        stackOffset = 0

        if exc_value is None:
            # See whether we are re-catching an exception that raiseException
            # or throwExceptionIntoGenerator originally raised.
            exc_value = self._findFailure()

        if exc_value is None:
            # Fall back to the exception currently being handled.
            self.type, self.value, tb = sys.exc_info()
            if self.type is None:
                raise NoCurrentExceptionError()
            # Skip this __init__ frame when recording the stack below.
            stackOffset = 1
        elif exc_type is None:
            if isinstance(exc_value, Exception):
                self.type = exc_value.__class__
            else: #allow arbitrary objects.
                self.type = type(exc_value)
            self.value = exc_value
        else:
            self.type = exc_type
            self.value = exc_value
        if isinstance(self.value, Failure):
            # Wrapping a Failure in a Failure: simply become the original.
            self.__dict__ = self.value.__dict__
            return
        if tb is None:
            if exc_tb:
                tb = exc_tb
#            else:
#                log.msg("Erf, %r created with no traceback, %s %s." % (
#                    repr(self), repr(exc_value), repr(exc_type)))
#                for s in traceback.format_stack():
#                    log.msg(s)

        frames = self.frames = []
        stack = self.stack = []

        # added 2003-06-23 by Chris Armstrong. Yes, I actually have a
        # use case where I need this traceback object, and I've made
        # sure that it'll be cleaned up.
        self.tb = tb

        if tb:
            f = tb.tb_frame
        elif not isinstance(self.value, Failure):
            # we don't do frame introspection since it's expensive,
            # and if we were passed a plain exception with no
            # traceback, it's not useful anyway
            f = stackOffset = None

        while stackOffset and f:
            # This excludes this Failure.__init__ frame from the
            # stack, leaving it to start with our caller instead.
            f = f.f_back
            stackOffset -= 1

        # Keeps the *full* stack. Formerly in spread.pb.print_excFullStack:
        #
        # The need for this function arises from the fact that several
        # PB classes have the peculiar habit of discarding exceptions
        # with bareword "except:"s. This premature exception
        # catching means tracebacks generated here don't tend to show
        # what called upon the PB object.
        while f:
            if captureVars:
                localz = f.f_locals.copy()
                if f.f_locals is f.f_globals:
                    # Module-level frame: locals *are* globals; avoid
                    # recording the same dict twice.
                    globalz = {}
                else:
                    globalz = f.f_globals.copy()
                for d in globalz, localz:
                    if "__builtins__" in d:
                        del d["__builtins__"]
                localz = localz.items()
                globalz = globalz.items()
            else:
                localz = globalz = ()
            # Walking f_back goes outward, so insert at the front to keep
            # the stack ordered innermost-last.
            stack.insert(0, (
                f.f_code.co_name,
                f.f_code.co_filename,
                f.f_lineno,
                localz,
                globalz,
                ))
            f = f.f_back

        while tb is not None:
            f = tb.tb_frame
            if captureVars:
                localz = f.f_locals.copy()
                if f.f_locals is f.f_globals:
                    globalz = {}
                else:
                    globalz = f.f_globals.copy()
                for d in globalz, localz:
                    if "__builtins__" in d:
                        del d["__builtins__"]
                localz = localz.items()
                globalz = globalz.items()
            else:
                localz = globalz = ()
            frames.append((
                f.f_code.co_name,
                f.f_code.co_filename,
                tb.tb_lineno,
                localz,
                globalz,
                ))
            tb = tb.tb_next
        # Qualified names of the exception's MRO, used by check()/trap() to
        # match against both classes and fully-qualified name strings.
        if inspect.isclass(self.type) and issubclass(self.type, Exception):
            parentCs = getmro(self.type)
            self.parents = map(reflect.qual, parentCs)
        else:
            self.parents = [self.type]

    def trap(self, *errorTypes):
        """Trap this failure if its type is in a predetermined list.

        This allows you to trap a Failure in an error callback. It will be
        automatically re-raised if it is not a type that you expect.

        The reason for having this particular API is because it's very useful
        in Deferred errback chains::

            def _ebFoo(self, failure):
                r = failure.trap(Spam, Eggs)
                print 'The Failure is due to either Spam or Eggs!'
                if r == Spam:
                    print 'Spam did it!'
                elif r == Eggs:
                    print 'Eggs did it!'

        If the failure is not a Spam or an Eggs, then the Failure
        will be 'passed on' to the next errback.

        @type errorTypes: L{Exception}
        """
        error = self.check(*errorTypes)
        if not error:
            # Re-raise the Failure itself so the next errback receives it.
            raise self
        return error

    def check(self, *errorTypes):
        """Check if this failure's type is in a predetermined list.

        @type errorTypes: list of L{Exception} classes or
            fully-qualified class names.
        @returns: the matching L{Exception} type, or None if no match.
        """
        for error in errorTypes:
            err = error
            if inspect.isclass(error) and issubclass(error, Exception):
                err = reflect.qual(error)
            if err in self.parents:
                return error
        return None

    def raiseException(self):
        """
        raise the original exception, preserving traceback
        information if available.
        """
        # Three-argument raise (Python 2) keeps the original traceback;
        # _findFailure recognizes this frame to recover the Failure later.
        raise self.type, self.value, self.tb

    def throwExceptionIntoGenerator(self, g):
        """
        Throw the original exception into the given generator,
        preserving traceback information if available.

        @return: The next value yielded from the generator.
        @raise StopIteration: If there are no more values in the generator.
        @raise anything else: Anything that the generator raises.
        """
        return g.throw(self.type, self.value, self.tb)

    def _findFailure(cls):
        """
        Find the failure that represents the exception currently in context.
        """
        tb = sys.exc_info()[-1]
        if not tb:
            return

        secondLastTb = None
        lastTb = tb
        while lastTb.tb_next:
            secondLastTb = lastTb
            lastTb = lastTb.tb_next

        lastFrame = lastTb.tb_frame

        # NOTE: f_locals.get('self') is used rather than
        # f_locals['self'] because psyco frames do not contain
        # anything in their locals() dicts. psyco makes debugging
        # difficult anyhow, so losing the Failure objects (and thus
        # the tracebacks) here when it is used is not that big a deal.

        # handle raiseException-originated exceptions
        if lastFrame.f_code is cls.raiseException.func_code:
            return lastFrame.f_locals.get('self')

        # handle throwExceptionIntoGenerator-originated exceptions
        # this is tricky, and differs if the exception was caught
        # inside the generator, or above it:

        # it is only really originating from
        # throwExceptionIntoGenerator if the bottom of the traceback
        # is a yield.
        # Pyrex and Cython extensions create traceback frames
        # with no co_code, but they can't yield so we know it's okay to just return here.
        if ((not lastFrame.f_code.co_code) or
            lastFrame.f_code.co_code[lastTb.tb_lasti] != cls._yieldOpcode):
            return

        # if the exception was caught above the generator.throw
        # (outside the generator), it will appear in the tb (as the
        # second last item):
        if secondLastTb:
            frame = secondLastTb.tb_frame
            if frame.f_code is cls.throwExceptionIntoGenerator.func_code:
                return frame.f_locals.get('self')

        # if the exception was caught below the generator.throw
        # (inside the generator), it will appear in the frames' linked
        # list, above the top-level traceback item (which must be the
        # generator frame itself, thus its caller is
        # throwExceptionIntoGenerator).
        frame = tb.tb_frame.f_back
        if frame and frame.f_code is cls.throwExceptionIntoGenerator.func_code:
            return frame.f_locals.get('self')

    # Old-style classmethod declaration (pre-decorator idiom).
    _findFailure = classmethod(_findFailure)

    def __repr__(self):
        return "<%s %s>" % (self.__class__, self.type)

    def __str__(self):
        return "[Failure instance: %s]" % self.getBriefTraceback()

    def __getstate__(self):
        """Avoid pickling objects in the traceback.
        """
        if self.pickled:
            return self.__dict__
        c = self.__dict__.copy()

        # Replace captured variable values with safe string reprs so the
        # pickle does not drag arbitrary objects along.
        c['frames'] = [
            [
                v[0], v[1], v[2],
                _safeReprVars(v[3]),
                _safeReprVars(v[4]),
            ] for v in self.frames
        ]

        # added 2003-06-23. See comment above in __init__
        c['tb'] = None

        if self.stack is not None:
            # XXX: This is a band-aid. I can't figure out where these
            # (failure.stack is None) instances are coming from.
            c['stack'] = [
                [
                    v[0], v[1], v[2],
                    _safeReprVars(v[3]),
                    _safeReprVars(v[4]),
                ] for v in self.stack
            ]

        c['pickled'] = 1
        return c

    def cleanFailure(self):
        """Remove references to other objects, replacing them with strings.
        """
        self.__dict__ = self.__getstate__()

    def getTracebackObject(self):
        """
        Get an object that represents this Failure's stack that can be passed
        to traceback.extract_tb.

        If the original traceback object is still present, return that. If this
        traceback object has been lost but we still have the information,
        return a fake traceback object (see L{_Traceback}). If there is no
        traceback information at all, return None.
        """
        if self.tb is not None:
            return self.tb
        elif len(self.frames) > 0:
            return _Traceback(self.frames)
        else:
            return None

    def getErrorMessage(self):
        """Get a string of the exception which caused this Failure."""
        if isinstance(self.value, Failure):
            return self.value.getErrorMessage()
        return reflect.safe_str(self.value)

    def getBriefTraceback(self):
        io = StringIO()
        self.printBriefTraceback(file=io)
        return io.getvalue()

    def getTraceback(self, elideFrameworkCode=0, detail='default'):
        io = StringIO()
        self.printTraceback(file=io, elideFrameworkCode=elideFrameworkCode, detail=detail)
        return io.getvalue()

    def printTraceback(self, file=None, elideFrameworkCode=False, detail='default'):
        """
        Emulate Python's standard error reporting mechanism.

        @param file: If specified, a file-like object to which to write the
            traceback.

        @param elideFrameworkCode: A flag indicating whether to attempt to
            remove uninteresting frames from within Twisted itself from the
            output.

        @param detail: A string indicating how much information to include
            in the traceback. Must be one of C{'brief'}, C{'default'}, or
            C{'verbose'}.
        """
        if file is None:
            # 'log' is the sibling module imported at the bottom of this file.
            file = log.logerr
        w = file.write

        if detail == 'verbose' and not self.captureVars:
            # We don't have any locals or globals, so rather than show them as
            # empty make the output explicitly say that we don't have them at
            # all.
            formatDetail = 'verbose-vars-not-captured'
        else:
            formatDetail = detail

        # Preamble
        if detail == 'verbose':
            w( '*--- Failure #%d%s---\n' %
               (self.count,
                (self.pickled and ' (pickled) ') or ' '))
        elif detail == 'brief':
            if self.frames:
                hasFrames = 'Traceback'
            else:
                hasFrames = 'Traceback (failure with no frames)'
            w("%s: %s: %s\n" % (
                    hasFrames,
                    reflect.safe_str(self.type),
                    reflect.safe_str(self.value)))
        else:
            w( 'Traceback (most recent call last):\n')

        # Frames, formatted in appropriate style
        if self.frames:
            if not elideFrameworkCode:
                # Show the caller's stack above the catch-point marker.
                format_frames(self.stack[-traceupLength:], w, formatDetail)
                w("%s\n" % (EXCEPTION_CAUGHT_HERE,))
            format_frames(self.frames, w, formatDetail)
        elif not detail == 'brief':
            # Yeah, it's not really a traceback, despite looking like one...
            w("Failure: ")

        # postamble, if any
        if not detail == 'brief':
            # Unfortunately, self.type will not be a class object if this
            # Failure was created implicitly from a string exception.
            # qual() doesn't make any sense on a string, so check for this
            # case here and just write out the string if that's what we
            # have.
            if isinstance(self.type, (str, unicode)):
                w(self.type + "\n")
            else:
                w("%s: %s\n" % (reflect.qual(self.type),
                                reflect.safe_str(self.value)))
        # chaining
        if isinstance(self.value, Failure):
            # TODO: indentation for chained failures?
            file.write(" (chained Failure)\n")
            self.value.printTraceback(file, elideFrameworkCode, detail)
        if detail == 'verbose':
            w('*--- End of Failure #%d ---\n' % self.count)

    def printBriefTraceback(self, file=None, elideFrameworkCode=0):
        """Print a traceback as densely as possible.
        """
        self.printTraceback(file, elideFrameworkCode, detail='brief')

    def printDetailedTraceback(self, file=None, elideFrameworkCode=0):
        """Print a traceback with detailed locals and globals information.
        """
        self.printTraceback(file, elideFrameworkCode, detail='verbose')
def _safeReprVars(varsDictItems):
    """
    Convert a list of (name, object) pairs into (name, repr) pairs.

    L{twisted.python.reflect.safe_repr} is used to generate each repr, so a
    faulty C{__repr__} method can never raise out of this function.

    @param varsDictItems: a sequence of (name, value) pairs as returned by e.g.
        C{locals().items()}.
    @returns: a sequence of (name, repr) pairs.
    """
    safePairs = []
    for (name, obj) in varsDictItems:
        safePairs.append((name, reflect.safe_repr(obj)))
    return safePairs
# slyphon: make post-morteming exceptions tweakable

# Module-level switch: when True, _debuginit drops into pdb post-mortem for
# exceptions captured implicitly from the interpreter state.
DO_POST_MORTEM = True

def _debuginit(self, exc_value=None, exc_type=None, exc_tb=None,
               captureVars=False,
               Failure__init__=Failure.__init__.im_func):
    """
    Initialize failure object, possibly spawning pdb.

    Drop-in replacement for C{Failure.__init__} (installed by
    L{startDebugMode}); the real initializer is bound as a default argument
    so it survives the monkey-patch.
    """
    if (exc_value, exc_type, exc_tb) == (None, None, None):
        exc = sys.exc_info()
        # Don't post-mortem Failures wrapping Failures of our own class.
        if not exc[0] == self.__class__ and DO_POST_MORTEM:
            try:
                strrepr = str(exc[1])
            except:
                strrepr = "broken str"
            print "Jumping into debugger for post-mortem of exception '%s':" % (strrepr,)
            import pdb
            pdb.post_mortem(exc[2])
    # Delegate to the original (pre-patch) initializer.
    Failure__init__(self, exc_value, exc_type, exc_tb, captureVars)
def startDebugMode():
    """Enable debug hooks for Failures."""
    # Monkey-patch Failure so implicitly-created Failures can spawn a pdb
    # post-mortem (see _debuginit).  There is no corresponding "stop".
    Failure.__init__ = _debuginit
# Sibling imports - at the bottom and unqualified to avoid unresolvable
# circularity
import log
|
|
"""
This file is a wrapper around figleaf and will start/stop coverage as
needed. It also includes a method for generating the HTML reports.
"""
import os
import random
import figleaf
import pickle
from glob import glob
from gppylib import gplog
from gppylib.commands.base import Command, LOCAL, REMOTE, ExecutionContext, RemoteExecutionContext, WorkerPool
from gppylib.commands.unix import RemoveFiles, Scp
from gppylib.operations import Operation
from gppylib.operations.unix import ListFiles, ListRemoteFiles, MakeDir
# Shared module logger for all coverage operations below.
logger = gplog.get_default_logger()

# Basename of the pickled figleaf coverage data file written into each
# coverage directory (see GpFigleafCoverage and FinalizeCoverage).
COVERAGE_FILENAME = 'cover.out'
#------------------------------------------------------------------------------
class GpWriteFigleafCoverageHtml(Command):
    """Command wrapper around the figleaf2html tool: renders previously
    collected coverage data into HTML reports on disk."""

    def __init__(self, name, filename, directory, ctxt=LOCAL, remoteHost=None):
        gphome = os.getenv("GPHOME", None)
        if not gphome:
            raise Exception('GPHOME environment variable not set.')
        # figleaf2html ships under $GPHOME's bundled python libraries.
        tool = os.path.normpath(gphome + '/lib/python/figleaf/figleaf2html')
        cmdStr = "%s -d %s %s" % (tool, directory, filename)
        Command.__init__(self, name, cmdStr, ctxt, remoteHost)

    @staticmethod
    def local(name, coverfile, directory):
        """Run figleaf2html locally and validate that it succeeded."""
        cmd = GpWriteFigleafCoverageHtml(name, coverfile, directory)
        cmd.run(validateAfter=True)
#------------------------------------------------------------------------------
# TODO: We should not allow this class to be instantiated. It offers static
# functionality, and its exposed methods should reflect that.
class GpFigleafCoverage:
    """
    Distributed code coverage, built atop figleaf.

    Figleaf code coverage is a two-phase process: recording and reporting. Recording simply involves
    starting and stopping instrumentation. This results in a pickled data file in a designated location
    on disk. (The distributed adaptation here of figleaf relies on this point.) Lastly, we invoke
    figleaf2html via the Command above to produce html from the recorded data.

    Like figleaf, GpFigleafCoverage is a similar two-phase process: enable recording and enable reporting.
    To enable recording, gppylib must be *reactive* to coverage requests; in other words, the entry points to gppylib
    must invoke GpFigleafCoverage. Currently, there are two such entry points: gppylib.mainUtils.simple_main and
    sbin/gpoperation.py. Moreover, gppylib must be *proactive* to propagate requests to subprocesses or remote processes.
    This is accomplished below by hooking gppylib.commands.base.ExecutionContext, and its inherited classes, in order
    to propagate a couple of key environment variables needed below: USE_FIGLEAF, FIGLEAF_DIR, and FIGLEAF_PID.

    To enable reporting, we must aggregate the data that the various python interpreters across subprocesses
    and remote processes had generated. This Operaiton will rely on the knowledge of how figleaf resultant data is stored
    on disk. For more detail, see FinalizeCoverage below.

    It will help to explain how recording and reporting come together. GpFigleafCoverage recording is expected to produce,
    and its reporting is dependent upon, the following directory structure:
    <base>/*.out,*.html           - Global coverage data, aggregated across multiple runs
    <base>/<pid>/*.out,*.html     - Coverage data pertaining to <pid>, where <pid> is the
                                    process id of the originating python program, on the master
    <base>/<pid>/<comp>/*.out,*html - Coverage data pertaining to some subprocess or remote process
                                      that is invoked as a subcomponent of the overall program given by <pid>
    For clarity, the rest of the code will adopt the following coding convention:
    base_dir := <base>
    pid_dir := <base>/<pid>
    comp_dir := <base>/<pid>/<comp>
    """
    # TODO: change directory structure to something more human-readable
    # How about <base>/<program_name><pid>/<program_name><rand>/*.out,*.html ?

    def __init__(self):
        try:
            self.directory = os.getenv('FIGLEAF_DIR', None)
            if self.directory is None:
                self.directory = os.path.normpath(os.path.expanduser("~") + '/.figleaf')
            self.my_pid = str(os.getpid())
            # FIGLEAF_PID carries the originating (master) pid down to
            # subprocesses; in the originating process the two are equal.
            self.main_pid = os.getenv('FIGLEAF_PID', self.my_pid)
            randstring = ''.join(random.choice('0123456789') for x in range(20))
            # <base>/<pid>/<comp>/cover.out, per the class docstring.
            self.filename = os.path.join(self.directory, self.main_pid, randstring, COVERAGE_FILENAME)
            self.running = False
        except Exception, e:
            # NOTE(review): swallowing here can leave the instance half-built
            # (e.g. self.filename unset), so later start()/stop() may raise
            # AttributeError -- confirm this best-effort behavior is intended.
            logger.exception('Error initializing code coverage')

    def start(self):
        """Starts coverage collection if the environment variable USE_FIGLEAF is set."""
        try:
            if os.getenv('USE_FIGLEAF', None):
                logger.info('Code coverage will be generated')
                MakeDir(os.path.dirname(self.filename)).run()
                self.running = True
                # Propagate the coverage settings to any subprocesses or
                # remote processes spawned through ExecutionContext.
                ExecutionContext.propagate_env_map.update({'FIGLEAF_DIR': os.getenv('FIGLEAF_DIR', self.directory),
                                                           'USE_FIGLEAF': 1,
                                                           'FIGLEAF_PID': self.main_pid })
                figleaf.start()
        except Exception, e:
            logger.error('Error starting code coverage: %s' % e)

    def stop(self):
        """Stops code coverage."""
        try:
            if self.running:
                logger.info('Stopping code coverage')
                figleaf.stop()
                # Persist the recorded data to self.filename (pickled).
                figleaf.write_coverage(self.filename)
                self.running = False
                # Stop propagating coverage settings to new children.
                for k in ['FIGLEAF_DIR', 'USE_FIGLEAF', 'FIGLEAF_PID']:
                    del ExecutionContext.propagate_env_map[k]
        except Exception, e:
            logger.error('Error stopping code coverage: %s' % e)

    def generate_report(self):
        """Generates the html reports and puts them in the directory specified."""
        if os.getenv('USE_FIGLEAF', None):
            try:
                directory = os.path.dirname(self.filename)
                logger.info('Generating code coverage HTML reports to %s' % directory)
                GpWriteFigleafCoverageHtml.local('Generate HTML', self.filename, directory)
                # Only the originating (master) process aggregates the
                # cluster-wide results.
                if self.main_pid == self.my_pid:
                    FinalizeCoverage(trail = RemoteExecutionContext.trail,
                                     pid = self.main_pid,
                                     base_dir = self.directory).run()
            except Exception, e:
                logger.exception('Error generating HTML code cover reports.')

    def delete_files(self):
        """Deletes code coverage files."""
        if os.getenv('USE_FIGLEAF', None):
            logger.info('Deleting coverage files...')
            try:
                RemoveFiles.local('Remove coverage file', self.filename)
                directory = os.path.dirname(self.filename)
                RemoveFiles.local('Remove html files', directory + '/*.html')
            # NOTE(review): bare except hides even KeyboardInterrupt/SystemExit;
            # narrowing to Exception would be safer.
            except:
                logger.error('Failed to clean up coverage files')
# The coverage tool to use
# Indirection point so GpCoverage below can be re-pointed at a different
# coverage backend without touching callers.
#if os.getenv('USE_FIGLEAF', None):
GP_COVERAGE_CLASS=GpFigleafCoverage
#else:
#    GP_COVERAGE_CLASS=<some other coverage class>
#------------------------------------------------------------------------------
class GpCoverage(GP_COVERAGE_CLASS):
    """Class the controls code coverage. Right now this inherits from
    GpFigleafCoverage, but in the future we may find a better code coverage
    tool and switch to that. With this class, we can do that without
    touching any of the management utilities or modules."""
    # Intentionally empty: the public name utilities import, decoupled from
    # the concrete backend chosen via GP_COVERAGE_CLASS.
    pass
#------------------------------------------------------------------------------
class FinalizeCoverage(Operation):
    """
    Aggregate coverage data from across the cluster for the current
    (soon-to-complete) process, then fold the result into the global
    coverage data persisted from run to run at <base_dir>/*.out,*.html.
    """

    def __init__(self, trail, pid, base_dir):
        self.trail = trail
        self.pid = pid
        self.base_dir = base_dir

    def execute(self):
        pid_dir = os.path.join(self.base_dir, self.pid)
        pid_cover_path = os.path.join(pid_dir, COVERAGE_FILENAME)
        global_cover_path = os.path.join(self.base_dir, COVERAGE_FILENAME)

        # Bring the per-component coverage dirs back to the master, then
        # merge every partial result into one pid-level mapping.
        CollectCoverage(trail = self.trail, pid_dir = pid_dir).run()
        merged = {}
        for partial in LoadPartialCoverages(pid_dir = pid_dir).run():
            MergeCoverage(input = partial, output = merged).run()
        SaveCoverage(obj = merged, path = pid_cover_path).run()
        GpWriteFigleafCoverageHtml.local('Generate HTML', pid_cover_path, pid_dir)

        # Fold the pid-level data into the persistent global statistics.
        overall = LoadCoverage(global_cover_path).run()
        MergeCoverage(input = merged, output = overall).run()
        SaveCoverage(obj = overall, path = global_cover_path).run()
        GpWriteFigleafCoverageHtml.local('Generate HTML', global_cover_path, self.base_dir)
#------------------------------------------------------------------------------
class CollectCoverage(Operation):
    """
    Simply copy over <base>/<pid>/<comp> dirs back to the master. This may
    be an unnecessary step IF <base> is an NFS mount.
    """
    def __init__(self, trail, pid_dir):
        self.trail = trail
        self.pid_dir = pid_dir
    def execute(self):
        pool = WorkerPool()
        # Component dirs already present locally do not need to be copied.
        given = set(ListFiles(self.pid_dir).run())
        try:
            for host in self.trail:
                available = ListRemoteFiles(self.pid_dir, host).run()
                # 'subdir' (not 'dir') so we don't shadow the builtin.
                to_copy = [subdir for subdir in available if subdir not in given]
                for subdir in to_copy:
                    comp_dir = os.path.join(self.pid_dir, subdir)
                    pool.addCommand(Scp('collect coverage',
                                        srcFile = comp_dir,
                                        srcHost = host,
                                        dstFile = comp_dir,
                                        recursive = True))
            pool.join()
        finally:
            # Always stop the workers, even if a remote listing/copy failed.
            pool.haltWork()
#------------------------------------------------------------------------------
class LoadCoverage(Operation):
    """ Unpickles and returns an object residing at a current path.

    Returns an empty dict when the file is missing or unreadable, so the
    first run of coverage aggregation starts from a clean slate. """
    def __init__(self, path):
        self.path = path
    def execute(self):
        try:
            # Binary mode: pickle streams are byte data, not text.
            with open(self.path, 'rb') as f:
                obj = pickle.load(f)
            return obj
        except (IOError, OSError):
            logger.exception('Failed to un-pickle coverage off disk.')
            return {}
#------------------------------------------------------------------------------
class SaveCoverage(Operation):
    """ Pickles a given object to disk at a designated path """
    def __init__(self, path, obj):
        self.path = path
        self.obj = obj
    def execute(self):
        # Binary mode: pickle streams are byte data, not text.
        with open(self.path, 'wb') as f:
            pickle.dump(self.obj, f)
#------------------------------------------------------------------------------
class LoadPartialCoverages(Operation):
    """ Returns a list of unpickled coverage objects found at
    <base>/<pid>/*/<COVERAGE_FILENAME>. """
    def __init__(self, pid_dir):
        self.pid_dir = pid_dir
    def execute(self):
        pattern = os.path.join(self.pid_dir, '*', COVERAGE_FILENAME)
        partials = []
        for coverage_path in glob(pattern):
            partials.append(LoadCoverage(coverage_path).run())
        return partials
#------------------------------------------------------------------------------
# TODO: Support a parallel merge? Or would there be no point with the Python GIL?
class MergeCoverage(Operation):
    """
    Figleaf coverage data is pickled on disk as a dict of filenames to sets of numbers,
    where each number denotes a covered line number.
    e.g. { "gparray.py" : set(0, 1, 2, ...),
           "operations/dump.py" : set(175, 13, 208, ...),
           ... }
    Here, we merge such an input dict into an output dict. As such, we'll be able to pickle
    the result back to disk and invoke figleaf2html to get consolidated html reports.
    """
    def __init__(self, input, output):
        self.input, self.output = input, output
    def execute(self):
        for filename in self.input:
            if filename not in self.output:
                # Copy rather than alias: a later merge into the same output
                # mutates these sets in place via |=, and must not silently
                # corrupt the caller's input dict.
                self.output[filename] = set(self.input[filename])
            else:
                self.output[filename] |= self.input[filename] # set union
|
|
#!/usr/bin/python
#
# azure_api.py - an Azure plugin for Vcycle
#
# Andrew McNab, University of Manchester.
# Luis Villazon Esteban, CERN.
# Copyright (c) 2013-5. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# o Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# o Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Contacts: Andrew.McNab@cern.ch http://www.gridpp.ac.uk/vcycle/
# Luis.Villazon.Esteban@cern.ch
#
import pprint
import os
import sys
import stat
import time
import json
import shutil
import string
import pycurl
import random
import base64
import StringIO
import tempfile
import calendar
from azure import *
from azure.servicemanagement import *
import vcycle.vacutils
class AzureError(Exception):
  """Exception raised for Azure-specific failures in this Vcycle plugin."""
class AzureSpace(vcycle.BaseSpace):
  """Vcycle space implementation backed by the (classic) Azure Service
  Management API. One instance manages the VMs of one configured space."""

  # Every one of these options is mandatory in the Azure [space ...] section.
  required_options = ['tenancy_name', 'subscription', 'certificate',
                      'location', 'pfx', 'username', 'password']

  def __init__(self, api, spaceName, parser, spaceSectionName):
    # Initialize data structures from configuration files
    # Generic initialization
    vcycle.BaseSpace.__init__(self, api, spaceName, parser, spaceSectionName)
    # Azure-specific initialization: each required option becomes an
    # attribute of the same name; a missing one raises AzureError naming it.
    for option in self.required_options:
      try:
        setattr(self, option, parser.get(spaceSectionName, option))
      except Exception as e:
        raise AzureError(option + ' is required in Azure [space ' + spaceName + '] (' + str(e) + ')')

  def connect(self):
    # Connect to the Azure service.
    # Nothing to do: a fresh ServiceManagementService is built per request.
    pass

  def scanMachines(self):
    """Query Azure compute service for details of machines in this space"""
    # For each machine found in the space, this method is responsible for
    # either (a) ignoring non-Vcycle VMs but updating self.totalMachines
    # or (b) creating a Machine object for the VM in self.machines
    try:
      sms = ServiceManagementService(self.subscription, self.certificate)
      results = sms.list_hosted_services()
    except Exception as ex:
      if 'file' in str(ex):
        raise AzureError("No cert file , check the path.")
      raise AzureError(str(ex))
    for result in results:
      try:
        info = sms.get_hosted_service_properties(result.service_name, True)
      except WindowsAzureMissingResourceError as ex:
        # Was "% don't have vms?" — a bare % is not a placeholder; use %s.
        vcycle.vacutils.logLine("%s don't have vms? " % result.service_name)
        continue
      if len(info.deployments) == 0 : continue
      if not result.service_name.startswith('vcycle-'):
        # Still count VMs that we didn't create and won't manage, to avoid going above space limit
        self.totalMachines += 1
        continue
      uuidStr = str(result.service_name)
      ip = '0.0.0.0'
      createdTime = calendar.timegm(time.strptime(result.hosted_service_properties.date_created, "%Y-%m-%dT%H:%M:%SZ"))
      updatedTime = calendar.timegm(time.strptime(result.hosted_service_properties.date_last_modified, "%Y-%m-%dT%H:%M:%SZ"))
      startedTime = calendar.timegm(time.strptime(result.hosted_service_properties.date_created, "%Y-%m-%dT%H:%M:%SZ"))
      machinetypeName = None
      try:
        status = info.deployments[0].role_instance_list[0].instance_status
        if status in ['Unknown', 'CreatingVM', 'StartingVM', 'CreatingRole', 'StartingRole',
                      'ReadyRole', 'BusyRole', 'Preparing','ProvisioningFailed']:
          state = vcycle.MachineState.starting
        elif status in ['StoppingRole', 'StoppingVM', 'DeletingVM',
                        'StoppedVM', 'RestartingRole','StoppedDeallocated']:
          state = vcycle.MachineState.deleting
        else:
          state = vcycle.MachineState.starting
      except Exception as ex:
        # Unexpected deployment/role structure: log it and assume the VM is
        # still starting rather than aborting the whole scan. (json is
        # imported at module level.)
        vcycle.vacutils.logLine(json.dumps(info,indent=2))
        vcycle.vacutils.logLine(str(ex))
        state = vcycle.MachineState.starting
      self.machines[result.service_name] = vcycle.Machine(name = result.service_name,
                                                          spaceName = self.spaceName,
                                                          state = state,
                                                          ip = ip,
                                                          createdTime = createdTime,
                                                          startedTime = startedTime,
                                                          updatedTime = updatedTime,
                                                          uuidStr = uuidStr,
                                                          machinetypeName = machinetypeName)

  def createMachine(self, machineName, machinetypeName):
    """Create a hosted service, install the ssh certificate, and boot a VM
    of the given machinetype. On any failure the half-created service is
    deleted (best effort) and AzureError is raised with the original cause."""
    try:
      self.__create_service(name=machineName, location=self.location)
      fingerprint, path = self.__add_certificate_to_service(name=machineName, pfx=self.pfx)
      # Read the contextualization file with a context manager so the
      # handle is closed even if b64encode/__create_vm raises.
      with open('/var/lib/vcycle/machines/' + machineName + '/user_data', 'r') as f:
        user_data = base64.b64encode(f.read())
      self.__create_vm(name=machineName,
                       flavor=self.machinetypes[machinetypeName].flavor_name,
                       image=self.machinetypes[machinetypeName].root_image,
                       username= self.username,
                       password= self.password,
                       user_data=user_data,
                       fingerprint=(fingerprint, path))
      vcycle.vacutils.logLine('Created ' + machineName + ' (' + machineName + ') for ' + machinetypeName + ' within ' + self.spaceName)
      self.machines[machineName] = vcycle.shared.Machine(name = machineName,
                                                         spaceName = self.spaceName,
                                                         state = vcycle.MachineState.starting,
                                                         ip = '0.0.0.0',
                                                         createdTime = int(time.time()),
                                                         startedTime = None,
                                                         updatedTime = int(time.time()),
                                                         uuidStr = None,
                                                         machinetypeName = machinetypeName)
    except Exception as ex:
      # Best-effort cleanup, then surface the *original* creation error
      # rather than masking it with a secondary cleanup failure.
      try:
        self.__delete(machineName)
      except Exception:
        pass
      raise AzureError(str(ex))

  def deleteOneMachine(self, machineName):
    """Delete the named hosted service and all its deployments/disks."""
    sms = ServiceManagementService(self.subscription, self.certificate)
    try:
      sms.delete_hosted_service(machineName, True)
    except Exception as e:
      raise vcycle.shared.VcycleError('Cannot delete ' + machineName + ' (' + str(e) + ')')

  def __create_service(self, name="", location=None):
    """ Create a new service
        :param name: Name of the service
        :param location: Location of the service
    """
    sms = ServiceManagementService(self.subscription, self.certificate)
    result = sms.check_hosted_service_name_availability(name)
    if not result:
      raise AzureError("The service name %s is not available" % name)
    try:
      result = sms.create_hosted_service(name, name, name, location)
      sms.wait_for_operation_status(result.request_id)
    except Exception as ex:
      # Report the real failure: the old message wrongly claimed the name
      # was unavailable for *any* creation error.
      raise AzureError("Failed to create service %s (%s)" % (name, str(ex)))

  def __add_certificate_to_service(self, name="", pfx=""):
    """ Adds a certificate into the service.
        The certificate is used to connect via ssh to the VM
        :param name: Name of the service where the certificate will be added
        :param pfx: location on local disk of the certificate to upload
        :return: (thumbprint, certificate_url) of the first certificate,
                 or None if the service reports no certificates
    """
    sms = ServiceManagementService(self.subscription, self.certificate)
    # Close the pfx file promptly instead of leaking the handle.
    with open(pfx) as f:
      result = sms.add_service_certificate(name, base64.b64encode(f.read()), 'pfx', '')
    sms.wait_for_operation_status(result.request_id)
    # 'certificates' (not 'list') so we don't shadow the builtin.
    certificates = sms.list_service_certificates(name)
    for certificate in certificates:
      return certificate.thumbprint, certificate.certificate_url

  def __create_vm(self, name="", flavor="", image="", username="", password="", user_data=None, fingerprint=None):
    """ Creates new VM
        :param name: Name of the new VM
        :param flavor: Flavor to create the VM
        :param image: Image to create the VM
        :param username: username to use to connect to the vm via SSH
        :param password: password to use to connect to the vm via SSH
        :param user_data: contextualization file (base64-encoded)
        :param fingerprint: optional (thumbprint, path) pair for ssh key auth
    """
    sms = ServiceManagementService(self.subscription, self.certificate)
    configuration_set = LinuxConfigurationSet(host_name=name,
                                              user_name=username,
                                              user_password=password,
                                              disable_ssh_password_authentication=False,
                                              custom_data=user_data)
    if fingerprint is not None:
      configuration_set.ssh.public_keys.public_keys.append(PublicKey(fingerprint=fingerprint[0], path=fingerprint[1]))
    # Expose SSH on the standard port so the VM can be reached for debugging.
    network_set = ConfigurationSet()
    network_set.input_endpoints.input_endpoints.append(ConfigurationSetInputEndpoint(name='SSH',
                                                                                     protocol="TCP",
                                                                                     port=22,
                                                                                     local_port=22))
    result = sms.create_virtual_machine_deployment(name,
                                                   name,
                                                   'production',
                                                   name,
                                                   name,
                                                   configuration_set,
                                                   None,
                                                   network_config= network_set,
                                                   role_size=flavor,
                                                   vm_image_name=image,
                                                   provision_guest_agent=True)

  def __delete(self, identifier):
    """Deletes a VM in the provider
       :param identifier: vm identifier
    """
    sms = ServiceManagementService(self.subscription, self.certificate)
    try:
      sms.delete_hosted_service(identifier, True)
    except Exception as e:
      raise AzureError(str(e))
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional custom deserialization callback that callers may
# pass via kwargs['cls']: (pipeline_response, deserialized_model, headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PacketCapturesOperations:
"""PacketCapturesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to build and send the HTTP requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _create_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        parameters: "_models.PacketCapture",
        **kwargs: Any
    ) -> "_models.PacketCaptureResult":
        """Send the initial PUT that creates and starts the packet capture.

        Auto-generated LRO helper invoked by :meth:`begin_create`; the poller
        built there tracks the long-running operation this request starts.
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PacketCapture')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # The service answers 201 Created on success; anything else is an error.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
    async def begin_create(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        parameters: "_models.PacketCapture",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PacketCaptureResult"]:
        """Create and start a packet capture on the specified VM.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :param parameters: Parameters that define the create packet capture operation.
        :type parameters: ~azure.mgmt.network.v2018_10_01.models.PacketCapture
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PacketCaptureResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.PacketCaptureResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial PUT when starting fresh; a continuation
        # token means we are resuming an already-started operation.
        if cont_token is None:
            raw_result = await self._create_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the result model.
            deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
    async def get(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> "_models.PacketCaptureResult":
        """Gets a packet capture session by name.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PacketCaptureResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_10_01.models.PacketCaptureResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Plain GET: only 200 OK is a success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial DELETE for the packet capture session.

        Auto-generated LRO helper invoked by :meth:`begin_delete`.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 202 Accepted (async delete started) or 204 No Content both succeed.
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified packet capture session.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial DELETE when starting fresh; a continuation
        # token means we are resuming an already-started operation.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete has no body to deserialize; only invoke the callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
    async def _stop_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial POST to the /stop action of the packet capture.

        Auto-generated LRO helper invoked by :meth:`begin_stop`.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json"
        # Construct URL
        url = self._stop_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 OK (already stopped) or 202 Accepted (stop started) both succeed.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
    async def begin_stop(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Stops a specified packet capture session.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh LRO: fire the initial stop request.  The lambda 'cls'
            # returns the raw pipeline response so the poller can drive the
            # operation from the initial call's headers/status.
            raw_result = await self._stop_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; must not leak into the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Stop returns no body; only honour the caller's 'cls' hook.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Choose the polling strategy: default ARM polling, no polling, or a
        # caller-supplied polling object.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller rather than starting a new LRO.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'}  # type: ignore
async def _get_status_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> "_models.PacketCaptureQueryStatusResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self._get_status_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
    async def begin_get_status(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PacketCaptureQueryStatusResult"]:
        """Query the status of a running packet capture session.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param packet_capture_name: The name given to the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PacketCaptureQueryStatusResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.PacketCaptureQueryStatusResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureQueryStatusResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh LRO: fire the initial status query.  The lambda 'cls'
            # returns the raw pipeline response so the poller can drive the
            # operation from the initial call's headers/status.
            raw_result = await self._get_status_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; must not leak into the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response body, then apply the caller's
            # optional 'cls' hook.
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Choose the polling strategy; this operation resolves its final
        # state via the Location header ('final-state-via': 'location').
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller rather than starting a new LRO.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'}  # type: ignore
    def list(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.PacketCaptureListResult"]:
        """Lists all packet capture sessions within the specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PacketCaptureListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.PacketCaptureListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build either the first-page GET (from the metadata URL template)
            # or a follow-up GET against a service-provided next link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its query string; send it as-is.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page of results.  This API exposes no
            # continuation link, hence the leading None (no next page token).
            deserialized = self._deserialize('PacketCaptureListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page and raise a mapped/HTTP error on failure.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'}  # type: ignore
|
|
import errno
import glob
import shutil
import subprocess
import os.path
def join(*paths):
    """Join *paths* with '/' and normalize the result.

    Unlike os.path.join, an absolute right-hand component does not discard
    the components to its left; it is simply concatenated and normalized.
    """
    combined = "/".join(paths)
    return os.path.normpath(combined)
def relative(src, dest):
    """Return a relative path from src to dest.

    Fixed: the doctest expected outputs below previously lacked string
    quotes, so they could never pass under doctest.

    >>> relative("/usr/bin", "/tmp/foo/bar")
    '../../tmp/foo/bar'
    >>> relative("/usr/bin", "/usr/lib")
    '../lib'
    >>> relative("/tmp", "/tmp/foo/bar")
    'foo/bar'
    """
    return os.path.relpath(dest, src)
def make_relative_symlink(path):
    """Convert the absolute symlink at *path* into an equivalent relative
    symlink.  Non-links and already-relative links are left untouched."""
    if not os.path.islink(path):
        return
    link = os.readlink(path)
    if not os.path.isabs(link):
        return
    # find the common ancestor directory:
    # strip trailing components off 'path' until the remainder is a string
    # prefix of the link target; 'depth' counts the stripped components.
    # NOTE(review): this is a plain string-prefix test, so ancestor '/a'
    # would also "match" a target under '/ab' — presumably real paths never
    # collide this way, but verify if that ever matters.
    ancestor = path
    depth = 0
    while ancestor and not link.startswith(ancestor):
        ancestor = ancestor.rpartition('/')[0]
        depth += 1
    if not ancestor:
        print("make_relative_symlink() Error: unable to find the common ancestor of %s and its target" % path)
        return
    base = link.partition(ancestor)[2].strip('/')
    # One '..' per stripped directory component; the first strip removed the
    # link's own filename, hence the "> 1" rather than "> 0".
    while depth > 1:
        base = "../" + base
        depth -= 1
    # Replace the absolute link with the computed relative one.
    os.remove(path)
    os.symlink(base, path)
def format_display(path, metadata):
    """Prepare *path* for display to the user: return whichever is shorter
    of the path itself and its spelling relative to TOPDIR."""
    topdir = metadata.getVar("TOPDIR", True)
    rel = relative(topdir, path)
    # Prefer the relative form unless it is actually longer.
    return path if len(rel) > len(path) else rel
def copytree(src, dst):
    """Recursively copy the contents of *src* into *dst*.

    We could use something like shutil.copytree here but it turns out to be
    slow: it takes twice as long copying into an empty directory, and if dst
    already has contents performance can be 15 times slower.  Piping tar to
    tar is fast and also preserves hardlinks between files in the tree.
    """
    bb.utils.mkdirhier(dst)
    # -p preserves permissions; the second tar unpacks in place under dst.
    cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (src, dst)
    check_output(cmd, shell=True, stderr=subprocess.STDOUT)
def copyhardlinktree(src, dst):
    """Make the hard link when possible, otherwise copy.

    Replicates the tree at *src* under *dst*, hard-linking files when both
    sides live on the same filesystem and falling back to copytree()
    otherwise.
    """
    bb.utils.mkdirhier(dst)
    # Nothing to replicate for an empty source directory.
    if os.path.isdir(src) and not len(os.listdir(src)):
        return
    if (os.stat(src).st_dev == os.stat(dst).st_dev):
        # Need to copy directories only with tar first since cp will error if two
        # writers try and create a directory at the same time
        cmd = 'cd %s; find . -type d -print | tar -cf - -C %s -p --files-from - --no-recursion | tar -xf - -C %s' % (src, src, dst)
        check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        # cpio -l hard-links instead of copying; -p is pass-through mode,
        # -d creates leading directories, -u overwrites unconditionally.
        cmd = 'cd %s; find . -print0 | cpio --null -pdlu %s' % (src, dst)
        check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    else:
        # Different filesystems: hard links are impossible, do a plain copy.
        copytree(src, dst)
def remove(path, recurse=True):
    """Equivalent to rm -f (or rm -rf when *recurse* is true).

    Removes every filesystem entry matching the glob pattern *path*;
    missing paths are silently ignored.
    """
    for name in glob.glob(path):
        try:
            os.unlink(name)
        except OSError as exc:
            # unlink() on a directory raises EISDIR on Linux but EPERM on
            # some other POSIX systems (e.g. macOS), so accept either —
            # guarded by an explicit isdir() check so genuine permission
            # errors on regular files still propagate.
            if recurse and os.path.isdir(name) and exc.errno in (errno.EISDIR, errno.EPERM):
                shutil.rmtree(name)
            elif exc.errno != errno.ENOENT:
                raise
def symlink(source, destination, force=False):
    """Create a symbolic link at *destination* pointing to *source*.

    With *force*, any existing *destination* is removed first.  If the link
    already exists and already points at *source*, the EEXIST error is
    swallowed; any other failure propagates.
    """
    try:
        if force:
            remove(destination)
        os.symlink(source, destination)
    except OSError as err:
        # Tolerate only "already exists and points to the same target".
        already_correct = (err.errno == errno.EEXIST
                           and os.readlink(destination) == source)
        if not already_correct:
            raise
class CalledProcessError(Exception):
    """Raised by check_output() when a command exits with a non-zero status.

    Attributes:
        retcode: the process's exit status.
        cmd: the command that was run.
        output: whatever the command wrote to stdout, if captured.
    """

    def __init__(self, retcode, cmd, output=None):
        # Stash the details so callers can inspect them after catching.
        self.retcode = retcode
        self.cmd = cmd
        self.output = output

    def __str__(self):
        details = (self.cmd, self.retcode, self.output)
        return "Command '%s' returned non-zero exit status %d with output %s" % details
# Not needed when we move to python 2.7
def check_output(*popenargs, **kwargs):
    r"""Run a command and return what it wrote to stdout as a byte string.

    All positional and keyword arguments are forwarded to the
    subprocess.Popen constructor.  The 'stdout' keyword is reserved (it is
    used internally); pass stderr=STDOUT to fold stderr into the result.
    Raises CalledProcessError — carrying the return code in 'retcode' and
    the captured output in 'output' — when the command exits non-zero.

    Examples::

        check_output(["ls", "-l", "/dev/null"])
        check_output(["/bin/sh", "-c", "ls missing; exit 0"], stderr=STDOUT)
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    proc = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    captured, _ = proc.communicate()
    rc = proc.poll()
    if rc:
        failed_cmd = kwargs.get("args")
        if failed_cmd is None:
            failed_cmd = popenargs[0]
        raise CalledProcessError(rc, failed_cmd, output=captured)
    return captured
def find(dir, **walkoptions):
    """Recursively walk *dir* and yield the path of every file found.

    Extra keyword arguments are passed straight through to os.walk().
    Yielded paths are absolute when *dir* itself is absolute.
    """
    for root, _subdirs, filenames in os.walk(dir, **walkoptions):
        for filename in filenames:
            yield os.path.join(root, filename)
## realpath() related functions
def __is_path_below(file, root):
    """Return True if *file* lies below *root* (root must end with a
    path separator, as realpath() arranges)."""
    candidate = file + os.path.sep
    return candidate.startswith(root)
def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir):
    """Calculates real path of symlink 'start' + 'rel_path' below
    'root'; no part of 'start' below 'root' must contain symlinks.

    'loop_cnt' bounds symlink expansion; with 'assume_dir' missing
    intermediate directories are tolerated instead of raising ENOENT."""
    have_dir = True
    # Resolve rel_path one component at a time, keeping the running result
    # ('start') inside 'root' throughout.
    for d in rel_path.split(os.path.sep):
        if not have_dir and not assume_dir:
            # The previous component was not a directory, so there is
            # nothing to descend into.
            raise OSError(errno.ENOENT, "no such directory %s" % start)
        if d == os.path.pardir: # '..'
            if len(start) >= len(root):
                # do not follow '..' before root
                start = os.path.dirname(start)
            else:
                # emit warning?
                pass
        else:
            # Resolve this component, following any symlinks, via __realpath.
            (start, have_dir) = __realpath(os.path.join(start, d),
                                      root, loop_cnt, assume_dir)
        assert(__is_path_below(start, root))
    return start
def __realpath(file, root, loop_cnt, assume_dir):
    """Resolve symlinks in 'file' (which must lie below 'root'), following
    at most 'loop_cnt' links before raising ELOOP.

    Returns a (resolved_path, is_directory) tuple.  Symlink targets are
    never followed outside of 'root': absolute targets are re-rooted there.
    """
    while os.path.islink(file) and len(file) >= len(root):
        if loop_cnt == 0:
            raise OSError(errno.ELOOP, file)
        loop_cnt -= 1
        target = os.path.normpath(os.readlink(file))
        if not os.path.isabs(target):
            tdir = os.path.dirname(file)
            assert(__is_path_below(tdir, root))
        else:
            # Absolute link targets are interpreted relative to 'root'.
            tdir = root
        file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir)
    try:
        is_dir = os.path.isdir(file)
    except OSError:
        # Fixed: this assigned the undefined name 'false' (NameError) —
        # 'False' was intended.  Also narrowed the bare 'except:'.
        is_dir = False
    return (file, is_dir)
def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
    """ Returns the canonical path of 'file' with assuming a
    toplevel 'root' directory. When 'use_physdir' is set, all
    preceding path components of 'file' will be resolved first;
    this flag should be set unless it is guaranteed that there is
    no symlink in the path. When 'assume_dir' is not set, missing
    path components will raise an ENOENT error"""
    root = os.path.normpath(root)
    file = os.path.normpath(file)
    if not root.endswith(os.path.sep):
        # letting root end with '/' makes some things easier
        root = root + os.path.sep
    if not __is_path_below(file, root):
        raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
    try:
        if use_physdir:
            # Resolve every component of 'file' relative to root.
            file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
        else:
            # Only resolve 'file' itself, trusting its parent directories.
            file = __realpath(file, root, loop_cnt, assume_dir)[0]
    except OSError as e:
        if e.errno == errno.ELOOP:
            # make ELOOP more readable; without catching it, there will
            # be printed a backtrace with 100s of OSError exceptions
            # else
            raise OSError(errno.ELOOP,
                          "too much recursions while resolving '%s'; loop in '%s'" %
                          (file, e.strerror))
        # Any other failure propagates unchanged.
        raise
    return file
|
|
""" """
# Standard library modules.
import unittest
import logging
import pickle
# Third party modules.
# Local modules.
from pyhmsa.spec.condition.detector import \
(PulseHeightAnalyser, WindowLayer, Window,
_Detector, DetectorCamera, DetectorSpectrometer,
DetectorSpectrometerCL, DetectorSpectrometerWDS, DetectorSpectrometerXEDS)
from pyhmsa.spec.condition.calibration import CalibrationConstant
# Globals and constants variables.
from pyhmsa.spec.condition.detector import \
(PHA_MODE_DIFFERENTIAL, SIGNAL_TYPE_ELS, COLLECTION_MODE_PARALLEL,
XEDS_TECHNOLOGY_SILI)
class TestPulseHeightAnalyser(unittest.TestCase):
    """Tests for PulseHeightAnalyser construction, accessors and pickling.

    Fixture: bias=1750 V, gain=32, base_level=0.5 V, window=4.5 V,
    differential PHA mode.  Fixed: several assertEqual calls passed a stray
    trailing 4 — assertEqual has no 'places' argument, so that value was
    silently used as the failure message.
    """

    def setUp(self):
        super().setUp()
        self.pha = PulseHeightAnalyser(1750, 32, 0.5, 4.5, PHA_MODE_DIFFERENTIAL)

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def testbias(self):
        self.assertTrue(self.pha.bias)
        self.assertAlmostEqual(1750, self.pha.bias, 4)
        self.assertEqual('V', self.pha.bias.unit)

    def testgain(self):
        self.assertTrue(self.pha.gain)
        self.assertAlmostEqual(32, self.pha.gain, 4)

    def testbase_level(self):
        self.assertTrue(self.pha.base_level)
        self.assertAlmostEqual(0.5, self.pha.base_level, 4)
        self.assertEqual('V', self.pha.base_level.unit)

    def testwindow(self):
        self.assertTrue(self.pha.window)
        self.assertAlmostEqual(4.5, self.pha.window, 4)
        self.assertEqual('V', self.pha.window.unit)

    def testmode(self):
        self.assertEqual(PHA_MODE_DIFFERENTIAL, self.pha.mode)
        # Unknown mode strings must be rejected.
        self.assertRaises(ValueError, self.pha.set_mode, 'ABC')

    def testpickle(self):
        # A pickle round-trip must preserve every attribute and its unit.
        s = pickle.dumps(self.pha)
        pha = pickle.loads(s)
        self.assertAlmostEqual(1750, pha.bias, 4)
        self.assertEqual('V', pha.bias.unit)
        self.assertAlmostEqual(32, pha.gain, 4)
        self.assertAlmostEqual(0.5, pha.base_level, 4)
        self.assertEqual('V', pha.base_level.unit)
        self.assertAlmostEqual(4.5, pha.window, 4)
        self.assertEqual('V', pha.window.unit)
        self.assertEqual(PHA_MODE_DIFFERENTIAL, pha.mode)
class TestWindowLayer(unittest.TestCase):
    """Exercise WindowLayer's material/thickness accessors and pickling."""

    def setUp(self):
        super().setUp()
        self.layer = WindowLayer("Al", (100.0, 'nm'))

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def testmaterial(self):
        self.assertEqual('Al', self.layer.material)
        # A layer without a material is invalid.
        self.assertRaises(ValueError, self.layer.set_material, None)

    def testthickness(self):
        self.assertAlmostEqual(100.0, self.layer.thickness, 4)
        self.assertEqual('nm', self.layer.thickness.unit)
        # A layer without a thickness is invalid.
        self.assertRaises(ValueError, self.layer.set_thickness, None)

    def testpickle(self):
        # A pickle round-trip must preserve material, thickness and unit.
        restored = pickle.loads(pickle.dumps(self.layer))
        self.assertEqual('Al', restored.material)
        self.assertAlmostEqual(100.0, restored.thickness, 4)
        self.assertEqual('nm', restored.thickness.unit)
class TestWindow(unittest.TestCase):
    """Tests for Window layer management and pickling.

    Fixed: the '.unit' checks passed a stray trailing 4 to assertEqual —
    assertEqual has no 'places' argument, so it was used as the message.
    """

    def setUp(self):
        super().setUp()
        self.window = Window()
        # Thickness given without a unit: the default is micrometres.
        self.window.append_layer('Al', 0.5)

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def testlayers(self):
        self.assertEqual(1, len(self.window.layers))
        self.assertEqual('Al', self.window.layers[0].material)
        self.assertAlmostEqual(0.5, self.window.layers[0].thickness, 4)
        self.assertEqual('um', self.window.layers[0].thickness.unit)
        # Layers can also be appended as ready-made WindowLayer objects.
        self.window.layers.append(WindowLayer('Be', 0.3))
        self.assertEqual(2, len(self.window.layers))
        self.assertEqual('Be', self.window.layers[1].material)
        self.assertAlmostEqual(0.3, self.window.layers[1].thickness, 4)
        self.assertEqual('um', self.window.layers[1].thickness.unit)

    def testpickle(self):
        # A pickle round-trip must preserve the layer list.
        s = pickle.dumps(self.window)
        window = pickle.loads(s)
        self.assertEqual(1, len(window.layers))
        self.assertEqual('Al', window.layers[0].material)
        self.assertAlmostEqual(0.5, window.layers[0].thickness, 4)
        self.assertEqual('um', window.layers[0].thickness.unit)
class Test_Detector(unittest.TestCase):
    """Tests for the _Detector base condition: each scalar attribute is
    checked for value, default unit, and pickle round-trip.

    Fixed: the pickle test asserted measurement_unit twice (copy-paste
    duplicate); the duplicate is removed.
    """

    def setUp(self):
        super().setUp()
        self.det = _Detector()

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def testsignal_type(self):
        self.det.signal_type = SIGNAL_TYPE_ELS
        self.assertEqual(SIGNAL_TYPE_ELS, self.det.signal_type)
        # Unknown signal types must be rejected.
        self.assertRaises(ValueError, self.det.set_signal_type, 'ABC')

    def testmanufacturer(self):
        self.det.manufacturer = 'Example Inc.'
        self.assertEqual('Example Inc.', self.det.manufacturer)

    def testmodel(self):
        self.det.model = 'Example Model 123'
        self.assertEqual('Example Model 123', self.det.model)

    def testserial_number(self):
        self.det.serial_number = '12345-abc-67890'
        self.assertEqual('12345-abc-67890', self.det.serial_number)

    def testmeasurement_unit(self):
        # None falls back to the default unit 'counts'.
        self.det.measurement_unit = None
        self.assertEqual('counts', self.det.measurement_unit)
        self.det.measurement_unit = 'A'
        self.assertEqual('A', self.det.measurement_unit)

    def testelevation(self):
        self.det.elevation = 45.0
        self.assertAlmostEqual(45.0, self.det.elevation, 4)
        self.assertEqual('degrees', self.det.elevation.unit)

    def testazimuth(self):
        self.det.azimuth = 0.0
        self.assertAlmostEqual(0.0, self.det.azimuth, 4)
        self.assertEqual('degrees', self.det.azimuth.unit)

    def testdistance(self):
        self.det.distance = 50.0
        self.assertAlmostEqual(50.0, self.det.distance, 4)
        self.assertEqual('mm', self.det.distance.unit)

    def testarea(self):
        self.det.area = 20.0
        self.assertAlmostEqual(20.0, self.det.area, 4)
        self.assertEqual('mm2', self.det.area.unit)

    def testsolid_angle(self):
        self.det.solid_angle = 1.0
        self.assertAlmostEqual(1.0, self.det.solid_angle, 4)
        self.assertEqual('sr', self.det.solid_angle.unit)

    def testsemi_angle(self):
        self.det.semi_angle = 3.4
        self.assertAlmostEqual(3.4, self.det.semi_angle, 4)
        self.assertEqual('mrad', self.det.semi_angle.unit)

    def testtemperature(self):
        self.det.temperature = -20.0
        self.assertAlmostEqual(-20.0, self.det.temperature, 4)
        self.assertEqual('degreesC', self.det.temperature.unit)

    def testpickle(self):
        # Populate every attribute, round-trip through pickle, and verify
        # that values and units survive.
        self.det.model = 'Example Model 123'
        self.det.serial_number = '12345-abc-67890'
        self.det.measurement_unit = 'A'
        self.det.elevation = 45.0
        self.det.azimuth = 0.0
        self.det.distance = 50.0
        self.det.area = 20.0
        self.det.solid_angle = 1.0
        self.det.semi_angle = 3.4
        self.det.temperature = -20.0
        s = pickle.dumps(self.det)
        det = pickle.loads(s)
        self.assertEqual('Example Model 123', det.model)
        self.assertEqual('12345-abc-67890', det.serial_number)
        self.assertEqual('A', det.measurement_unit)
        self.assertAlmostEqual(45.0, det.elevation, 4)
        self.assertEqual('degrees', det.elevation.unit)
        self.assertAlmostEqual(0.0, det.azimuth, 4)
        self.assertEqual('degrees', det.azimuth.unit)
        self.assertAlmostEqual(50.0, det.distance, 4)
        self.assertEqual('mm', det.distance.unit)
        self.assertAlmostEqual(20.0, det.area, 4)
        self.assertEqual('mm2', det.area.unit)
        self.assertAlmostEqual(1.0, det.solid_angle, 4)
        self.assertEqual('sr', det.solid_angle.unit)
        self.assertAlmostEqual(3.4, det.semi_angle, 4)
        self.assertEqual('mrad', det.semi_angle.unit)
        self.assertAlmostEqual(-20.0, det.temperature, 4)
        self.assertEqual('degreesC', det.temperature.unit)
class TestDetectorCamera(unittest.TestCase):
    """Exercise DetectorCamera's pixel counts, optics attributes and
    pickling.  Fixture: 512 x 400 pixels."""

    def setUp(self):
        super().setUp()
        self.det = DetectorCamera(512, 400)

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def testpixel_count_u(self):
        self.assertEqual(512, self.det.pixel_count_u)
        self.assertRaises(ValueError, self.det.set_pixel_count_u, None)

    def testpixel_count_v(self):
        self.assertEqual(400, self.det.pixel_count_v)
        self.assertRaises(ValueError, self.det.set_pixel_count_v, None)

    def testexposure_time(self):
        self.det.exposure_time = 200.0
        self.assertAlmostEqual(200.0, self.det.exposure_time, 4)
        self.assertEqual('ms', self.det.exposure_time.unit)

    def testmagnification(self):
        self.det.magnification = 4.5
        self.assertAlmostEqual(4.5, self.det.magnification, 4)

    def testfocal_length(self):
        self.det.focal_length = 80.0
        self.assertAlmostEqual(80.0, self.det.focal_length, 4)
        self.assertEqual('mm', self.det.focal_length.unit)

    def testpickle(self):
        # Set the optional attributes, round-trip, and verify everything.
        self.det.exposure_time = 200.0
        self.det.magnification = 4.5
        self.det.focal_length = 80.0
        restored = pickle.loads(pickle.dumps(self.det))
        self.assertEqual(512, restored.pixel_count_u)
        self.assertEqual(400, restored.pixel_count_v)
        self.assertAlmostEqual(200.0, restored.exposure_time, 4)
        self.assertEqual('ms', restored.exposure_time.unit)
        self.assertAlmostEqual(4.5, restored.magnification, 4)
        self.assertAlmostEqual(80.0, restored.focal_length, 4)
        self.assertEqual('mm', restored.focal_length.unit)
class TestDetectorSpectrometer(unittest.TestCase):
    """Exercise DetectorSpectrometer: channel count, calibration,
    collection mode, derived calibrations, and pickling."""

    def setUp(self):
        super().setUp()
        calibration = CalibrationConstant('Energy', 'eV', -237.098251)
        self.det = DetectorSpectrometer(4096, calibration)

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def testchannel_count(self):
        self.assertEqual(4096, self.det.channel_count)
        self.assertRaises(ValueError, self.det.set_channel_count, None)

    def testcalibration(self):
        cal = self.det.calibration
        self.assertEqual('Energy', cal.quantity)
        self.assertEqual('eV', cal.unit)
        self.assertAlmostEqual(-237.098251, cal.value, 4)
        self.assertRaises(ValueError, self.det.set_calibration, None)

    def testcollection_mode(self):
        self.det.collection_mode = COLLECTION_MODE_PARALLEL
        self.assertEqual(COLLECTION_MODE_PARALLEL, self.det.collection_mode)
        self.assertRaises(ValueError, self.det.set_collection_mode, 'ABC')

    def testcalibration_energy(self):
        energy_cal = self.det.calibration_energy
        self.assertEqual('Energy', energy_cal.quantity)
        self.assertEqual('eV', energy_cal.unit)
        self.assertAlmostEqual(-237.098251, energy_cal(0), 4)

    def testcalibration_wavelength(self):
        wavelength_cal = self.det.calibration_wavelength
        self.assertEqual('Wavelength', wavelength_cal.quantity)
        self.assertEqual('m', wavelength_cal.unit)
        self.assertAlmostEqual(-5.22923e-9, wavelength_cal(0), 13)

    def testpickle(self):
        self.det.collection_mode = COLLECTION_MODE_PARALLEL
        restored = pickle.loads(pickle.dumps(self.det))
        self.assertEqual(4096, restored.channel_count)
        self.assertEqual('Energy', restored.calibration.quantity)
        self.assertEqual('eV', restored.calibration.unit)
        self.assertAlmostEqual(-237.098251, restored.calibration.value, 4)
        self.assertEqual(COLLECTION_MODE_PARALLEL, restored.collection_mode)
class TestDetectorSpectrometerCL(unittest.TestCase):
    """Exercise the CL spectrometer's grating density and its pickling."""

    def setUp(self):
        super().setUp()
        calibration = CalibrationConstant('Energy', 'eV', -237.098251)
        self.det = DetectorSpectrometerCL(4096, calibration)

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def testgrating_d(self):
        # Grating density is expressed in lines per millimetre (mm-1).
        self.det.grating_d = 800.0
        self.assertAlmostEqual(800.0, self.det.grating_d, 4)
        self.assertEqual('mm-1', self.det.grating_d.unit)

    def testpickle(self):
        self.det.grating_d = 800.0
        restored = pickle.loads(pickle.dumps(self.det))
        self.assertAlmostEqual(800.0, restored.grating_d, 4)
        self.assertEqual('mm-1', restored.grating_d.unit)
class TestDetectorSpectrometerWDS(unittest.TestCase):
    """Tests for DetectorSpectrometerWDS: dispersion crystal, Rowland
    circle, pulse-height analyser, window, derived position calibration,
    and pickling."""
    def setUp(self):
        super().setUp()
        calibration = CalibrationConstant('Energy', 'eV', -237.098251)
        # crystal_2d is the crystal's 2d spacing (angstroms by default);
        # rowland_circle_diameter is in millimetres.
        self.det = DetectorSpectrometerWDS(4096, calibration,
                                           crystal_2d=8.742,
                                           rowland_circle_diameter=140)
    def tearDown(self):
        unittest.TestCase.tearDown(self)
    def testdispersion_element(self):
        self.det.dispersion_element = 'TAP'
        self.assertEqual('TAP', self.det.dispersion_element)
    def testcrystal_2d(self):
        self.assertAlmostEqual(8.742, self.det.crystal_2d, 4)
        # u'\u00c5' is the angstrom sign.
        self.assertEqual(u'\u00c5', self.det.crystal_2d.unit)
    def testrowland_circle_diameter(self):
        self.assertAlmostEqual(140.0, self.det.rowland_circle_diameter, 4)
        self.assertEqual('mm', self.det.rowland_circle_diameter.unit)
    def testpulse_height_analyser(self):
        # Attach a PHA and verify each of its fields through the detector.
        pha = PulseHeightAnalyser(1700.0, 16, 0.7, 9.3, PHA_MODE_DIFFERENTIAL)
        self.det.pulse_height_analyser = pha
        self.assertAlmostEqual(1700.0, self.det.pulse_height_analyser.bias, 4)
        self.assertAlmostEqual(16.0, self.det.pulse_height_analyser.gain, 4)
        self.assertAlmostEqual(0.7, self.det.pulse_height_analyser.base_level, 4)
        self.assertAlmostEqual(9.3, self.det.pulse_height_analyser.window, 4)
        self.assertEqual(PHA_MODE_DIFFERENTIAL, self.det.pulse_height_analyser.mode)
    def testwindow(self):
        window = Window()
        window.append_layer('Al', 1.0)
        self.det.window = window
        self.assertEqual('Al', self.det.window.layers[0].material)
        self.assertEqual(1.0, self.det.window.layers[0].thickness)
    def testcalibration_position(self):
        # Position calibration is derived from the energy calibration.
        cal = self.det.calibration_position
        self.assertEqual('Position', cal.quantity)
        self.assertEqual('m', cal.unit)
        self.assertAlmostEqual(-0.8374, cal(0), 4)
    def testpickle(self):
        # Populate all optional attributes, round-trip, verify everything.
        self.det.dispersion_element = 'TAP'
        self.det.crystal_2d = 8.742
        self.det.rowland_circle_diameter = 140.0
        pha = PulseHeightAnalyser(1700.0, 16, 0.7, 9.3, PHA_MODE_DIFFERENTIAL)
        self.det.pulse_height_analyser = pha
        window = Window()
        window.append_layer('Al', 1.0)
        self.det.window = window
        s = pickle.dumps(self.det)
        det = pickle.loads(s)
        self.assertEqual('TAP', det.dispersion_element)
        self.assertAlmostEqual(8.742, det.crystal_2d, 4)
        self.assertEqual(u'\u00c5', det.crystal_2d.unit)
        self.assertAlmostEqual(140.0, det.rowland_circle_diameter, 4)
        self.assertEqual('mm', det.rowland_circle_diameter.unit)
        self.assertAlmostEqual(1700.0, det.pulse_height_analyser.bias, 4)
        self.assertAlmostEqual(16.0, det.pulse_height_analyser.gain, 4)
        self.assertAlmostEqual(0.7, det.pulse_height_analyser.base_level, 4)
        self.assertAlmostEqual(9.3, det.pulse_height_analyser.window, 4)
        self.assertEqual(PHA_MODE_DIFFERENTIAL, det.pulse_height_analyser.mode)
        self.assertEqual('Al', det.window.layers[0].material)
        self.assertEqual(1.0, det.window.layers[0].thickness)
class TestDetectorSpectrometerXEDS(unittest.TestCase):
    """Tests for DetectorSpectrometerXEDS: technology, throughput, time
    constant, strobe rate, window, and pickling."""
    def setUp(self):
        super().setUp()
        calibration = CalibrationConstant('Energy', 'eV', -237.098251)
        self.det = DetectorSpectrometerXEDS(4096, calibration)
    def tearDown(self):
        unittest.TestCase.tearDown(self)
    def testtechnology(self):
        self.det.technology = XEDS_TECHNOLOGY_SILI
        self.assertEqual(XEDS_TECHNOLOGY_SILI, self.det.technology)
        # Unknown technology strings must be rejected.
        self.assertRaises(ValueError, self.det.set_technology, 'ABC')
    def testnominal_throughput(self):
        self.det.nominal_throughput = 180000
        self.assertAlmostEqual(180000, self.det.nominal_throughput, 4)
        self.assertEqual('counts', self.det.nominal_throughput.unit)
    def testtime_constant(self):
        self.det.time_constant = 11.1
        self.assertAlmostEqual(11.1, self.det.time_constant, 4)
        self.assertEqual('us', self.det.time_constant.unit)
    def teststrobe_rate(self):
        self.det.strobe_rate = 2000
        self.assertAlmostEqual(2000, self.det.strobe_rate, 4)
        self.assertEqual('Hz', self.det.strobe_rate.unit)
    def testwindow(self):
        window = Window()
        window.append_layer('Al', 1.0)
        self.det.window = window
        self.assertEqual('Al', self.det.window.layers[0].material)
        self.assertEqual(1.0, self.det.window.layers[0].thickness)
    def testpickle(self):
        # Populate all optional attributes, round-trip, verify everything.
        self.det.technology = XEDS_TECHNOLOGY_SILI
        self.det.nominal_throughput = 180000
        self.det.time_constant = 11.1
        self.det.strobe_rate = 2000
        window = Window()
        window.append_layer('Al', 1.0)
        self.det.window = window
        s = pickle.dumps(self.det)
        det = pickle.loads(s)
        self.assertEqual(XEDS_TECHNOLOGY_SILI, det.technology)
        self.assertAlmostEqual(180000, det.nominal_throughput, 4)
        self.assertEqual('counts', det.nominal_throughput.unit)
        self.assertAlmostEqual(11.1, det.time_constant, 4)
        self.assertEqual('us', det.time_constant.unit)
        self.assertAlmostEqual(2000, det.strobe_rate, 4)
        self.assertEqual('Hz', det.strobe_rate.unit)
        self.assertEqual('Al', det.window.layers[0].material)
        self.assertEqual(1.0, det.window.layers[0].thickness)
if __name__ == '__main__':  # pragma: no cover
    # Running this module directly executes the suite with debug logging.
    logging.getLogger().setLevel(logging.DEBUG)
    unittest.main()
|
|
# Authors: Chris Holdgraf <choldgraf@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
from mne import io, pick_types
from mne.fixes import einsum
from mne.utils import requires_version, requires_sklearn, run_tests_if_main
from mne.decoding import ReceptiveField, TimeDelayingRidge
from mne.decoding.receptive_field import (_delay_time_series, _SCORERS,
_times_to_delays, _delays_to_slice)
from mne.decoding.time_delaying_ridge import (_compute_reg_neighbors,
_compute_corrs)
# Paths to the small sample recordings shipped with the MNE test data.
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.1, 0.5  # module-level defaults (seconds); some tests shadow these
event_id = dict(aud_l=1, vis_l=3)
# Loading raw data
raw = io.read_raw_fif(raw_fname, preload=True)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                   eog=False, exclude='bads')
picks = picks[:2]  # keep only two channels so the tests stay fast
# Values for the n_jobs parametrization; 'cuda' exercises the CUDA code path.
n_jobs_test = (1, 'cuda')
def test_compute_reg_neighbors():
    """Test fast calculation of laplacian regularizer.

    The 'direct' and 'csgraph' backends must produce identical regularization
    matrices for every reg-type pair, matrix shape, and normalization flag.
    """
    reg_kinds = (
        ('ridge', 'ridge'),
        ('ridge', 'laplacian'),
        ('laplacian', 'ridge'),
        ('laplacian', 'laplacian'),
    )
    shapes = (
        (1, 1), (1, 2), (2, 1), (1, 3), (3, 1), (1, 4), (4, 1),
        (2, 2), (2, 3), (3, 2), (3, 3),
        (2, 4), (4, 2), (3, 4), (4, 3), (4, 4),
        (5, 4), (4, 5), (5, 5),
        (20, 9), (9, 20),
    )
    for kind in reg_kinds:
        for n_x, n_d in shapes:
            for use_norm in (True, False):
                direct = _compute_reg_neighbors(
                    n_x, n_d, kind, 'direct', normed=use_norm)
                csgraph = _compute_reg_neighbors(
                    n_x, n_d, kind, 'csgraph', normed=use_norm)
                assert_allclose(
                    direct, csgraph, atol=1e-7,
                    err_msg='%s: %s' % (kind, (n_x, n_d)))
@requires_sklearn
def test_rank_deficiency():
    """Test signals that are rank deficient."""
    # See GH#4253
    from sklearn.linear_model import Ridge
    N = 256
    fs = 1.
    tmin, tmax = -50, 100  # NOTE: shadows the module-level tmin/tmax
    reg = 0.1
    rng = np.random.RandomState(0)
    eeg = rng.randn(N, 1)
    eeg *= 100
    # Low-pass in the frequency domain by zeroing high bins -> the delayed
    # design matrix becomes rank deficient.
    eeg = np.fft.rfft(eeg, axis=0)
    eeg[N // 4:] = 0  # rank-deficient lowpass
    eeg = np.fft.irfft(eeg, axis=0)
    win = np.hanning(N // 8)
    win /= win.mean()
    y = np.apply_along_axis(np.convolve, 0, eeg, win, mode='same')
    y += rng.randn(*y.shape) * 100
    # Both the sklearn estimator path and the float (TimeDelayingRidge)
    # path must fit and predict well despite the rank deficiency.
    for est in (Ridge(reg), reg):
        rf = ReceptiveField(tmin, tmax, fs, estimator=est, patterns=True)
        rf.fit(eeg, y)
        pred = rf.predict(eeg)
        assert_equal(y.shape, pred.shape)
        corr = np.corrcoef(y.ravel(), pred.ravel())[0, 1]
        assert corr > 0.995
def test_time_delay():
    """Test that time-delaying w/ times and samples works properly."""
    # Explicit delays + sfreq
    X = np.random.RandomState(0).randn(1000, 2)
    assert (X == 0).sum() == 0  # need this for later
    # ((tmin, tmax), sfreq) pairs covering negative, zero and positive lags.
    test_tlims = [
        ((1, 2), 1),
        ((1, 1), 1),
        ((0, 2), 1),
        ((0, 1), 1),
        ((0, 0), 1),
        ((-1, 2), 1),
        ((-1, 1), 1),
        ((-1, 0), 1),
        ((-1, -1), 1),
        ((-2, 2), 1),
        ((-2, 1), 1),
        ((-2, 0), 1),
        ((-2, -1), 1),
        ((-2, -1), 1),
        ((0, .2), 10),
        ((-.1, .1), 10)]
    for (tmin, tmax), isfreq in test_tlims:
        # sfreq must be int/float
        with pytest.raises(TypeError, match='`sfreq` must be an instance of'):
            _delay_time_series(X, tmin, tmax, sfreq=[1])
        # Delays must be int/float. Use the builtin complex() here: the
        # np.complex alias was deprecated in NumPy 1.20 and removed in 1.24.
        with pytest.raises(TypeError, match='.*complex.*'):
            _delay_time_series(X, complex(tmin), tmax, 1)
        # Make sure swapaxes works
        start, stop = int(round(tmin * isfreq)), int(round(tmax * isfreq)) + 1
        n_delays = stop - start
        X_delayed = _delay_time_series(X, tmin, tmax, isfreq)
        assert_equal(X_delayed.shape, (1000, 2, n_delays))
        # Make sure delay slice is correct
        delays = _times_to_delays(tmin, tmax, isfreq)
        assert_array_equal(delays, np.arange(start, stop))
        keep = _delays_to_slice(delays)
        # The kept samples are exactly those with no zero padding anywhere.
        expected = np.where((X_delayed != 0).all(-1).all(-1))[0]
        got = np.arange(len(X_delayed))[keep]
        assert_array_equal(got, expected)
        assert X_delayed[keep].shape[-1] > 0
        assert (X_delayed[keep] == 0).sum() == 0
        del_zero = int(round(-tmin * isfreq))
        for ii in range(-2, 3):
            idx = del_zero + ii
            err_msg = '[%s,%s] (%s): %s %s' % (tmin, tmax, isfreq, ii, idx)
            if 0 <= idx < X_delayed.shape[-1]:
                if ii == 0:
                    # Zero delay must reproduce the input unchanged.
                    assert_array_equal(X_delayed[:, :, idx], X,
                                       err_msg=err_msg)
                elif ii < 0:  # negative delay
                    assert_array_equal(X_delayed[:ii, :, idx], X[-ii:, :],
                                       err_msg=err_msg)
                    assert_array_equal(X_delayed[ii:, :, idx], 0.)
                else:
                    assert_array_equal(X_delayed[ii:, :, idx], X[:-ii, :],
                                       err_msg=err_msg)
                    assert_array_equal(X_delayed[:ii, :, idx], 0.)
@pytest.mark.parametrize('n_jobs', n_jobs_test)
@requires_sklearn
def test_receptive_field_basic(n_jobs):
    """Test model prep and fitting."""
    from sklearn.linear_model import Ridge
    # Make sure estimator pulling works
    mod = Ridge()
    # Test the receptive field model
    # Define parameters for the model and simulate inputs + weights
    tmin, tmax = -10., 0
    n_feats = 3
    rng = np.random.RandomState(0)
    X = rng.randn(10000, n_feats)
    w = rng.randn(int((tmax - tmin) + 1) * n_feats)
    # Delay inputs and cut off first 4 values since they'll be cut in the fit
    X_del = np.concatenate(
        _delay_time_series(X, tmin, tmax, 1.).transpose(2, 0, 1), axis=1)
    y = np.dot(X_del, w)
    # Fit the model and test values
    feature_names = ['feature_%i' % ii for ii in [0, 1, 2]]
    rf = ReceptiveField(tmin, tmax, 1, feature_names, estimator=mod,
                        patterns=True)
    rf.fit(X, y)
    assert_array_equal(rf.delays_, np.arange(tmin, tmax + 1))
    y_pred = rf.predict(X)
    assert_allclose(y[rf.valid_samples_], y_pred[rf.valid_samples_], atol=1e-2)
    scores = rf.score(X, y)
    assert scores > .99
    # The fitted coefficients must recover the simulated weights.
    assert_allclose(rf.coef_.T.ravel(), w, atol=1e-3)
    # Make sure different input shapes work
    rf.fit(X[:, np.newaxis:], y[:, np.newaxis])
    rf.fit(X, y[:, np.newaxis])
    with pytest.raises(ValueError, match='If X has 3 .* y must have 2 or 3'):
        rf.fit(X[..., np.newaxis], y)
    with pytest.raises(ValueError, match='X must be shape'):
        rf.fit(X[:, 0], y)
    with pytest.raises(ValueError, match='X and y do not have the same n_epo'):
        rf.fit(X[:, np.newaxis], np.tile(y[:, np.newaxis, np.newaxis],
                                         [1, 2, 1]))
    with pytest.raises(ValueError, match='X and y do not have the same n_tim'):
        rf.fit(X, y[:-2])
    with pytest.raises(ValueError, match='n_features in X does not match'):
        rf.fit(X[:, :1], y)
    # auto-naming features
    feature_names = ['feature_%s' % ii for ii in [0, 1, 2]]
    rf = ReceptiveField(tmin, tmax, 1, estimator=mod,
                        feature_names=feature_names)
    assert_equal(rf.feature_names, feature_names)
    rf = ReceptiveField(tmin, tmax, 1, estimator=mod)
    rf.fit(X, y)
    assert_equal(rf.feature_names, None)
    # Float becomes ridge
    rf = ReceptiveField(tmin, tmax, 1, ['one', 'two', 'three'], estimator=0)
    str(rf)  # repr works before fit
    rf.fit(X, y)
    assert isinstance(rf.estimator_, TimeDelayingRidge)
    str(rf)  # repr works after fit
    rf = ReceptiveField(tmin, tmax, 1, ['one'], estimator=0)
    rf.fit(X[:, [0]], y)
    str(rf)  # repr with one feature
    # Should only accept estimators or floats
    with pytest.raises(ValueError, match='`estimator` must be a float or'):
        ReceptiveField(tmin, tmax, 1, estimator='foo').fit(X, y)
    with pytest.raises(ValueError, match='`estimator` must be a float or'):
        ReceptiveField(tmin, tmax, 1, estimator=np.array([1, 2, 3])).fit(X, y)
    with pytest.raises(ValueError, match='tmin .* must be at most tmax'):
        ReceptiveField(5, 4, 1).fit(X, y)
    # scorers
    for key, val in _SCORERS.items():
        rf = ReceptiveField(tmin, tmax, 1, ['one'],
                            estimator=0, scoring=key, patterns=True)
        rf.fit(X[:, [0]], y)
        y_pred = rf.predict(X[:, [0]]).T.ravel()[:, np.newaxis]
        assert_allclose(val(y[:, np.newaxis], y_pred,
                            multioutput='raw_values'),
                        rf.score(X[:, [0]], y), rtol=1e-2)
    with pytest.raises(ValueError, match='inputs must be shape'):
        _SCORERS['corrcoef'](y.ravel(), y_pred, multioutput='raw_values')
    # Need correct scorers
    with pytest.raises(ValueError, match='scoring must be one of'):
        ReceptiveField(tmin, tmax, 1., scoring='foo').fit(X, y)
@pytest.mark.parametrize('n_jobs', n_jobs_test)
def test_time_delaying_fast_calc(n_jobs):
    """Test time delaying and fast calculations.

    Each scenario checks hand-computed delayed matrices and verifies that
    _compute_corrs reproduces the explicitly-formed Gram matrix X^T X.
    """
    X = np.array([[1, 2, 3], [5, 7, 11]]).T
    # all negative
    smin, smax = 1, 2
    X_del = _delay_time_series(X, smin, smax, 1.)
    # (n_times, n_features, n_delays) -> (n_times, n_features * n_delays)
    X_del.shape = (X.shape[0], -1)
    expected = np.array([[0, 1, 2], [0, 0, 1], [0, 5, 7], [0, 0, 5]]).T
    assert_allclose(X_del, expected)
    Xt_X = np.dot(X_del.T, X_del)
    expected = [[5, 2, 19, 10], [2, 1, 7, 5], [19, 7, 74, 35], [10, 5, 35, 25]]
    assert_allclose(Xt_X, expected)
    # _compute_corrs must match the dense Gram matrix.
    x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
    assert_allclose(x_xt, expected)
    # all positive
    smin, smax = -2, -1
    X_del = _delay_time_series(X, smin, smax, 1.)
    X_del.shape = (X.shape[0], -1)
    expected = np.array([[3, 0, 0], [2, 3, 0], [11, 0, 0], [7, 11, 0]]).T
    assert_allclose(X_del, expected)
    Xt_X = np.dot(X_del.T, X_del)
    expected = [[9, 6, 33, 21], [6, 13, 22, 47],
                [33, 22, 121, 77], [21, 47, 77, 170]]
    assert_allclose(Xt_X, expected)
    x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
    assert_allclose(x_xt, expected)
    # both sides
    smin, smax = -1, 1
    X_del = _delay_time_series(X, smin, smax, 1.)
    X_del.shape = (X.shape[0], -1)
    expected = np.array([[2, 3, 0], [1, 2, 3], [0, 1, 2],
                         [7, 11, 0], [5, 7, 11], [0, 5, 7]]).T
    assert_allclose(X_del, expected)
    Xt_X = np.dot(X_del.T, X_del)
    expected = [[13, 8, 3, 47, 31, 15],
                [8, 14, 8, 29, 52, 31],
                [3, 8, 5, 11, 29, 19],
                [47, 29, 11, 170, 112, 55],
                [31, 52, 29, 112, 195, 112],
                [15, 31, 19, 55, 112, 74]]
    assert_allclose(Xt_X, expected)
    x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
    assert_allclose(x_xt, expected)
    # slightly harder to get the non-Toeplitz correction correct
    X = np.array([[1, 2, 3, 5]]).T
    smin, smax = 0, 3
    X_del = _delay_time_series(X, smin, smax, 1.)
    X_del.shape = (X.shape[0], -1)
    expected = np.array([[1, 2, 3, 5], [0, 1, 2, 3],
                         [0, 0, 1, 2], [0, 0, 0, 1]]).T
    assert_allclose(X_del, expected)
    Xt_X = np.dot(X_del.T, X_del)
    expected = [[39, 23, 13, 5], [23, 14, 8, 3], [13, 8, 5, 2], [5, 3, 2, 1]]
    assert_allclose(Xt_X, expected)
    x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
    assert_allclose(x_xt, expected)
    # even worse
    X = np.array([[1, 2, 3], [5, 7, 11]]).T
    smin, smax = 0, 2
    X_del = _delay_time_series(X, smin, smax, 1.)
    X_del.shape = (X.shape[0], -1)
    expected = np.array([[1, 2, 3], [0, 1, 2], [0, 0, 1],
                         [5, 7, 11], [0, 5, 7], [0, 0, 5]]).T
    assert_allclose(X_del, expected)
    Xt_X = np.dot(X_del.T, X_del)
    expected = np.array([[14, 8, 3, 52, 31, 15],
                         [8, 5, 2, 29, 19, 10],
                         [3, 2, 1, 11, 7, 5],
                         [52, 29, 11, 195, 112, 55],
                         [31, 19, 7, 112, 74, 35],
                         [15, 10, 5, 55, 35, 25]])
    assert_allclose(Xt_X, expected)
    x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
    assert_allclose(x_xt, expected)
    # And a bunch of random ones for good measure
    rng = np.random.RandomState(0)
    X = rng.randn(25, 3)
    y = np.empty((25, 2))
    vals = (0, -1, 1, -2, 2, -11, 11)
    for smax in vals:
        for smin in vals:
            if smin > smax:
                continue
            for ii in range(X.shape[1]):
                kernel = rng.randn(smax - smin + 1)
                kernel -= np.mean(kernel)
                y[:, ii % y.shape[-1]] = np.convolve(X[:, ii], kernel, 'same')
            x_xt, x_yt, n_ch_x, _, _ = _compute_corrs(X, y, smin, smax + 1)
            # Compare against the brute-force delayed design matrix.
            X_del = _delay_time_series(X, smin, smax, 1., fill_mean=False)
            x_yt_true = einsum('tfd,to->ofd', X_del, y)
            x_yt_true = np.reshape(x_yt_true, (x_yt_true.shape[0], -1)).T
            assert_allclose(x_yt, x_yt_true, atol=1e-7, err_msg=(smin, smax))
            X_del.shape = (X.shape[0], -1)
            x_xt_true = np.dot(X_del.T, X_del).T
            assert_allclose(x_xt, x_xt_true, atol=1e-7, err_msg=(smin, smax))
@pytest.mark.parametrize('n_jobs', n_jobs_test)
@requires_sklearn
def test_receptive_field_1d(n_jobs):
    """Test that the fast solving works like Ridge."""
    from sklearn.linear_model import Ridge
    rng = np.random.RandomState(0)
    x = rng.randn(500, 1)
    for delay in range(-2, 3):
        # y is x shifted by `delay` samples; the fitted RF should recover a
        # single unit spike at that delay.
        y = np.zeros(500)
        slims = [(-2, 4)]
        if delay == 0:
            y[:] = x[:, 0]
        elif delay < 0:
            y[:delay] = x[-delay:, 0]
            slims += [(-4, -1)]
        else:
            y[delay:] = x[:-delay, 0]
            slims += [(1, 2)]
        for ndim in (1, 2):
            y.shape = (y.shape[0],) + (1,) * (ndim - 1)
            for slim in slims:
                smin, smax = slim
                lap = TimeDelayingRidge(smin, smax, 1., 0.1, 'laplacian',
                                        fit_intercept=False, n_jobs=n_jobs)
                for estimator in (Ridge(alpha=0.), Ridge(alpha=0.1), 0., 0.1,
                                  lap):
                    # Offsets check that the intercept absorbs a DC shift.
                    for offset in (-100, 0, 100):
                        model = ReceptiveField(smin, smax, 1.,
                                               estimator=estimator,
                                               n_jobs=n_jobs)
                        use_x = x + offset
                        model.fit(use_x, y)
                        if estimator is lap:
                            continue  # these checks are too stringent
                        assert_allclose(model.estimator_.intercept_, -offset,
                                        atol=1e-1)
                        assert_array_equal(model.delays_,
                                           np.arange(smin, smax + 1))
                        expected = (model.delays_ == delay).astype(float)
                        expected = expected[np.newaxis]  # features
                        if y.ndim == 2:
                            expected = expected[np.newaxis]  # outputs
                        assert_equal(model.coef_.ndim, ndim + 1)
                        assert_allclose(model.coef_, expected, atol=1e-3)
                        start = model.valid_samples_.start or 0
                        stop = len(use_x) - (model.valid_samples_.stop or 0)
                        assert stop - start >= 495
                        assert_allclose(
                            model.predict(use_x)[model.valid_samples_],
                            y[model.valid_samples_], atol=1e-2)
                        score = np.mean(model.score(use_x, y))
                        assert score > 0.9999
@pytest.mark.parametrize('n_jobs', n_jobs_test)
@requires_sklearn
def test_receptive_field_nd(n_jobs):
    """Test multidimensional support."""
    from sklearn.linear_model import Ridge
    # multidimensional
    rng = np.random.RandomState(3)
    x = rng.randn(1000, 3)
    y = np.zeros((1000, 2))
    smin, smax = 0, 5
    # This is a weird assignment, but it's just a way to distribute some
    # unique values at various delays, and "expected" explains how they
    # should appear in the resulting RF
    for ii in range(1, 5):
        y[ii:, ii % 2] += (-1) ** ii * ii * x[:-ii, ii % 3]
    y -= np.mean(y, axis=0)
    x -= np.mean(x, axis=0)
    x_off = x + 1e3
    expected = [
        [[0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 4, 0],
         [0, 0, 2, 0, 0, 0]],
        [[0, 0, 0, -3, 0, 0],
         [0, -1, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0]],
    ]
    tdr_l = TimeDelayingRidge(smin, smax, 1., 0.1, 'laplacian', n_jobs=n_jobs)
    tdr_nc = TimeDelayingRidge(smin, smax, 1., 0.1, n_jobs=n_jobs,
                               edge_correction=False)
    # All estimator flavors must recover the planted coefficients, with a
    # looser tolerance for the no-edge-correction variant.
    for estimator, atol in zip((Ridge(alpha=0.), 0., 0.01, tdr_l, tdr_nc),
                               (1e-3, 1e-3, 1e-3, 5e-3, 5e-2)):
        model = ReceptiveField(smin, smax, 1.,
                               estimator=estimator)
        model.fit(x, y)
        assert_array_equal(model.delays_,
                           np.arange(smin, smax + 1))
        assert_allclose(model.coef_, expected, atol=atol)
    # Invalid reg_type configurations are rejected at fit time.
    tdr = TimeDelayingRidge(smin, smax, 1., 0.01, reg_type='foo',
                            n_jobs=n_jobs)
    model = ReceptiveField(smin, smax, 1., estimator=tdr)
    with pytest.raises(ValueError, match='reg_type entries must be one of'):
        model.fit(x, y)
    tdr = TimeDelayingRidge(smin, smax, 1., 0.01, reg_type=['laplacian'],
                            n_jobs=n_jobs)
    model = ReceptiveField(smin, smax, 1., estimator=tdr)
    with pytest.raises(ValueError, match='reg_type must have two elements'):
        model.fit(x, y)
    model = ReceptiveField(smin, smax, 1, estimator=tdr, fit_intercept=False)
    with pytest.raises(ValueError, match='fit_intercept'):
        model.fit(x, y)
    # Now check the intercept_
    tdr = TimeDelayingRidge(smin, smax, 1., 0., n_jobs=n_jobs)
    tdr_no = TimeDelayingRidge(smin, smax, 1., 0., fit_intercept=False,
                               n_jobs=n_jobs)
    for estimator in (Ridge(alpha=0.), tdr,
                      Ridge(alpha=0., fit_intercept=False), tdr_no):
        # first with no intercept in the data
        model = ReceptiveField(smin, smax, 1., estimator=estimator)
        model.fit(x, y)
        assert_allclose(model.estimator_.intercept_, 0., atol=1e-7,
                        err_msg=repr(estimator))
        assert_allclose(model.coef_, expected, atol=1e-3,
                        err_msg=repr(estimator))
        y_pred = model.predict(x)
        assert_allclose(y_pred[model.valid_samples_],
                        y[model.valid_samples_],
                        atol=1e-2, err_msg=repr(estimator))
        score = np.mean(model.score(x, y))
        assert score > 0.9999
        # now with an intercept in the data
        model.fit(x_off, y)
        if estimator.fit_intercept:
            val = [-6000, 4000]
            itol = 0.5
            ctol = 5e-4
        else:
            # Without an intercept the offset leaks into the coefficients,
            # so only loose checks are possible.
            val = itol = 0.
            ctol = 2.
        assert_allclose(model.estimator_.intercept_, val, atol=itol,
                        err_msg=repr(estimator))
        assert_allclose(model.coef_, expected, atol=ctol, rtol=ctol,
                        err_msg=repr(estimator))
        if estimator.fit_intercept:
            ptol = 1e-2
            stol = 0.999999
        else:
            ptol = 10
            stol = 0.6
        y_pred = model.predict(x_off)[model.valid_samples_]
        assert_allclose(y_pred, y[model.valid_samples_],
                        atol=ptol, err_msg=repr(estimator))
        score = np.mean(model.score(x_off, y))
        assert score > stol, estimator
    model = ReceptiveField(smin, smax, 1., fit_intercept=False)
    model.fit(x_off, y)
    assert_allclose(model.estimator_.intercept_, 0., atol=1e-7)
    score = np.mean(model.score(x_off, y))
    assert score > 0.6
def _make_data(n_feats, n_targets, n_samples, tmin, tmax):
    """Build (X, y) where y is a linear readout of the time-delayed X."""
    rng = np.random.RandomState(0)
    design = rng.randn(n_samples, n_feats)
    n_delays = int((tmax - tmin) + 1)
    weights = rng.randn(n_delays * n_feats, n_targets)
    # Delay inputs, then stack delays along the feature axis.
    delayed = _delay_time_series(design, tmin, tmax, 1.)
    stacked = np.concatenate(delayed.transpose(2, 0, 1), axis=1)
    targets = stacked.dot(weights)
    return design, targets
@requires_sklearn
def test_inverse_coef():
    """Test inverse coefficients computation."""
    from sklearn.linear_model import Ridge
    tmin, tmax = 0., 10.
    n_feats, n_targets, n_samples = 3, 2, 1000
    n_delays = int((tmax - tmin) + 1)
    # Check coefficient dims, for all estimator types
    X, y = _make_data(n_feats, n_targets, n_samples, tmin, tmax)
    tdr = TimeDelayingRidge(tmin, tmax, 1., 0.1, 'laplacian')
    for estimator in (0., 0.01, Ridge(alpha=0.), tdr):
        rf = ReceptiveField(tmin, tmax, 1., estimator=estimator,
                            patterns=True)
        rf.fit(X, y)
        inv_rf = ReceptiveField(tmin, tmax, 1., estimator=estimator,
                                patterns=True)
        inv_rf.fit(y, X)
        # NOTE: previously the expected shape was passed as the third
        # positional argument of assert_array_equal, which is ``err_msg`` --
        # so the shape was never actually verified. Check it explicitly.
        assert_array_equal(rf.coef_.shape, rf.patterns_.shape)
        assert_array_equal(rf.coef_.shape, (n_targets, n_feats, n_delays))
        assert_array_equal(inv_rf.coef_.shape, inv_rf.patterns_.shape)
        assert_array_equal(inv_rf.coef_.shape, (n_feats, n_targets, n_delays))
        # we should have np.dot(patterns.T,coef) ~ np.eye(n)
        c0 = rf.coef_.reshape(n_targets, n_feats * n_delays)
        c1 = rf.patterns_.reshape(n_targets, n_feats * n_delays)
        assert_allclose(np.dot(c0, c1.T), np.eye(c0.shape[0]), atol=0.2)
@requires_sklearn
@requires_version('scipy', '1.0')
def test_linalg_warning():
    """Test that warnings are issued when no regularization is applied."""
    from sklearn.linear_model import Ridge
    # Fat problem (more targets than samples allow) triggers the singular-
    # matrix warning path when alpha == 0.
    n_feats, n_targets, n_samples = 5, 60, 50
    X, y = _make_data(n_feats, n_targets, n_samples, tmin, tmax)
    for estimator in (0., Ridge(alpha=0.)):
        rf = ReceptiveField(tmin, tmax, 1., estimator=estimator)
        # Use alternation, not a character class: the previous pattern
        # '[Singular|scipy.linalg.solve]' matched ANY single character from
        # that set, so essentially any warning text would have passed.
        with pytest.warns((RuntimeWarning, UserWarning),
                          match=r'Singular|scipy\.linalg\.solve'):
            rf.fit(y, X)
# Run this module's tests when executed directly (MNE test-runner helper).
run_tests_if_main()
|
|
from collections import namedtuple
import pytest
from carto.datasets import DatasetManager
from carto.sql import SQLClient, BatchSQLClient, CopySQLClient
from carto.exceptions import CartoRateLimitException
from pandas import DataFrame
from geopandas import GeoDataFrame
from cartoframes.auth import Credentials
from cartoframes.io.managers.context_manager import ContextManager, DEFAULT_RETRY_TIMES, retry_copy
from cartoframes.utils.columns import ColumnInfo
class TestContextManager(object):
    """Unit tests for ContextManager: SQL execution, COPY to/from, and
    table management against mocked CARTO clients."""
    def setup_method(self):
        """Create fake credentials shared by every test."""
        self.credentials = Credentials('fake_user', 'fake_api')
    def test_execute_query(self, mocker):
        """execute_query delegates to SQLClient.send with default flags."""
        # Given
        mocker.patch('cartoframes.io.managers.context_manager._create_auth_client')
        mock = mocker.patch.object(SQLClient, 'send')
        # When
        cm = ContextManager(self.credentials)
        cm.execute_query('query')
        # Then
        mock.assert_called_once_with('query', True, True, None)
    def test_execute_long_running_query(self, mocker):
        """execute_long_running_query delegates to the Batch SQL API."""
        # Given
        mocker.patch('cartoframes.io.managers.context_manager._create_auth_client')
        mock = mocker.patch.object(BatchSQLClient, 'create_and_wait_for_completion')
        # When
        cm = ContextManager(self.credentials)
        cm.execute_long_running_query('query')
        # Then
        mock.assert_called_once_with('query')
    def test_copy_to(self, mocker):
        """copy_to wraps the query in a column-projected SELECT for _copy_to."""
        # Given
        query = '__query__'
        columns = [ColumnInfo('A', 'a', 'bigint', False)]
        mocker.patch.object(ContextManager, 'compute_query', return_value=query)
        mocker.patch.object(ContextManager, '_get_query_columns_info', return_value=columns)
        mock = mocker.patch.object(ContextManager, '_copy_to')
        # When
        cm = ContextManager(self.credentials)
        cm.copy_to(query)
        # Then
        mock.assert_called_once_with('SELECT "A" FROM (__query__) _q', columns, 3)
    def test_copy_from(self, mocker):
        """copy_from creates the (name-normalized) table, then calls _copy_from."""
        # Given
        mocker.patch('cartoframes.io.managers.context_manager._create_auth_client')
        mocker.patch.object(ContextManager, 'has_table', return_value=False)
        mocker.patch.object(ContextManager, 'get_schema', return_value='schema')
        mock_create_table = mocker.patch.object(ContextManager, 'execute_query')
        mock = mocker.patch.object(ContextManager, '_copy_from')
        df = DataFrame({'A': [1]})
        columns = [ColumnInfo('A', 'a', 'bigint', False)]
        # When
        cm = ContextManager(self.credentials)
        cm.copy_from(df, 'TABLE NAME')
        # Then
        mock_create_table.assert_called_once_with('''
BEGIN; CREATE TABLE table_name ("a" bigint); COMMIT;
'''.strip())
        mock.assert_called_once_with(df, 'table_name', columns, DEFAULT_RETRY_TIMES)
    def test_copy_from_exists_fail(self, mocker):
        """copy_from with if_exists='fail' raises when the table exists."""
        # Given
        mocker.patch('cartoframes.io.managers.context_manager._create_auth_client')
        mocker.patch.object(ContextManager, 'has_table', return_value=True)
        mocker.patch.object(ContextManager, 'get_schema', return_value='schema')
        df = DataFrame({'A': [1]})
        # When
        with pytest.raises(Exception) as e:
            cm = ContextManager(self.credentials)
            cm.copy_from(df, 'TABLE NAME', 'fail')
        # Then
        assert str(e.value) == ('Table "schema.table_name" already exists in your CARTO account. '
                                'Please choose a different `table_name` or use '
                                'if_exists="replace" to overwrite it.')
    def test_copy_from_exists_replace_truncate_and_drop_add_columns(self, mocker):
        """Replace with mismatched columns truncates and alters the table."""
        # Given
        mocker.patch('cartoframes.io.managers.context_manager._create_auth_client')
        mocker.patch.object(ContextManager, 'has_table', return_value=True)
        mocker.patch.object(ContextManager, 'get_schema', return_value='schema')
        mock = mocker.patch.object(ContextManager, '_truncate_and_drop_add_columns')
        df = DataFrame({'A': [1]})
        columns = [ColumnInfo('A', 'a', 'bigint', False)]
        # When
        cm = ContextManager(self.credentials)
        cm.copy_from(df, 'TABLE NAME', 'replace')
        # Then
        mock.assert_called_once_with('table_name', 'schema', columns, [])
    def test_copy_from_exists_replace_truncate(self, mocker):
        """Replace with matching columns only truncates the table."""
        # Given
        mocker.patch('cartoframes.io.managers.context_manager._create_auth_client')
        mocker.patch.object(ContextManager, 'has_table', return_value=True)
        mocker.patch.object(ContextManager, 'get_schema', return_value='schema')
        mocker.patch.object(ContextManager, '_compare_columns', return_value=True)
        mock = mocker.patch.object(ContextManager, '_truncate_table')
        df = DataFrame({'A': [1]})
        # When
        cm = ContextManager(self.credentials)
        cm.copy_from(df, 'TABLE NAME', 'replace')
        # Then
        mock.assert_called_once_with('table_name', 'schema')
    def test_internal_copy_from(self, mocker):
        """_copy_from emits a COPY statement and pipe-delimited rows
        (geometries serialized as hex EWKB)."""
        # Given
        from shapely.geometry import Point
        mocker.patch('cartoframes.io.managers.context_manager._create_auth_client')
        mock = mocker.patch.object(CopySQLClient, 'copyfrom')
        gdf = GeoDataFrame({'A': [1, 2], 'B': [Point(0, 0), Point(1, 1)]})
        columns = [
            ColumnInfo('A', 'a', 'bigint', False),
            ColumnInfo('B', 'b', 'geometry', True)
        ]
        # When
        cm = ContextManager(self.credentials)
        cm._copy_from(gdf, 'table_name', columns)
        # Then
        assert mock.call_args[0][0] == '''
COPY table_name("a","b") FROM stdin WITH (FORMAT csv, DELIMITER '|', NULL '__null');
'''.strip()
        assert list(mock.call_args[0][1]) == [
            b'1|0101000020E610000000000000000000000000000000000000\n',
            b'2|0101000020E6100000000000000000F03F000000000000F03F\n'
        ]
    def test_rename_table(self, mocker):
        """rename_table normalizes the new name and calls _rename_table."""
        # Given
        def has_table(table_name):
            if table_name == 'table_name':
                return True
            elif table_name == 'new_table_name':
                return False
        mocker.patch('cartoframes.io.managers.context_manager._create_auth_client')
        mocker.patch.object(ContextManager, 'has_table', side_effect=has_table)
        mock = mocker.patch.object(ContextManager, '_rename_table')
        # When
        cm = ContextManager(self.credentials)
        result = cm.rename_table('table_name', 'NEW TABLE NAME')
        # Then
        mock.assert_called_once_with('table_name', 'new_table_name')
        assert result == 'new_table_name'
    def test_rename_table_equal(self, mocker):
        """Renaming a table to its own (normalized) name is rejected."""
        # When
        with pytest.raises(Exception) as e:
            cm = ContextManager(self.credentials)
            cm.rename_table('table_name', 'TABLE NAME')
        # Then
        assert str(e.value) == ('Table names are equal. Please choose a different table name.')
    def test_rename_table_orig_not_exist(self, mocker):
        """Renaming fails when the source table does not exist."""
        # Given
        def has_table(table_name):
            if table_name == 'table_name':
                return False
        mocker.patch('cartoframes.io.managers.context_manager._create_auth_client')
        mocker.patch.object(ContextManager, 'has_table', side_effect=has_table)
        # When
        with pytest.raises(Exception) as e:
            cm = ContextManager(self.credentials)
            cm.rename_table('table_name', 'NEW TABLE NAME')
        # Then
        assert str(e.value) == ('Table "table_name" does not exist in your CARTO account.')
    def test_rename_table_dest_exists_fail(self, mocker):
        """With if_exists='fail', renaming onto an existing table raises."""
        # Given
        def has_table(table_name):
            if table_name == 'table_name':
                return True
            elif table_name == 'new_table_name':
                return True
        mocker.patch('cartoframes.io.managers.context_manager._create_auth_client')
        mocker.patch.object(ContextManager, 'has_table', side_effect=has_table)
        # When
        with pytest.raises(Exception) as e:
            cm = ContextManager(self.credentials)
            cm.rename_table('table_name', 'NEW TABLE NAME', 'fail')
        # Then
        assert str(e.value) == ('Table "new_table_name" already exists in your CARTO account. '
                                'Please choose a different `new_table_name` or use '
                                'if_exists="replace" to overwrite it.')
    def test_rename_table_dest_exists_replace(self, mocker):
        """With if_exists='replace', renaming proceeds over an existing table."""
        # Given
        def has_table(table_name):
            if table_name == 'table_name':
                return True
            elif table_name == 'new_table_name':
                return True
        mocker.patch('cartoframes.io.managers.context_manager._create_auth_client')
        mocker.patch.object(ContextManager, 'has_table', side_effect=has_table)
        mock = mocker.patch.object(ContextManager, '_rename_table')
        # When
        cm = ContextManager(self.credentials)
        result = cm.rename_table('table_name', 'NEW TABLE NAME', 'replace')
        # Then
        mock.assert_called_once_with('table_name', 'new_table_name')
        assert result == 'new_table_name'
    def test_list_tables(self, mocker):
        """list_tables returns the account's table names as a DataFrame."""
        # Given
        Dataset = namedtuple('Dataset', ['name', 'updated_at'])
        mocker.patch('cartoframes.io.managers.context_manager._create_auth_client')
        mocker.patch.object(DatasetManager, 'filter', return_value=[
            Dataset('table_zero', 1), Dataset('table_one', 0)
        ])
        # When
        cm = ContextManager(self.credentials)
        tables = cm.list_tables()
        # Then
        assert DataFrame(['table_zero', 'table_one'], columns=['tables']).equals(tables)
    def test_list_tables_empty(self, mocker):
        """list_tables returns an empty frame when no tables exist."""
        # Given
        mocker.patch('cartoframes.io.managers.context_manager._create_auth_client')
        mocker.patch.object(DatasetManager, 'filter', return_value=[])
        # When
        cm = ContextManager(self.credentials)
        tables = cm.list_tables()
        # Then
        assert DataFrame(columns=['tables']).equals(tables)
    def test_retry_copy_decorator(self):
        """retry_copy propagates CartoRateLimitException when retry_times=0."""
        @retry_copy
        def test_function(retry_times):
            # Minimal fake of the HTTP response CartoRateLimitException reads.
            class ResponseMock:
                def __init__(self):
                    self.text = 'My text'
                    self.headers = {
                        'Carto-Rate-Limit-Limit': 1,
                        'Carto-Rate-Limit-Remaining': 1,
                        'Retry-After': 1,
                        'Carto-Rate-Limit-Reset': 1
                    }
            response_mock = ResponseMock()
            raise CartoRateLimitException(response_mock)
        with pytest.raises(CartoRateLimitException):
            test_function(retry_times=0)
    def test_create_table_from_query_cartodbfy(self, mocker):
        """create_table_from_query(cartodbfy=True) runs CDB_CartodbfyTable."""
        # Given
        mocker.patch.object(ContextManager, 'has_table', return_value=False)
        mocker.patch.object(ContextManager, 'get_schema', return_value='schema')
        mock = mocker.patch.object(ContextManager, 'execute_long_running_query')
        # When
        cm = ContextManager(self.credentials)
        cm.create_table_from_query('SELECT * FROM table_name', '__new_table_name__', if_exists='fail', cartodbfy=True)
        # Then
        mock.assert_called_with("SELECT CDB_CartodbfyTable('schema', '__new_table_name__')")
    def test_create_table_from_query_cartodbfy_default(self, mocker):
        """Cartodbfy-ing also happens when cartodbfy is left at its default."""
        # Given
        mocker.patch.object(ContextManager, 'has_table', return_value=False)
        mocker.patch.object(ContextManager, 'get_schema', return_value='schema')
        mock = mocker.patch.object(ContextManager, 'execute_long_running_query')
        # When
        cm = ContextManager(self.credentials)
        cm.create_table_from_query('SELECT * FROM table_name', '__new_table_name__', if_exists='fail')
        # Then
        mock.assert_called_with("SELECT CDB_CartodbfyTable('schema', '__new_table_name__')")
|
|
"""
Provides functionality to group entities.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/group/
"""
import asyncio
import logging
import os
import voluptuous as vol
from homeassistant import config as conf_util, core as ha
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_ICON, CONF_NAME, STATE_CLOSED, STATE_HOME,
STATE_NOT_HOME, STATE_OFF, STATE_ON, STATE_OPEN, STATE_LOCKED,
STATE_UNLOCKED, STATE_UNKNOWN, ATTR_ASSUMED_STATE, SERVICE_RELOAD)
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_state_change
import homeassistant.helpers.config_validation as cv
from homeassistant.util.async import run_coroutine_threadsafe
DOMAIN = 'group'
ENTITY_ID_FORMAT = DOMAIN + '.{}'  # e.g. 'group.kitchen'
# Per-group configuration keys.
CONF_ENTITIES = 'entities'
CONF_VIEW = 'view'
CONF_CONTROL = 'control'
# State attributes exposed on group entities.
ATTR_AUTO = 'auto'
ATTR_ORDER = 'order'
ATTR_VIEW = 'view'
ATTR_VISIBLE = 'visible'
ATTR_CONTROL = 'control'
SERVICE_SET_VISIBILITY = 'set_visibility'
# set_visibility takes an optional entity_id list and a required boolean.
SET_VISIBILITY_SERVICE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Required(ATTR_VISIBLE): cv.boolean
})
RELOAD_SERVICE_SCHEMA = vol.Schema({})
_LOGGER = logging.getLogger(__name__)
def _conf_preprocess(value):
"""Preprocess alternative configuration formats."""
if not isinstance(value, dict):
value = {CONF_ENTITIES: value}
return value
# Schema for one group entry (after _conf_preprocess normalization).
GROUP_SCHEMA = vol.Schema({
    vol.Optional(CONF_ENTITIES): vol.Any(cv.entity_ids, None),
    CONF_VIEW: cv.boolean,
    CONF_NAME: cv.string,
    CONF_ICON: cv.icon,
    CONF_CONTROL: cv.string,
})
# Top-level schema: ordered mapping of group name -> GROUP_SCHEMA.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: cv.ordered_dict(vol.All(_conf_preprocess, GROUP_SCHEMA))
}, extra=vol.ALLOW_EXTRA)
# List of ON/OFF state tuples for groupable states
_GROUP_TYPES = [(STATE_ON, STATE_OFF), (STATE_HOME, STATE_NOT_HOME),
                (STATE_OPEN, STATE_CLOSED), (STATE_LOCKED, STATE_UNLOCKED)]
def _get_group_on_off(state):
    """Determine the group on/off states based on a state.

    Returns the (on, off) pair whose members include ``state``, or
    (None, None) when the state is not groupable.
    """
    return next((pair for pair in _GROUP_TYPES if state in pair),
                (None, None))
def is_on(hass, entity_id):
    """Test if the group state is in its ON-state."""
    state = hass.states.get(entity_id)
    if not state:
        # Unknown entity: cannot be on.
        return False
    group_on, _ = _get_group_on_off(state.state)
    # Only a recognized group type can be "on", and only when its current
    # state equals that type's ON value.
    return group_on is not None and state.state == group_on
def reload(hass):
    """Reload the group configuration (thread-safe wrapper)."""
    # Schedule the coroutine variant on the event loop.
    hass.add_job(async_reload, hass)
@asyncio.coroutine
def async_reload(hass):
    """Reload the group configuration.

    This method must be run in the event loop.
    """
    yield from hass.services.async_call(DOMAIN, SERVICE_RELOAD)
def set_visibility(hass, entity_id=None, visible=True):
    """Hide or shows a group."""
    # Blocking (synchronous) service call; entity_id of None targets all
    # groups extracted by the service handler.
    data = {ATTR_ENTITY_ID: entity_id, ATTR_VISIBLE: visible}
    hass.services.call(DOMAIN, SERVICE_SET_VISIBILITY, data)
def expand_entity_ids(hass, entity_ids):
    """Return entity_ids with group entity ids replaced by their members.

    Async friendly.
    """
    found_ids = []

    for entity_id in entity_ids:
        if not isinstance(entity_id, str):
            # Ignore anything that is not an entity id string.
            continue

        entity_id = entity_id.lower()

        try:
            domain, _ = ha.split_entity_id(entity_id)

            if domain != DOMAIN:
                # Plain entity: keep it once, preserving first-seen order.
                if entity_id not in found_ids:
                    found_ids.append(entity_id)
            else:
                # Group entity: recurse into its members.
                members = expand_entity_ids(
                    hass, get_entity_ids(hass, entity_id))
                found_ids.extend(
                    member for member in members
                    if member not in found_ids)
        except AttributeError:
            # Raised by split_entity_id if entity_id is not a string
            pass

    return found_ids
def get_entity_ids(hass, entity_id, domain_filter=None):
    """Get members of this group.

    Async friendly.
    """
    group = hass.states.get(entity_id)

    # No such state, or the state carries no member list.
    if not group or ATTR_ENTITY_ID not in group.attributes:
        return []

    entity_ids = group.attributes[ATTR_ENTITY_ID]
    if not domain_filter:
        return entity_ids

    # Keep only members belonging to the requested domain.
    prefix = domain_filter.lower() + '.'
    return [member for member in entity_ids if member.startswith(prefix)]
@asyncio.coroutine
def async_setup(hass, config):
    """Set up all groups defined in the configuration."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)

    yield from _async_process_config(hass, config, component)

    # Load the service descriptions from YAML in an executor so file I/O
    # does not block the event loop.
    descriptions = yield from hass.loop.run_in_executor(
        None, conf_util.load_yaml_config_file, os.path.join(
            os.path.dirname(__file__), 'services.yaml')
    )

    @asyncio.coroutine
    def reload_service_handler(service_call):
        """Remove all groups and load new ones from config."""
        conf = yield from component.async_prepare_reload()
        if conf is None:
            # Reload preparation failed (e.g. invalid config); keep current.
            return
        yield from _async_process_config(hass, conf, component)

    @asyncio.coroutine
    def visibility_service_handler(service):
        """Change visibility of a group."""
        visible = service.data.get(ATTR_VISIBLE)
        # expand_group=False: target the group entities themselves, not
        # their members.
        tasks = [group.async_set_visible(visible) for group
                 in component.async_extract_from_service(service,
                                                         expand_group=False)]
        yield from asyncio.wait(tasks, loop=hass.loop)

    hass.services.async_register(
        DOMAIN, SERVICE_SET_VISIBILITY, visibility_service_handler,
        descriptions[DOMAIN][SERVICE_SET_VISIBILITY],
        schema=SET_VISIBILITY_SERVICE_SCHEMA)

    hass.services.async_register(
        DOMAIN, SERVICE_RELOAD, reload_service_handler,
        descriptions[DOMAIN][SERVICE_RELOAD], schema=RELOAD_SERVICE_SCHEMA)

    return True
@asyncio.coroutine
def _async_process_config(hass, config, component):
    """Process group configuration."""
    groups = []
    for object_id, conf in config.get(DOMAIN, {}).items():
        name = conf.get(CONF_NAME, object_id)
        entity_ids = conf.get(CONF_ENTITIES) or []
        icon = conf.get(CONF_ICON)
        view = conf.get(CONF_VIEW)
        control = conf.get(CONF_CONTROL)

        # Don't create tasks and await them all. The order is important as
        # groups get a number based on creation order.
        group = yield from Group.async_create_group(
            hass, name, entity_ids, icon=icon, view=view,
            control=control, object_id=object_id)
        groups.append(group)

    if groups:
        yield from component.async_add_entities(groups)
class Group(Entity):
    """Track a group of entity ids."""

    def __init__(self, hass, name, order=None, user_defined=True, icon=None,
                 view=False, control=None):
        """Initialize a group.

        This Object has factory function for creation.
        """
        self.hass = hass
        self._name = name
        # Aggregate state of the members; STATE_UNKNOWN until derived.
        self._state = STATE_UNKNOWN
        self._user_defined = user_defined
        self._order = order
        self._icon = icon
        self._view = view
        # Lower-cased entity ids this group watches.
        self.tracking = []
        # The ON/OFF state pair for this group's type; both stay None until
        # a member's state matches one of the known pairs.
        self.group_on = None
        self.group_off = None
        self._assumed_state = False
        # Unsubscribe callback from async_track_state_change; None while not
        # tracking.
        self._async_unsub_state_changed = None
        self._visible = True
        self._control = control

    @staticmethod
    def create_group(hass, name, entity_ids=None, user_defined=True,
                     icon=None, view=False, control=None, object_id=None):
        """Initialize a group (thread-safe wrapper of the async factory)."""
        return run_coroutine_threadsafe(
            Group.async_create_group(hass, name, entity_ids, user_defined,
                                     icon, view, control, object_id),
            hass.loop).result()

    @staticmethod
    @asyncio.coroutine
    def async_create_group(hass, name, entity_ids=None, user_defined=True,
                           icon=None, view=False, control=None,
                           object_id=None):
        """Initialize a group.

        This method must be run in the event loop.
        """
        group = Group(
            hass, name,
            # Order index: number of groups that already exist.
            order=len(hass.states.async_entity_ids(DOMAIN)),
            user_defined=user_defined, icon=icon, view=view,
            control=control)

        group.entity_id = async_generate_entity_id(
            ENTITY_ID_FORMAT, object_id or name, hass=hass)

        # run other async stuff
        if entity_ids is not None:
            yield from group.async_update_tracked_entity_ids(entity_ids)
        else:
            yield from group.async_update_ha_state(True)

        return group

    @property
    def should_poll(self):
        """No need to poll because groups will update themselves."""
        return False

    @property
    def name(self):
        """Return the name of the group."""
        return self._name

    @property
    def state(self):
        """Return the state of the group."""
        return self._state

    @property
    def icon(self):
        """Return the icon of the group."""
        return self._icon

    @asyncio.coroutine
    def async_set_visible(self, visible):
        """Change visibility of the group."""
        if self._visible != visible:
            self._visible = visible
            yield from self.async_update_ha_state()

    @property
    def hidden(self):
        """If group should be hidden or not."""
        # Visibility from set_visibility service overrides
        if self._visible:
            # Auto-created (non-user-defined) groups and views are hidden.
            return not self._user_defined or self._view
        return True

    @property
    def state_attributes(self):
        """Return the state attributes for the group."""
        data = {
            ATTR_ENTITY_ID: self.tracking,
            ATTR_ORDER: self._order,
        }
        if not self._user_defined:
            data[ATTR_AUTO] = True
        if self._view:
            data[ATTR_VIEW] = True
        if self._control:
            data[ATTR_CONTROL] = self._control
        return data

    @property
    def assumed_state(self):
        """Test if any member has an assumed state."""
        return self._assumed_state

    def update_tracked_entity_ids(self, entity_ids):
        """Update the member entity IDs (thread-safe wrapper)."""
        run_coroutine_threadsafe(
            self.async_update_tracked_entity_ids(entity_ids), self.hass.loop
        ).result()

    @asyncio.coroutine
    def async_update_tracked_entity_ids(self, entity_ids):
        """Update the member entity IDs.

        This method must be run in the event loop.
        """
        yield from self.async_stop()
        self.tracking = tuple(ent_id.lower() for ent_id in entity_ids)
        # Forget the derived group type; it is re-derived from the new
        # members on the next update.
        self.group_on, self.group_off = None, None

        yield from self.async_update_ha_state(True)
        self.async_start()

    def start(self):
        """Start tracking members."""
        self.hass.add_job(self.async_start)

    @callback
    def async_start(self):
        """Start tracking members.

        This method must be run in the event loop.
        """
        # Guard against double-subscription.
        if self._async_unsub_state_changed is None:
            self._async_unsub_state_changed = async_track_state_change(
                self.hass, self.tracking, self._async_state_changed_listener
            )

    def stop(self):
        """Unregister the group from Home Assistant (thread-safe)."""
        run_coroutine_threadsafe(self.async_stop(), self.hass.loop).result()

    @asyncio.coroutine
    def async_stop(self):
        """Unregister the group from Home Assistant.

        This method must be run in the event loop.
        """
        yield from self.async_remove()

    @asyncio.coroutine
    def async_update(self):
        """Query all members and determine current group state."""
        # Reset so _async_update_group_state recomputes from scratch.
        self._state = STATE_UNKNOWN
        self._async_update_group_state()

    def async_remove(self):
        """Remove group from HASS.

        This method must be run in the event loop and returns a coroutine.
        """
        # Unsubscribe from member state changes before removal.
        if self._async_unsub_state_changed:
            self._async_unsub_state_changed()
            self._async_unsub_state_changed = None

        return super().async_remove()

    @asyncio.coroutine
    def _async_state_changed_listener(self, entity_id, old_state, new_state):
        """Respond to a member state changing.

        This method must be run in the event loop.
        """
        # removed
        if self._async_unsub_state_changed is None:
            return

        self._async_update_group_state(new_state)
        yield from self.async_update_ha_state()

    @property
    def _tracking_states(self):
        """The states that the group is tracking."""
        states = []

        for entity_id in self.tracking:
            state = self.hass.states.get(entity_id)

            # Entities that currently have no state are skipped.
            if state is not None:
                states.append(state)

        return states

    @callback
    def _async_update_group_state(self, tr_state=None):
        """Update group state.

        Optionally you can provide the only state changed since last update
        allowing this method to take shortcuts.

        This method must be run in the event loop.
        """
        # To store current states of group entities. Might not be needed.
        states = None
        gr_state = self._state
        gr_on = self.group_on
        gr_off = self.group_off

        # We have not determined type of group yet
        if gr_on is None:
            if tr_state is None:
                states = self._tracking_states

                # Derive the ON/OFF pair from the first member whose state
                # matches a known groupable pair.
                for state in states:
                    gr_on, gr_off = \
                        _get_group_on_off(state.state)
                    if gr_on is not None:
                        break
            else:
                gr_on, gr_off = _get_group_on_off(tr_state.state)

            if gr_on is not None:
                self.group_on, self.group_off = gr_on, gr_off

        # We cannot determine state of the group
        if gr_on is None:
            return

        # Full recompute is needed when no single changed state was given,
        # when the group might have to turn off (was on, member went off),
        # or when the changed state is outside the ON/OFF pair.
        if tr_state is None or ((gr_state == gr_on and
                                 tr_state.state == gr_off) or
                                tr_state.state not in (gr_on, gr_off)):
            if states is None:
                states = self._tracking_states

            if any(state.state == gr_on for state in states):
                self._state = gr_on
            else:
                self._state = gr_off
        elif tr_state.state in (gr_on, gr_off):
            # Shortcut: a single member moving within the pair can only
            # switch the group on (or confirm its state).
            self._state = tr_state.state

        # Recompute the assumed-state flag; shortcut when the changed member
        # newly reports an assumed state.
        if tr_state is None or self._assumed_state and \
           not tr_state.attributes.get(ATTR_ASSUMED_STATE):
            if states is None:
                states = self._tracking_states

            self._assumed_state = any(
                state.attributes.get(ATTR_ASSUMED_STATE) for state
                in states)
        elif tr_state.attributes.get(ATTR_ASSUMED_STATE):
            self._assumed_state = True
|
|
"""
.. iterations.py
This module implements a number of :term:`iterator` building blocks,
inspired by Python's built-in module `itertools`_.
.. _itertools: http://docs.python.org/2/library/itertools.html
"""
## Framework
import itertools as it
import operator as op
## Infinity
# Convenience alias used as an "unbounded" default for size limits below.
inf = float('inf')

###################################
## ----- General functions ----- ##
###################################
def accumulate(iterable, func=op.add):
    """ Return running totals (partial sum :term:`generator`).

    :param iterable: the iterable to accumulate.
    :type iterable: :term:`iterable`
    :param func: the function to accumulate with.
    :type func: :class:`~.Callable`
    :rtype: :term:`generator`

    Examples:

    >>> list(accumulate([1, 2, 3, 4, 5]))         # --> [1, 3, 6, 10, 15]
    >>> list(accumulate([1, 2, 3, 4, 5], op.mul)) # --> [1, 2, 6, 24, 120]
    """
    # The local iterator is deliberately *not* named ``it``: that would
    # shadow the module-level ``itertools`` alias.
    iterator = iter(iterable)
    try:
        total = next(iterator)
    except StopIteration:
        # Empty input yields nothing.  The explicit guard is required on
        # Python 3.7+ (PEP 479), where a StopIteration escaping a generator
        # becomes a RuntimeError instead of ending it.
        return
    yield total
    for element in iterator:
        total = func(total, element)
        yield total
def append(iterable, element):
    """ Append *element* to the "end" of *iterable*.

    :rtype: lazy :term:`iterator` (see :func:`itertools.chain`)
    """
    # BUG FIX: ``chain`` is never imported bare -- this module imports
    # ``itertools as it``, so the unqualified name raised NameError.
    return it.chain(iterable, (element,))
def grouper(iterable, n):
    """ Collect data into fixed-length chunks or blocks.

    :param iterable: the iterable from which fixed-length chunks should be
        taken.
    :type iterable: :term:`iterable`
    :param n: the length of the chunks.
    :type n: :class:`int`
    :rtype: :term:`generator`

    Example:

    >>> grouper('ABCDEFG', 3) # --> ABC DEF
    """
    # The same iterator repeated n times: each izip step pulls n consecutive
    # elements.  A trailing partial chunk (the 'G' above) is dropped because
    # izip stops at the shortest input.  NOTE: ``it.izip`` is Python 2 only.
    args = [iter(iterable)] * n
    return it.izip(*args)
def mzip(mapping):
    """ Return a zipped version of a mapping of iterables, as an iterable
    (like :func:`it.izip`).

    :param mapping: the mapping to be zipped
    :type iterable: :term:`mapping`
    :rtype: :term:`generator`

    Example:

    >>> list(mzip({"a": [1, 2, 3], "b": (2, 3), "c": [3, 4, 5, 6]}))
    [{'a': 1, 'b': 2, 'c': 3}, {'a': 2, 'b': 3, 'c': 4}]
    """
    # Split the mapping into parallel key/value tuples, zip the value
    # iterables in lockstep (stopping at the shortest, as the example
    # shows), and rebuild one dict per step.  Python 2 only
    # (``iteritems``/``izip``).
    keys, values = unzip(mapping.iteritems())
    return (dict(it.izip(keys, val_tup)) for val_tup in it.izip(*values))
def pairwise(iterable):
    """ Return a :term:`generator` of pairs of following items of iterable.

    :param iterable: the iterable from which pairs of items should be taken.
    :type iterable: :term:`iterable`
    :rtype: :term:`generator`

    Example:

    >>> pairwise([0, 1, 2, 3]) # --> (0, 1), (1, 2), (2, 3)
    """
    # Two clones of the iterator; advancing the second by one gives the
    # sliding window.  ``it.izip`` is Python 2 only.
    a, b = it.tee(iterable)
    next(b, None)
    return it.izip(a, b)
def powerset(sequence, minsize=0, maxsize=None):
    """ Return a generator over all subsequences ("subsets") of *sequence*
    whose length is between *minsize* and *maxsize*, inclusive.

    :param sequence: the sequence whose subsets should be generated.
    :type sequence: :term:`sequence`
    :param minsize: the minimal subset size.
    :type minsize: :class:`int`
    :param maxsize: the maximal subset size; ``None`` (the default, and any
        value >= ``len(sequence)`` such as ``inf``) means no upper bound.
    :rtype: :term:`generator` of :class:`tuple`
    """
    # BUG FIX: ``chain``/``combinations`` were referenced bare but this
    # module imports ``itertools as it`` -- the original raised NameError.
    # ``range`` (not ``xrange``) keeps this working on Python 3 too; the
    # range is tiny so eagerness on Python 2 is harmless.
    if maxsize is None:
        maxsize = len(sequence)
    else:
        maxsize = min(maxsize, len(sequence))
    return it.chain.from_iterable(it.combinations(sequence, r)
                                  for r in range(minsize, maxsize + 1))
def roundrobin(*iterables):
    """ roundrobin('ABC', 'D', 'EF') --> A D E B F C """
    ## Recipe credited to George Sakkis
    # Python 2 only: relies on the bound ``.next`` method of iterators.
    pending = len(iterables)
    nexts = it.cycle(iter(iterable).next for iterable in iterables)
    while pending:
        try:
            for next in nexts:
                yield next()
        except StopIteration:
            # One input ran dry: rebuild the cycle without its ``next``.
            pending -= 1
            nexts = it.cycle(it.islice(nexts, pending))
def sfilter(predicate, *iterables):
    """ Return a :term:`generator` of :term`tuples <tuple>` that are filtered
    simultaneously according to a given *predicate*, which operates on the
    zipped tuples of the given *iterables*.

    :param predicate: the filtering predicate.
    :type predicate: boolean function
    :param iterables: the family of iterables that should be filtered
        simultaneously.
    :type iterables: ``*args``

    Example:

    >>> without_None = lambda t: None not in t
    >>> x = [None, 1, 2, 3]; y = [2, 7, 3, 4]; z = [3, 4, 5, None]
    >>> x1, y1, z1 = sfilter(without_None, x, y, z)
    >>> x1, y1, z1
    ((1, 2), (7, 3), (4, 5))
    """
    # Zip the inputs, drop tuples failing the predicate, then unzip back
    # into one sequence per original iterable.
    return unzip(t for t in zip(*iterables) if predicate(t))
def substrings(string, maxlen=inf):
    """ Generate all subsequence-strings of *string* of length at most
    *maxlen* (the characters of each subsequence joined back into a str). """
    return ("".join(chars) for chars in powerset(string, maxsize=maxlen))
def subtuples(tup, maxlen=inf):
    """ Generate all subsequence-tuples of *tup* of length at most
    *maxlen*. """
    return (tuple(items) for items in powerset(tup, maxsize=maxlen))
def triwise(iterable):
    """ Return a :term:`generator` of triples of following items of iterable.

    :param iterable: the iterable from which triples of items should be taken.
    :type iterable: :term:`iterable`
    :rtype: :term:`generator`

    Example:

    >>> triwise([0, 1, 2, 3, 4]) # --> (0, 1, 2), (1, 2, 3), (2, 3, 4)
    """
    # Three clones, offset by 0, 1 and 2 elements respectively, zipped in
    # lockstep.  ``it.izip`` is Python 2 only.
    a, b, c = it.tee(iterable, 3)
    next(b, None)
    next(c, None)
    next(c, None)
    return it.izip(a, b, c)
def nwise(iterable, n=2):
    """ Return a generator of n-tuples of consecutive items of *iterable*
    (the generalization of :func:`pairwise` and :func:`triwise`). """
    iters = it.tee(iterable, n)
    for i, j in enumerate(iters):
        # Standard itertools "consume" recipe: advancing to the empty slice
        # [i:i] discards exactly the first i elements of clone j.
        next(it.islice(j, i, i), None)
    return it.izip(*iters)
def uniquify(iterable):
    """ Return a :term:`generator` of elements of *iterable*, without
    repeating any element.  This function is order-preserving.

    :param iterable: the iterable to uniquify.
    :type iterable: :term:`iterable`
    :rtype: :term:`generator`

    Example:

    >>> list(uniquify([1, 2, 2, 3, 3, 5, 1, 4, 5, 4]))
    [1, 2, 3, 5, 4]

    .. warning:: any non-hashable element of the given *iterable* will be
        generated, even if appears in the iterable more than once.
    """
    seen = set()
    for element in iterable:
        try:
            is_new = element not in seen
            if is_new:
                seen.add(element)
        except TypeError:
            # Non-hashable: cannot be tracked, so always emit it.
            yield element
        else:
            if is_new:
                yield element
def unzip(zipped):
    """ Return a :term:`generator` reverses the work of zip/it.izip.

    :param zipped: the iterable to unzip.
    :type zipped: :term:`iterable` of :term:`iterables <iterable>`.
    :rtype: :term:`generator`

    Examples:

    >>> list(unzip(zip(xrange(3), xrange(2, 5))))
    [(0, 1, 2), (2, 3, 4)]
    >>> list(unzip(it.izip(xrange(3), xrange(2, 5))))
    [(0, 1, 2), (2, 3, 4)]

    .. note:: The returned elements of the generator are always tuples.
        This is a result of how :func:`zip` works.
    """
    # zip is its own inverse when the rows are splatted back in.
    # ``it.izip`` is Python 2 only.
    return it.izip(*zipped)
def transpose(zipped, n):
    """ Return *n* generators, where *n* is the number of elements in each of
    the zipped generators (they can have more, but only these will be
    considered). """
    # One tee clone per column; each yielded generator extracts column i
    # from its own clone, so consuming one column does not consume another.
    # The default argument binds i at definition time (avoids the
    # late-binding closure pitfall).  ``xrange`` is Python 2 only.
    teed = it.tee(zipped, n)
    for i in xrange(n):
        gen = lambda teed, i=i: (e[i] for e in teed[i])
        yield gen(teed)
|
|
# Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train NMT with low level API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import threading
import time
import tensorflow as tf
from tensorflow.contrib import tpu
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.framework import graph_io
from mlperf_compliance import mlperf_log
from utils import iterator_utils
from utils import vocab_utils
_INITIAL_LOSS = 1e7  # dummy first loss fed into the TPU training loop body
def wrap_computation_in_while_loop(op_fn, n, host_name):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def computation(i):
    # Rebuild the ops for this iteration and make the counter increment
    # depend on them, so the loop really executes them n times.
    ops = op_fn()
    if not isinstance(ops, list):
      ops = [ops]
    with tf.control_dependencies(ops):
      return i + 1

  with tf.device(device_for_host(host_name)):
    # parallel_iterations=1 forces the iterations to run sequentially.
    return tf.while_loop(
        lambda i: tf.less(i, n),
        computation, [tf.constant(0)],
        parallel_iterations=1)
def get_resolver(hparams):
  """Return a TPUClusterResolver for the configured target, or None.

  ``hparams.master`` takes precedence over ``hparams.tpu_name``; when both
  are empty/None we are running locally and no resolver is needed.
  """
  tpu_target = hparams.master or hparams.tpu_name
  if tpu_target:
    return tf.contrib.cluster_resolver.TPUClusterResolver(tpu_target)
  return None
def get_host(resolver, host_id=0):
  """Return the job/task device prefix for host *host_id*."""
  if resolver is None:
    # Local mode: a single replica/task, host_id is irrelevant.
    return "/replica:0/task:0"
  job_name = resolver.get_job_name() or "tpu_worker"
  return "/job:{}/task:{:d}".format(job_name, host_id)
def device_for_host(host_name):
  """Return the CPU:0 device string under *host_name*."""
  return "%s/device:CPU:0" % host_name
def device_for_tpu_core(host_name, core=0):
  """Return the replicated TPU core device string under *host_name*."""
  return "{}/device:TPU_REPLICATED_CORE:{:d}".format(host_name, core)
class TrainLowLevelRunner(object):
  """Run Train via direct session.run calls."""

  def __init__(self, iterations, hparams, per_host_v1=False):
    # iterations: steps executed on-device per session.run of the loss.
    # per_host_v1: use the v1 (whole-host) infeed layout instead of the v2
    # per-shard layout.
    tf.logging.info("TrainLowLevelRunner: constructor")
    self.feature_structure = {}
    self.loss = None
    self.infeed_queue = []
    self.enqueue_ops = []
    self.dataset_initializer = []
    self.is_local = ((hparams.master == "") and (hparams.tpu_name is None))
    self.per_host_v1 = per_host_v1
    self.iterations = iterations
    self.sess = None
    self.graph = tf.Graph()
    self.hparams = hparams
    with self.graph.as_default():
      self.tpu_init = [tpu.initialize_system()]
      self.tpu_shutdown = tpu.shutdown_system()
    self.resolver = get_resolver(hparams)
    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    isolate_session_state=True)
    if self.hparams.tpu_name is None:
      master = self.hparams.master
    else:
      # Resolve the TPU cluster and fold its spec into the session config.
      cluster_spec = self.resolver.cluster_spec()
      if cluster_spec:
        session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
      master = self.resolver.get_master()
    self.sess = tf.Session(master, graph=self.graph, config=session_config)
    self.sess.run(self.tpu_init)

  def initialize(self, input_fn, params):
    """Initialize all the things required for training."""
    tf.logging.info("TrainLowLevelRunner: initialize method")
    num_hosts = self.hparams.num_shards // self.hparams.num_shards_per_host

    def get_enqueue_ops_fn(host_id):
      """Generate the enqueue ops graph function."""
      # Tell the input pipeline which shard of the dataset this host reads.
      params["dataset_num_shards"] = num_hosts
      params["dataset_index"] = host_id
      output = input_fn(params)
      device = device_for_host(get_host(self.resolver, host_id))
      with tf.device(device):
        if self.per_host_v1:
          # NOTE(review): reaches into a private attribute of input_fn --
          # assumes the v1 input_fn exposes its iterator; confirm upstream.
          iterator = input_fn._iterator
        else:
          # output is a dataset
          iterator = output.make_initializable_iterator()
          self.dataset_initializer.append(iterator.initializer)

        def enqueue_ops_fn_v1():
          """Enqueue ops function for one host."""
          features = output
          self.feature_structure["features"] = features
          self.feature_structure["labels"] = {}
          flattened_inputs = data_nest.flatten(self.feature_structure)
          infeed = tpu.InfeedQueue(
              tuple_types=[t.dtype for t in flattened_inputs],
              tuple_shapes=[t.shape for t in flattened_inputs],
              shard_dimensions=None)
          infeed.set_number_of_shards(self.hparams.num_shards_per_host)
          self.infeed_queue.append(infeed)

          def tpu_ordinal_fn(shard_index_in_host):
            # Map host-local shard index to a TPU core ordinal.
            return shard_index_in_host % self.hparams.num_shards_per_host

          per_host_enqueue_ops = (
              infeed.split_inputs_and_generate_enqueue_ops(
                  flattened_inputs,
                  placement_function=lambda x: device,
                  tpu_ordinal_function=tpu_ordinal_fn))
          return per_host_enqueue_ops

        def enqueue_ops_fn_v2():
          """Enqueue ops function for one host."""
          per_host_sharded_inputs = []
          control_deps = []
          for _ in range(self.hparams.num_shards_per_host):
            # Chain get_next calls with control deps so per-shard batches
            # are drawn from the iterator in a deterministic order.
            with tf.control_dependencies(control_deps):
              features = iterator.get_next()
            self.feature_structure["features"] = features
            self.feature_structure["labels"] = {}
            flattened_inputs = data_nest.flatten(self.feature_structure)
            control_deps.extend(flattened_inputs)
            per_host_sharded_inputs.append(flattened_inputs)
          infeed = tpu.InfeedQueue(
              number_of_tuple_elements=len(per_host_sharded_inputs[0]))
          self.infeed_queue.append(infeed)

          def tpu_ordinal_fn(shard_index_in_host):
            return shard_index_in_host % self.hparams.num_shards_per_host

          return infeed.generate_enqueue_ops(
              per_host_sharded_inputs,
              tpu_ordinal_function=tpu_ordinal_fn)

      if self.per_host_v1:
        return enqueue_ops_fn_v1
      else:
        return enqueue_ops_fn_v2

    with self.graph.as_default():
      for i in range(num_hosts):
        if self.per_host_v1:
          # v1: build the enqueue ops once, directly.
          self.enqueue_ops.append(get_enqueue_ops_fn(i)())
        else:
          # v2: run the enqueue ops `iterations` times inside a while_loop
          # so one session.run feeds a whole training segment.
          self.enqueue_ops.append(
              wrap_computation_in_while_loop(
                  get_enqueue_ops_fn(i), n=self.iterations,
                  host_name=get_host(self.resolver, i)))
      init_tables = tf.tables_initializer()
    self.sess.run(init_tables)
    # Initialize dataset variables
    self.sess.run(self.dataset_initializer)

  def build_model(self, model_fn, params):
    """Build the TPU model and infeed enqueue ops."""
    tf.logging.info("TrainLowLevelRunner: build_model method")

    def tpu_train_step(loss):
      """Generate the TPU graph."""
      # The incoming loop-carried loss is unused; only the final one
      # produced below is returned.
      del loss
      values = self.infeed_queue[0].generate_dequeue_op(tpu_device=0)
      unflattened_inputs = data_nest.pack_sequence_as(self.feature_structure,
                                                      values)
      features = unflattened_inputs["features"]
      labels = unflattened_inputs["labels"]
      estimator_spec = model_fn(features, labels, tf.estimator.ModeKeys.TRAIN,
                                params)
      loss, train_op = estimator_spec.loss, estimator_spec.train_op
      # Make the returned loss depend on the optimizer step.
      with tf.control_dependencies([train_op]):
        return tf.identity(loss)

    def train_loop():
      return tpu.repeat(self.iterations, tpu_train_step, [_INITIAL_LOSS])

    with self.graph.as_default():
      (self.loss,) = tpu.shard(
          train_loop,
          inputs=[],
          num_shards=self.hparams.num_shards,
          outputs_from_all_shards=False,
      )
      global_initializer = tf.global_variables_initializer()
      local_initializer = tf.local_variables_initializer()
      graph_io.write_graph(
          self.graph.as_graph_def(add_shapes=True), self.hparams.out_dir,
          "graph.pbtxt")
      self.saver = tf.train.Saver()
    self.sess.run(global_initializer)
    self.sess.run(local_initializer)

  def train(self, start_step, train_steps, num_threads=2):
    """Run the Train loop on the TPU device."""
    tf.logging.info("TrainLowLevelRunner: train for %d steps in total",
                    train_steps)

    def infeed_thread_fn(sess, enqueue_ops):
      # Feed data on a background thread so the device never starves.
      assert train_steps % self.iterations == 0
      if self.per_host_v1:
        # v1 enqueue ops feed one step per run.
        steps = train_steps
      else:
        # v2 enqueue ops are wrapped in a while_loop of `iterations` steps.
        steps = train_steps // self.iterations
      for _ in range(steps):
        sess.run([enqueue_ops])

    def checkpoint_thread_fn(saver, sess):
      # NOTE(review): reads `cur_step` via closure at save time; the main
      # loop may have advanced it by then, so the checkpoint name can be
      # off by one segment -- confirm this is intended.
      saver.save(sess, self.hparams.out_dir + "/model.ckpt-%d" %
                 (start_step + cur_step))

    infeed_thread = threading.Thread(
        target=infeed_thread_fn, args=(self.sess, self.enqueue_ops))
    infeed_thread.start()
    cur_step = 0
    thread_id = 0
    # One slot per checkpoint thread; reused round-robin.
    checkpoint_threads = []
    for i in range(num_threads):
      checkpoint_threads.append(None)
    while cur_step < train_steps:
      start = time.time()
      tf.logging.info("TrainLowLevelRunner: start train step:%d", cur_step)
      cur_step += self.iterations
      # Blocks until the device finishes `iterations` training steps.
      loss = self.sess.run([self.loss])
      tf.logging.info("TrainLowLevelRunner: sess run loss: %s", loss)
      # Wait for the previous occupant of this slot before reusing it.
      if checkpoint_threads[thread_id] is not None:
        checkpoint_threads[thread_id].join()
      checkpoint_threads[thread_id] = threading.Thread(
          target=checkpoint_thread_fn, args=(self.saver, self.sess))
      checkpoint_threads[thread_id].start()
      thread_id += 1
      if thread_id >= num_threads:
        thread_id = 0
      end = time.time()
      tf.logging.info(
          "TrainLowLevelRunner: step time {} sec {} examples/sec".format(
              end - start,
              self.iterations * self.hparams.batch_size / (end - start)))
    # Drain the infeed and all outstanding checkpoint writers.
    infeed_thread.join()
    for i in range(num_threads):
      if checkpoint_threads[i] is not None:
        checkpoint_threads[i].join()
        checkpoint_threads[i] = None
class EvalLowLevelRunner(object):
  """Run eval via direct session.run calls."""

  def __init__(self, eval_steps, hparams):
    tf.logging.info("EvalLowLevelRunner: constructor")
    tf.logging.info("eval_steps: %s", eval_steps)
    self.feature_structure = {}
    self.infeed_queue = []
    self.enqueue_ops = []
    self.dataset_initializer = []
    self.is_local = ((hparams.master == "") and (hparams.tpu_name is None))
    self.eval_steps = eval_steps
    self.sess = None
    self.eval_op = None
    self.graph = tf.Graph()
    self.hparams = hparams
    # Prediction tensors routed through the TPU outfeed, plus their names.
    self.outfeed_tensors = []
    self.outfeed_names = []
    self.dequeue_ops = {}
    self.saver = None
    with self.graph.as_default():
      self.tpu_init = [tpu.initialize_system()]
      self.tpu_shutdown = tpu.shutdown_system()
    self.resolver = get_resolver(hparams)
    session_config = tf.ConfigProto(
        allow_soft_placement=True,
        operation_timeout_in_ms=600 * 60 * 1000)  # 10 hours
    if self.hparams.tpu_name is None:
      master = self.hparams.master
    else:
      cluster_spec = self.resolver.cluster_spec()
      if cluster_spec:
        session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
      master = self.resolver.get_master()
    self.sess = tf.Session(
        master,
        graph=self.graph,
        config=session_config)
    self.sess.run(self.tpu_init)

  def initialize(self, input_fn, params):
    """Initialize all the things required for evaluation."""
    tf.logging.info("EvalLowLevelRunner: initialize method")

    def get_enqueue_ops_fn():
      """Generate the enqueue ops graph function."""
      dataset = input_fn(params)
      with tf.device(device_for_host(get_host(self.resolver))):
        iterator = dataset.make_initializable_iterator()
        self.dataset_initializer.append(iterator.initializer)

        def enqueue_ops_fn():
          """Enqueue ops function for one host."""
          per_host_sharded_inputs = []
          control_deps = []
          for _ in range(self.hparams.num_shards_per_host):
            # Serialize get_next calls so shards receive batches in a
            # deterministic order.
            with tf.control_dependencies(control_deps):
              features = iterator.get_next()
            self.feature_structure["features"] = features
            flattened_inputs = data_nest.flatten(self.feature_structure)
            control_deps.extend(flattened_inputs)
            per_host_sharded_inputs.append(flattened_inputs)
          infeed = tpu.InfeedQueue(
              number_of_tuple_elements=len(per_host_sharded_inputs[0]))
          self.infeed_queue.append(infeed)

          def tpu_ordinal_fn(shard_index_in_host):
            return shard_index_in_host % self.hparams.num_shards_per_host

          return infeed.generate_enqueue_ops(
              per_host_sharded_inputs,
              tpu_ordinal_function=tpu_ordinal_fn)

        return enqueue_ops_fn

    with self.graph.as_default():
      # Run the enqueue ops eval_steps times from one session.run call.
      self.enqueue_ops.append(
          wrap_computation_in_while_loop(
              get_enqueue_ops_fn(), n=self.eval_steps,
              host_name=get_host(self.resolver)))
      init_tables = tf.tables_initializer()
    self.sess.run(init_tables)

  def build_model(self, model_fn, params):
    """Build the TPU model and infeed enqueue ops."""
    tf.logging.info("EvalLowLevelRunner: build_model method")

    def tpu_eval_step():
      """Generate the TPU graph."""
      values = self.infeed_queue[0].generate_dequeue_op(tpu_device=0)
      unflattened_inputs = data_nest.pack_sequence_as(self.feature_structure,
                                                      values)
      features = unflattened_inputs["features"]
      estimator_spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT,
                                params)
      # Record every prediction tensor and push it through the outfeed.
      for k, v in six.iteritems(estimator_spec.predictions):
        self.outfeed_names.append(k)
        self.outfeed_tensors.append(v)
      with tf.device(device_for_tpu_core(get_host(self.resolver))):
        outfeed_enqueue_ops = tpu.outfeed_enqueue_tuple(self.outfeed_tensors)
      with tf.control_dependencies([outfeed_enqueue_ops]):
        return tf.no_op()

    def eval_loop():
      return tpu.repeat(self.eval_steps, tpu_eval_step, [])

    def create_dequeue_ops():
      """Create outfeed dequeue ops."""
      dequeue_ops = []
      tensor_dtypes = []
      tensor_shapes = []
      for v in self.outfeed_tensors:
        dequeue_ops.append([])
        tensor_dtypes.append(v.dtype)
        tensor_shapes.append(v.shape)
      for i in range(self.hparams.num_shards):
        with tf.device(device_for_host(get_host(self.resolver))):
          outfeed_tensors = tpu.outfeed_dequeue_tuple(
              dtypes=tensor_dtypes, shapes=tensor_shapes, device_ordinal=i)
          for j, item in enumerate(outfeed_tensors):
            dequeue_ops[j].append(item)
      # Concatenate the per-shard pieces of each prediction tensor.
      # NOTE(review): relies on `outfeed_tensors` leaking from the loop
      # above; assumes num_shards >= 1.
      for j in range(len(outfeed_tensors)):
        dequeue_ops[j] = tf.concat(dequeue_ops[j], axis=0)
      return dequeue_ops

    with self.graph.as_default():
      (self.eval_op,) = tpu.shard(
          eval_loop,
          inputs=[],
          num_shards=self.hparams.num_shards,
          outputs_from_all_shards=False,
      )
      for i, dequeue_tenor in enumerate(create_dequeue_ops()):
        self.dequeue_ops[self.outfeed_names[i]] = dequeue_tenor
      self.saver = tf.train.Saver()

  def predict(self, checkpoint_path=None):
    """Run the predict loop on the TPU device."""
    if not checkpoint_path:
      checkpoint_path = tf.train.latest_checkpoint(self.hparams.out_dir)
    self.saver.restore(self.sess, checkpoint_path)

    # Initialize dataset variables
    self.sess.run(self.dataset_initializer)

    # Infeed thread.
    def infeed_thread_fn(sess, enqueue_ops):
      sess.run([enqueue_ops])

    infeed_thread = threading.Thread(
        target=infeed_thread_fn, args=(self.sess, self.enqueue_ops))
    infeed_thread.start()

    # Eval thread.
    def eval_thread_fn(sess, eval_op):
      sess.run([eval_op])

    eval_thread = threading.Thread(
        target=eval_thread_fn, args=(self.sess, self.eval_op))
    eval_thread.start()

    # Main thread drains the outfeed one step at a time, yielding one dict
    # per example in the batch.
    for step in range(self.eval_steps):
      tf.logging.info("EvalLowLevelRunner: start eval step:%d", step)
      predictions = self.sess.run(self.dequeue_ops)
      for i in range(self.hparams.infer_batch_size):
        yield {key: value[i] for key, value in six.iteritems(predictions)}
    infeed_thread.join()
    eval_thread.join()
|
|
# coding: utf-8
from __future__ import absolute_import
from .base_model_ import Model
from datetime import date, datetime
from typing import List, Dict
from ..util import deserialize_model
class Occurrence(Model):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, occ_id: str=None, taxon: str=None, taxon_id: str=None,
             max_age: float=None, min_age: float=None, age_units: str=None,
             lat: float=None, lon: float=None, geog_coords: str=None):
    """
    Occurrence - a model defined in Swagger

    :param occ_id: The occ_id of this Occurrence.
    :type occ_id: str
    :param taxon: The taxon of this Occurrence.
    :type taxon: str
    :param taxon_id: The taxon_id of this Occurrence.
    :type taxon_id: str
    :param max_age: The max_age of this Occurrence.
    :type max_age: float
    :param min_age: The min_age of this Occurrence.
    :type min_age: float
    :param age_units: The age_units of this Occurrence.
    :type age_units: str
    :param lat: The lat of this Occurrence.
    :type lat: float
    :param lon: The lon of this Occurrence.
    :type lon: float
    :param geog_coords: The geog_coords of this Occurrence.
    :type geog_coords: str
    """
    # Attribute name -> declared type; consumed by the swagger
    # (de)serialization helpers.
    self.swagger_types = {
        'occ_id': str,
        'taxon': str,
        'taxon_id': str,
        'max_age': float,
        'min_age': float,
        'age_units': str,
        'lat': float,
        'lon': float,
        'geog_coords': str
    }

    # Attribute name -> JSON key in the wire format.
    self.attribute_map = {
        'occ_id': 'occ_id',
        'taxon': 'taxon',
        'taxon_id': 'taxon_id',
        'max_age': 'max_age',
        'min_age': 'min_age',
        'age_units': 'age_units',
        'lat': 'lat',
        'lon': 'lon',
        'geog_coords': 'geog_coords'
    }

    self._occ_id = occ_id
    self._taxon = taxon
    self._taxon_id = taxon_id
    self._max_age = max_age
    self._min_age = min_age
    self._age_units = age_units
    self._lat = lat
    self._lon = lon
    self._geog_coords = geog_coords
@classmethod
def from_dict(cls, dikt) -> 'Occurrence':
    """
    Returns the dict as a model

    :param dikt: A dict.
    :type: dict
    :return: The occurrence of this Occurrence.
    :rtype: Occurrence
    """
    # Delegates to the shared swagger helper, which maps dict keys onto the
    # class via attribute_map/swagger_types.
    return deserialize_model(dikt, cls)
@property
def occ_id(self) -> str:
    """Unique occurrence ID.

    :rtype: str
    """
    return self._occ_id

@occ_id.setter
def occ_id(self, occ_id: str):
    """Set the unique occurrence ID.

    :type occ_id: str
    """
    self._occ_id = occ_id
@property
def taxon(self) -> str:
    """Taxonomic name.

    :rtype: str
    """
    return self._taxon

@taxon.setter
def taxon(self, taxon: str):
    """Set the taxonomic name.

    :type taxon: str
    """
    self._taxon = taxon
@property
def taxon_id(self) -> str:
"""
Gets the taxon_id of this Occurrence.
Unique taxonomic ID
:return: The taxon_id of this Occurrence.
:rtype: str
"""
return self._taxon_id
@taxon_id.setter
def taxon_id(self, taxon_id: str):
"""
Sets the taxon_id of this Occurrence.
Unique taxonomic ID
:param taxon_id: The taxon_id of this Occurrence.
:type taxon_id: str
"""
self._taxon_id = taxon_id
@property
def max_age(self) -> float:
"""
Gets the max_age of this Occurrence.
Oldest age of the occurrence
:return: The max_age of this Occurrence.
:rtype: float
"""
return self._max_age
@max_age.setter
def max_age(self, max_age: float):
"""
Sets the max_age of this Occurrence.
Oldest age of the occurrence
:param max_age: The max_age of this Occurrence.
:type max_age: float
"""
self._max_age = max_age
@property
def min_age(self) -> float:
"""
Gets the min_age of this Occurrence.
Youngest age of the occurrence
:return: The min_age of this Occurrence.
:rtype: float
"""
return self._min_age
@min_age.setter
def min_age(self, min_age: float):
"""
Sets the min_age of this Occurrence.
Youngest age of the occurrence
:param min_age: The min_age of this Occurrence.
:type min_age: float
"""
self._min_age = min_age
@property
def age_units(self) -> str:
"""
Gets the age_units of this Occurrence.
Units of min and max age as yr, ka or ma
:return: The age_units of this Occurrence.
:rtype: str
"""
return self._age_units
@age_units.setter
def age_units(self, age_units: str):
"""
Sets the age_units of this Occurrence.
Units of min and max age as yr, ka or ma
:param age_units: The age_units of this Occurrence.
:type age_units: str
"""
self._age_units = age_units
@property
def lat(self) -> float:
"""
Gets the lat of this Occurrence.
Occurrence latitude in decimal degrees
:return: The lat of this Occurrence.
:rtype: float
"""
return self._lat
@lat.setter
def lat(self, lat: float):
"""
Sets the lat of this Occurrence.
Occurrence latitude in decimal degrees
:param lat: The lat of this Occurrence.
:type lat: float
"""
self._lat = lat
@property
def lon(self) -> float:
"""
Gets the lon of this Occurrence.
Occurrence longitude in decimal degrees
:return: The lon of this Occurrence.
:rtype: float
"""
return self._lon
@lon.setter
def lon(self, lon: float):
"""
Sets the lon of this Occurrence.
Occurrence longitude in decimal degrees
:param lon: The lon of this Occurrence.
:type lon: float
"""
self._lon = lon
@property
def geog_coords(self) -> str:
"""
Gets the geog_coords of this Occurrence.
Geographic coordinate type as modern or paleo
:return: The geog_coords of this Occurrence.
:rtype: str
"""
return self._geog_coords
@geog_coords.setter
def geog_coords(self, geog_coords: str):
"""
Sets the geog_coords of this Occurrence.
Geographic coordinate type as modern or paleo
:param geog_coords: The geog_coords of this Occurrence.
:type geog_coords: str
"""
self._geog_coords = geog_coords
|
|
import datetime
import logging
from pprint import pprint as pp
import pymongo
import pytest
from scout.constants import VERBS_MAP
# Module-level logger, named after this test module.
logger = logging.getLogger(__name__)
def test_create_event(adapter, institute_obj, case_obj, user_obj):
    """create_event() should persist a single event carrying the given verb."""
    ## GIVEN a database without any events
    assert len(list(adapter.event_collection.find())) == 0
    ## WHEN inserting an event
    verb = "status"
    adapter.create_event(
        institute=institute_obj,
        case=case_obj,
        user=user_obj,
        link="a link",
        category="case",
        verb=verb,
        subject="a subject",
        level="specific",
    )
    # THEN exactly one event exists and it carries the verb
    assert len(list(adapter.event_collection.find())) == 1
    stored = adapter.event_collection.find_one()
    assert stored["verb"] == verb
def test_assign(adapter, institute_obj, case_obj, user_obj):
    """assign() should register the user on the case and log one event."""
    logger.info("Testing assign a user to a case")
    # GIVEN a populated database without events
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    assert len(list(adapter.event_collection.find())) == 0
    ## WHEN assigning a user to a case
    assign_link = "assignlink"
    case_after = adapter.assign(
        institute=institute_obj, case=case_obj, user=user_obj, link=assign_link
    )
    # THEN the case carries exactly that assignee
    assert case_after["assignees"] == [user_obj["_id"]]
    # THEN a single event was logged with the link
    events = list(adapter.event_collection.find())
    assert len(events) == 1
    assert events[0]["link"] == assign_link
def test_unassign(adapter, institute_obj, case_obj, user_obj):
    """unassign() should drop the assignee, log an event and inactivate the case."""
    case_obj["status"] = "active"
    # GIVEN an adapter to a database with one case
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    # assign the case to the test user first
    case_after = adapter.assign(
        institute=institute_obj, case=case_obj, user=user_obj, link="assignlink"
    )
    # sanity: case is active, assigned, and the assignment was logged
    assert case_after["status"] == "active"
    assert case_after["assignees"] == [user_obj["_id"]]
    assert len(list(adapter.event_collection.find())) == 1
    # WHEN unassigning the only assigned user and asking to inactivate
    case_after = adapter.unassign(
        institute=institute_obj,
        case=case_after,
        user=user_obj,
        link="unassignlink",
        inactivate=True,
    )
    # THEN the case becomes inactive with no assignees left
    assert case_after["status"] == "inactive"
    assert case_after.get("assignees") == []
    # THEN a second event exists, and the unassign event carries the link
    assert len(list(adapter.event_collection.find())) == 2
    unassign_event = adapter.event_collection.find_one({"verb": "unassign"})
    assert unassign_event["link"] == "unassignlink"
def test_update_synopsis(adapter, institute_obj, case_obj, user_obj):
    """update_synopsis() should store the text on both the case and the event."""
    ## GIVEN a populated database without events
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    assert len(list(adapter.event_collection.find())) == 0
    # WHEN updating the synopsis for a case
    new_synopsis = "The updated synopsis"
    case_after = adapter.update_synopsis(
        institute=institute_obj,
        case=case_obj,
        user=user_obj,
        link="synopsislink",
        content=new_synopsis,
    )
    # THEN the case carries the new synopsis
    assert case_after["synopsis"] == new_synopsis
    # THEN the logged event carries the same text
    event = adapter.event_collection.find_one({"link": "synopsislink"})
    assert event["content"] == new_synopsis
def test_archive_case(adapter, institute_obj, case_obj, user_obj):
    """archive_case() should flip the case status to archived and log it."""
    logger.info("Set a case to archive status")
    ## GIVEN a populated database without events
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    assert len(list(adapter.event_collection.find())) == 0
    ## WHEN archiving the case
    archive_link = "archivelink"
    case_after = adapter.archive_case(
        institute=institute_obj, case=case_obj, user=user_obj, link=archive_link
    )
    ## THEN the case status is archived
    assert case_after["status"] == "archived"
    ## THEN an archive event was stored under that link
    event = adapter.event_collection.find_one({"link": archive_link})
    assert event["link"] == archive_link
def test_open_research(adapter, institute_obj, case_obj, user_obj):
    """open_research() should set the research_requested flag and log it."""
    ## GIVEN a populated database without events
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    assert len(list(adapter.event_collection.find())) == 0
    stored_case = adapter.case_collection.find_one({"_id": case_obj["_id"]})
    assert stored_case.get("research_requested", False) is False
    ## WHEN requesting research for the case
    research_link = "openresearchlink"
    case_after = adapter.open_research(
        institute=institute_obj, case=case_obj, user=user_obj, link=research_link
    )
    ## THEN the research_requested flag is set
    assert case_after["research_requested"] is True
    ## THEN an event was stored under the link
    event = adapter.event_collection.find_one({"link": research_link})
    assert event["link"] == research_link
def test_add_hpo(adapter, institute_obj, case_obj, user_obj):
    """add_phenotype() with an HPO term should attach it to the case and log it."""
    ## GIVEN a populated database holding one gene and one HPO term
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    adapter.load_hgnc_gene(
        dict(
            hgnc_id=1,
            hgnc_symbol="test",
            ensembl_id="anothertest",
            chromosome="1",
            start=10,
            end=20,
            build="37",
        )
    )
    hpo_obj = dict(_id="HP:0000878", hpo_id="HP:0000878", description="A term", genes=[1])
    adapter.load_hpo_term(hpo_obj)
    hpo_term = hpo_obj["hpo_id"]
    ## WHEN adding the HPO term to the case
    add_link = "addhpolink"
    case_after = adapter.add_phenotype(
        institute=institute_obj,
        case=case_obj,
        user=user_obj,
        link=add_link,
        hpo_term=hpo_term,
    )
    ## THEN every stored phenotype is that term
    for term in case_after["phenotype_terms"]:
        assert term["phenotype_id"] == hpo_term
    ## THEN an event was stored under the link
    event = adapter.event_collection.find_one({"link": add_link})
    assert event["link"] == add_link
def test_add_phenotype_group(adapter, institute_obj, case_obj, user_obj):
    """add_phenotype(is_group=True) should fill both phenotype terms and groups."""
    ## GIVEN a populated database holding one gene and one HPO term
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    adapter.load_hgnc_gene(
        dict(
            hgnc_id=1,
            hgnc_symbol="test",
            ensembl_id="anothertest",
            chromosome="1",
            start=10,
            end=20,
            build="37",
        )
    )
    hpo_obj = dict(_id="HP:0000878", hpo_id="HP:0000878", description="A term", genes=[1])
    adapter.load_hpo_term(hpo_obj)
    hpo_term = hpo_obj["hpo_id"]
    # sanity: the term is loaded and no events exist yet
    assert len(list(adapter.hpo_term_collection.find())) > 0
    assert adapter.hpo_term_collection.find({"_id": hpo_term})
    assert len(list(adapter.event_collection.find())) == 0
    ## WHEN adding the term as a phenotype group
    case_after = adapter.add_phenotype(
        institute=institute_obj,
        case=case_obj,
        user=user_obj,
        link="hpolink",
        hpo_term=hpo_term,
        is_group=True,
    )
    # THEN the case has both phenotype terms and phenotype groups
    assert len(case_after["phenotype_terms"]) > 0
    assert len(case_after["phenotype_groups"]) > 0
    # THEN phenotype events were logged
    assert len(list(adapter.event_collection.find())) > 0
def test_add_wrong_hpo(adapter, institute_obj, case_obj, user_obj):
    """add_phenotype() with an unknown HPO term should raise ValueError."""
    ## GIVEN a populated database
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    # WHEN adding a non-existing HPO term for the case
    # THEN a ValueError is raised
    with pytest.raises(ValueError):
        adapter.add_phenotype(
            institute=institute_obj,
            case=case_obj,
            user=user_obj,
            link="addhpolink",
            hpo_term="k",
        )
def test_add_no_term(adapter, institute_obj, case_obj, user_obj):
    """add_phenotype() without any term should raise ValueError."""
    logger.info("Add a HPO term for a case")
    ## GIVEN a populated database
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    ## WHEN adding a phenotype without specifying a term
    ## THEN a ValueError is raised
    with pytest.raises(ValueError):
        adapter.add_phenotype(
            institute=institute_obj, case=case_obj, user=user_obj, link="addhpolink"
        )
def test_add_non_existing_mim(adapter, institute_obj, case_obj, user_obj):
    """add_phenotype() with an unknown OMIM term should raise ValueError."""
    ## GIVEN a populated database
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    # WHEN adding a non-existing OMIM phenotype to the case
    # THEN the call raises ValueError
    with pytest.raises(ValueError):
        adapter.add_phenotype(
            institute=institute_obj,
            case=case_obj,
            user=user_obj,
            link="mimlink",
            omim_term="MIM:0000002",
        )
def test_add_mim(adapter, institute_obj, case_obj, user_obj):
    """add_phenotype() with an existing OMIM term should attach its phenotypes."""
    ## GIVEN an adapter populated with a gene, a disease term and an HPO term
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    adapter.load_hgnc_gene(
        dict(
            hgnc_id=1,
            hgnc_symbol="test",
            ensembl_id="anothertest",
            chromosome="1",
            start=10,
            end=20,
            build="37",
        )
    )
    adapter.disease_term_collection.insert_one(
        {
            "_id": "OMIM:605606",
            "disease_id": "OMIM:605606",
            "disease_nr": 605606,
            "description": "Psoriasis susceptibility 7",
            "source": "OMIM",
            "genes": [1],
            "inheritance": [],
            "hpo_terms": ["HP:0000878"],
        }
    )
    adapter.load_hpo_term(
        dict(_id="HP:0000878", hpo_id="HP:0000878", description="A term", genes=[1])
    )
    # the stored disease term is the one we will add
    mim_term = adapter.disease_term_collection.find_one()["_id"]
    assert len(list(adapter.hpo_term_collection.find())) > 0
    # GIVEN no events yet
    assert len(list(adapter.event_collection.find())) == 0
    # WHEN adding the existing OMIM phenotype term
    case_after = adapter.add_phenotype(
        institute=institute_obj,
        case=case_obj,
        user=user_obj,
        link="mimlink",
        omim_term=mim_term,
    )
    # THEN the case has phenotypes
    assert len(case_after["phenotype_terms"]) > 0
    # THEN phenotype events were logged
    assert len(list(adapter.event_collection.find())) > 0
def test_remove_hpo(hpo_database, institute_obj, case_obj, user_obj):
    """remove_phenotype() should drop the term and log a second event."""
    adapter = hpo_database
    logger.info("Add a HPO term for a case")
    adapter._add_case(case_obj)
    # GIVEN a populated database with no events
    assert len(list(adapter.event_collection.find())) == 0
    hpo_term = "HP:0000878"
    case_after = adapter.add_phenotype(
        institute=institute_obj,
        case=case_obj,
        user=user_obj,
        link="addhpolink",
        hpo_term=hpo_term,
    )
    # sanity: the term was added to the case and the add was logged
    assert len(case_after["phenotype_terms"]) == 1
    assert len(list(adapter.event_collection.find())) == 1
    # WHEN removing the phenotype term
    case_after = adapter.remove_phenotype(
        institute=institute_obj,
        case=case_obj,
        user=user_obj,
        link="removehpolink",
        phenotype_id=hpo_term,
    )
    # THEN the case has no phenotype terms left
    assert len(case_after["phenotype_terms"]) == 0
    # THEN the removal was logged as a second event
    assert len(list(adapter.event_collection.find())) == 2
def test_add_cohort(adapter, institute_obj, case_obj, user_obj):
    """add_cohort() should tag the case with the cohort and log one event."""
    ## GIVEN a populated database without events
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    assert len(list(adapter.event_collection.find())) == 0
    cohort_name = "cohort"
    cohort_link = "cohortlink"
    ## WHEN adding a cohort to the case
    case_after = adapter.add_cohort(
        institute=institute_obj,
        case=case_obj,
        user=user_obj,
        link=cohort_link,
        tag=cohort_name,
    )
    # THEN the cohort is stored on the case
    assert set(case_after["cohorts"]) == {cohort_name}
    # THEN a single event was logged with the link
    events = list(adapter.event_collection.find())
    assert len(events) == 1
    assert events[0]["link"] == cohort_link
def test_remove_cohort(adapter, institute_obj, case_obj, user_obj):
    """remove_cohort() should clear the tag and log a second event.

    Fix: the removal call previously reused the *add* link (``link=link``),
    leaving ``remove_cohort_link`` assigned but unused; it now passes the
    dedicated removal link so the remove event is traceable.
    """
    ## GIVEN a populated database without events
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    assert sum(1 for i in adapter.event_collection.find()) == 0
    cohort_name = "cohort"
    link = "cohortlink"
    ## WHEN adding a cohort to a case
    updated_case = adapter.add_cohort(
        institute=institute_obj,
        case=case_obj,
        user=user_obj,
        link=link,
        tag=cohort_name,
    )
    assert sum(1 for i in adapter.event_collection.find()) == 1
    remove_cohort_link = "removeCohortlink"
    ## WHEN removing the cohort from the case
    updated_case = adapter.remove_cohort(
        institute=institute_obj,
        case=case_obj,
        user=user_obj,
        link=remove_cohort_link,  # was link= the add link (copy-paste bug)
        tag=cohort_name,
    )
    # THEN the case has no cohorts left
    assert updated_case["cohorts"] == []
    # THEN both the add and the remove were logged
    assert sum(1 for i in adapter.event_collection.find()) == 2
def test_update_clinical_filter_hpo(adapter, institute_obj, case_obj, user_obj):
    """update_clinical_filter_hpo() should persist the flag and log one event."""
    # GIVEN a case (with its user and institute) and no events
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    assert len(list(adapter.event_collection.find())) == 0
    # WHEN enabling the HPO clinical filter on the case
    wanted = True
    case_after = adapter.update_clinical_filter_hpo(
        institute_obj=institute_obj,
        case_obj=case_obj,
        user_obj=user_obj,
        link="update_clinical_filter_hpo_link",
        hpo_clinical_filter=wanted,
    )
    # THEN the flag is actually stored on the case
    assert case_after["hpo_clinical_filter"] == wanted
    # THEN one event was logged
    assert len(list(adapter.event_collection.find())) == 1
def test_filter_stash(adapter, institute_obj, case_obj, user_obj, filter_obj):
    """stash_filter() / retrieve_filter() / delete_filter() round trip."""
    # GIVEN a case, institute and user in a store
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    institute_id = institute_obj.get("_id")
    category = "snv"
    # THEN initially no filters and no events exist
    assert len(list(adapter.filters(institute_id, category))) == 0
    assert len(list(adapter.event_collection.find())) == 0
    # WHEN storing a filter
    filter_id = adapter.stash_filter(
        filter_obj, institute_obj, case_obj, user_obj, category, link="mock_link"
    )
    # THEN a filter id is returned and the stash was logged
    assert filter_id
    assert len(list(adapter.event_collection.find())) > 0
    # THEN exactly one filter is listed
    assert len(list(adapter.filters(institute_id, category))) == 1
    # THEN the stored filter can be retrieved
    assert adapter.retrieve_filter(filter_id)
    # WHEN deleting the filter
    adapter.delete_filter(filter_id, institute_id, user_obj.get("_id"))
    # THEN no filters are listed any more
    assert len(list(adapter.filters(institute_id, category))) == 0
def test_filter_lock(adapter, institute_obj, case_obj, user_obj, filter_obj):
    """lock_filter() / unlock_filter() should only honour the owning user."""
    # GIVEN a case, institute and user in a store
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    # WHEN storing a filter
    category = "snv"
    filter_id = adapter.stash_filter(
        filter_obj, institute_obj, case_obj, user_obj, category, link="mock_link"
    )
    # THEN a filter id is returned and the stash was logged
    assert filter_id
    assert len(list(adapter.event_collection.find())) > 0
    owner_id = "someone@somewhere.se"
    institute_id = institute_obj.get("_id")

    def _single_filter():
        # listing returns exactly one filter throughout this test
        found = list(adapter.filters(institute_id, category))
        assert len(found) == 1
        return found[0]

    # WHEN locking the filter
    adapter.lock_filter(filter_id, owner_id)
    # THEN the one listed filter is locked
    assert _single_filter().get("lock") is True
    # WHEN a non-owner attempts to unlock
    adapter.unlock_filter(filter_id, "noone@nowhere.no")
    # THEN the filter stays locked
    assert _single_filter().get("lock") is True
    # WHEN the owner unlocks
    adapter.unlock_filter(filter_id, owner_id)
    # THEN the filter is unlocked
    assert _single_filter().get("lock") is False
def test_filter_audit(adapter, institute_obj, case_obj, user_obj, filter_obj):
    """audit_filter() should succeed and log an additional event."""
    # GIVEN a case, institute and user in a store
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    # WHEN storing a filter
    category = "snv"
    filter_id = adapter.stash_filter(
        filter_obj, institute_obj, case_obj, user_obj, category, link="mock_link"
    )
    # THEN the stash produced at least one event
    events_before = len(list(adapter.event_collection.find()))
    assert events_before > 0
    # WHEN marking the filter as audited for the case
    audited = adapter.audit_filter(
        filter_id, institute_obj, case_obj, user_obj, category, link="audit_mock_link"
    )
    # THEN the operation succeeded
    assert audited is not None
    # THEN the audit added another event
    assert len(list(adapter.event_collection.find())) > events_before
def test_update_default_panels(adapter, institute_obj, case_obj, user_obj, testpanel_obj):
    """update_default_panels() should move the default flag to the given panels.

    Cleanup: dropped a stale block of commented-out assertions (copied from
    test_unassign) that trailed this test and did not apply here.
    """
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    adapter.panel_collection.insert_one(testpanel_obj)
    # GIVEN a case with one default gene panel
    case_panels = case_obj["panels"]
    assert len(case_panels) == 1
    panel = case_panels[0]
    assert panel["panel_name"] == "panel1"
    assert panel["is_default"] is True
    # GIVEN a second, non-default panel added to the case
    new_panel = {
        "_id": "an_id",
        "panel_id": "an_id",
        "panel_name": "panel2",
        "display_name": "Test panel2",
        "version": 1.0,
        "updated_at": datetime.datetime.now(),
        "nr_genes": 263,
        "is_default": False,
    }
    case_obj = adapter.case_collection.find_one_and_update(
        {"_id": case_obj["_id"]},
        {"$addToSet": {"panels": new_panel}},
        return_document=pymongo.ReturnDocument.AFTER,
    )
    assert len(case_obj["panels"]) == 2
    # WHEN updating the default panels to only panel2
    updated_case = adapter.update_default_panels(
        institute_obj=institute_obj,
        case_obj=case_obj,
        user_obj=user_obj,
        link="update_default_link",
        panel_objs=[new_panel],
    )
    # THEN panel1 is no longer default and panel2 is
    for panel in updated_case["panels"]:
        if panel["panel_name"] == "panel1":
            assert panel["is_default"] is False
        elif panel["panel_name"] == "panel2":
            assert panel["is_default"] is True
def test_update_case_group(adapter, institute_obj, case_obj, user_obj):
    """update_case_group_ids() should store the group ids and log one event.

    Cleanup: removed an unused ``hpo_clinical_filter = True`` variable left
    over from a copy of test_update_clinical_filter_hpo.
    """
    adapter.case_collection.insert_one(case_obj)
    adapter.institute_collection.insert_one(institute_obj)
    adapter.user_collection.insert_one(user_obj)
    # GIVEN no events in the event collection
    assert sum(1 for i in adapter.event_collection.find()) == 0
    group_ids = ["1234", "5678"]
    # WHEN updating the case group ids
    updated_case = adapter.update_case_group_ids(
        institute_obj=institute_obj,
        case_obj=case_obj,
        user_obj=user_obj,
        link="update_case_group_link",
        group_ids=group_ids,
    )
    # THEN group ids are actually updated
    assert updated_case["group"] == group_ids
    # THEN an event should have been created
    assert sum(1 for i in adapter.event_collection.find()) == 1
|
|
#
#
#
# SDL_Pi_WeatherRack.py - Raspberry Pi Python Library for SwitchDoc Labs WeatherRack.
#
# SparkFun Weather Station Meters
# Argent Data Systems
# Created by SwitchDoc Labs February 13, 2015
# Released into the public domain.
# Version 1.3 - remove 300ms Bounce
# Version 2.0 - Update for WeatherPiArduino V2
#
# imports
import sys
import time as time_
sys.path.append('./Adafruit_ADS1x15')
from Adafruit_ADS1x15 import ADS1x15
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
from datetime import *
# constants
# How the wind vane is read: the Pi's internal A/D, or the external
# ADS1015/ADS1115 I2C ADC (the mode this library actually implements).
SDL_MODE_INTERNAL_AD = 0
SDL_MODE_I2C_ADS1015 = 1
# Sample mode: calls return immediately; wind speed is averaged over
# sampleTime or since the last request, whichever is longer.
SDL_MODE_SAMPLE = 0
# Delay mode: block for sampleTime, then return the average over that time.
SDL_MODE_DELAY = 1
# Anemometer calibration factor (km/h per pulse-per-second).
WIND_FACTOR = 2.400
# Helper Functions
def fuzzyCompare(compareValue, value):
    """Return True when *value* lies strictly within +/-5% of *compareValue*."""
    tolerance = 0.05
    lower = compareValue * (1.0 - tolerance)
    upper = compareValue * (1.0 + tolerance)
    return lower < value < upper
def voltageToDegrees(value, defaultWindDirection):
    """Map a wind-vane voltage to a compass direction in degrees.

    The vane is a resistor ladder, so each direction yields a distinct
    nominal voltage.  The table lists nominal 5 V readings, scaled by
    ADJUST3OR5 for 3.3 V operation; the first entry whose +/-5% window
    contains *value* wins.  If nothing matches, *defaultWindDirection*
    (normally the previous reading) is returned.

    Note: the original documentation claims 16 vane positions but
    typically only 8 are seen, and the manufacturer's chart value for
    315 degrees was wrong.
    """
    ADJUST3OR5 = 0.66  # scale factor for a 3.3V supply; use 1.0 for 5V
    # (nominal 5V reading, direction in degrees) in match-priority order
    vane_table = (
        (3.84, 0.0),
        (1.98, 22.5),
        (2.25, 45),
        (0.41, 67.5),
        (0.45, 90.0),
        (0.32, 112.5),
        (0.90, 135.0),
        (0.62, 157.5),
        (1.40, 180),
        (1.19, 202.5),
        (3.08, 225),
        (2.93, 247.5),
        (4.62, 270.0),
        (4.04, 292.5),
        (4.34, 315.0),  # chart in manufacturers documentation wrong
        (3.43, 337.5),
    )
    for nominal, degrees in vane_table:
        if fuzzyCompare(nominal * ADJUST3OR5, value):
            return degrees
    return defaultWindDirection  # no match: keep the previous direction
# return current microseconds
def micros():
    """Return the current wall-clock time as a whole number of microseconds."""
    return int(round(time_.time() * 1000000))
class SDL_Pi_WeatherRack:
    """Driver for the SwitchDoc Labs WeatherRack sensor set.

    The anemometer and the tipping-bucket rain gauge are counted with GPIO
    edge interrupts; the wind vane is read through an ADS1015/ADS1115 I2C
    ADC.  All sensor state lives in class attributes, so the library
    supports a single WeatherRack per process (original design).
    """

    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)

    # --- interrupt bookkeeping (all times in microseconds) ---
    _currentWindCount = 0   # anemometer pulses in the current sample window
    _currentRainCount = 0   # bucket tips since the last reset
    _shortestWindTime = 0   # shortest pulse gap; 0 until reset_wind_gust()
    _lastWindTime = 0       # timestamp of the previous anemometer pulse
    _lastRainTime = 0       # timestamp of the previous bucket tip
    # Shortest gap between bucket tips.  BUG FIX: originally initialised
    # to 0, so the minimum was never updated; start high so the first
    # measured gap is captured.
    _currentRainMin = 0xffffffff

    # --- configuration ---
    _pinAnem = 0
    _pinRain = 0
    _intAnem = 0
    _intRain = 0
    _ADChannel = 0
    _ADMode = 0
    _sampleTime = 5.0       # seconds
    _selectedMode = SDL_MODE_SAMPLE
    _startSampleTime = 0

    # --- derived readings ---
    _currentWindSpeed = 0.0      # km/h
    _currentWindDirection = 0.0  # degrees
    _ads1015 = 0

    def __init__(self, pinAnem, pinRain, intAnem, intRain, ADMode):
        """Set up the interrupt pins and detect which ADS1x15 chip is fitted.

        :param pinAnem: BCM pin wired to the anemometer reed switch
        :param pinRain: BCM pin wired to the rain-bucket reed switch
        :param intAnem: legacy interrupt number (unused here)
        :param intRain: legacy interrupt number (unused here)
        :param ADMode: SDL_MODE_INTERNAL_AD or SDL_MODE_I2C_ADS1015
        """
        GPIO.setup(pinAnem, GPIO.IN)
        GPIO.setup(pinRain, GPIO.IN)
        # When a rising edge is detected on either pin the service routine
        # runs, regardless of whatever else the program is doing.
        GPIO.add_event_detect(pinAnem, GPIO.RISING, callback=self.serviceInterruptAnem)
        GPIO.add_event_detect(pinRain, GPIO.RISING, callback=self.serviceInterruptRain)
        ADS1015 = 0x00  # 12-bit ADC
        ADS1115 = 0x01  # 16-bit ADC
        # ADC gain (full-scale range in mV) and sample rate
        self.gain = 6144  # +/- 6.144V
        #self.gain = 4096  # +/- 4.096V
        self.sps = 250  # 250 samples per second
        # Start out assuming an ADS1015 at the default I2C address.
        self.ads1015 = ADS1x15(ic=ADS1015, address=0x48)
        # Determine whether a device is present and which chip it is: a
        # 12-bit ADS1015 always returns 0 in the low 4 bits of a raw read.
        try:
            value = self.ads1015.readRaw(1, self.gain, self.sps)  # AIN1 wired to wind vane
            time_.sleep(1.0)
            value = self.ads1015.readRaw(1, self.gain, self.sps)
            if ((0x0F & value) == 0):
                # check again (1 out of 16 chance of a zero low nibble)
                value = self.ads1015.readRaw(0, self.gain, self.sps)
                if ((0x0F & value) != 0):
                    self.ads1015 = ADS1x15(ic=ADS1115, address=0x48)
            else:
                self.ads1015 = ADS1x15(ic=ADS1115, address=0x48)
        except TypeError as e:
            # FIX: was a Python 2 print statement; the parenthesised form
            # works on both Python 2 and Python 3.
            print("Type Error")
        SDL_Pi_WeatherRack._ADMode = ADMode

    # --- wind direction ---

    def current_wind_direction(self):
        """Return the wind direction in degrees (22.5-degree steps)."""
        if (SDL_Pi_WeatherRack._ADMode == SDL_MODE_I2C_ADS1015):
            value = self.ads1015.readADCSingleEnded(1, self.gain, self.sps)  # AIN1 = wind vane
            voltageValue = value / 1000  # mV -> V
        else:
            # internal A/D converter not implemented; report 0 V
            voltageValue = 0.0
        direction = voltageToDegrees(voltageValue, SDL_Pi_WeatherRack._currentWindDirection)
        return direction

    def current_wind_direction_voltage(self):
        """Return the raw wind-vane voltage in volts."""
        if (SDL_Pi_WeatherRack._ADMode == SDL_MODE_I2C_ADS1015):
            value = self.ads1015.readADCSingleEnded(1, self.gain, self.sps)  # AIN1 = wind vane
            voltageValue = value / 1000  # mV -> V
        else:
            # internal A/D converter not implemented; report 0 V
            voltageValue = 0.0
        return voltageValue

    # --- utility methods ---

    def reset_rain_total(self):
        """Zero the accumulated rain-bucket tip count."""
        SDL_Pi_WeatherRack._currentRainCount = 0

    def accessInternalCurrentWindDirection(self):
        """Return the last wind direction used as the vane fallback value."""
        return SDL_Pi_WeatherRack._currentWindDirection

    def reset_wind_gust(self):
        """Restart gust tracking by clearing the shortest pulse interval."""
        SDL_Pi_WeatherRack._shortestWindTime = 0xffffffff

    def startWindSample(self, sampleTime):
        """Begin a wind sampling window of *sampleTime* seconds."""
        SDL_Pi_WeatherRack._startSampleTime = micros()
        SDL_Pi_WeatherRack._sampleTime = sampleTime

    def get_current_wind_speed_when_sampling(self):
        """Return the wind speed (km/h) while in SDL_MODE_SAMPLE.

        Once the sample window has elapsed the pulse count is averaged and
        a new window starts; before that, the previous reading is returned.
        """
        compareValue = SDL_Pi_WeatherRack._sampleTime * 1000000
        if (micros() - SDL_Pi_WeatherRack._startSampleTime >= compareValue):
            # sample time exceeded, calculate currentWindSpeed
            timeSpan = (micros() - SDL_Pi_WeatherRack._startSampleTime)
            SDL_Pi_WeatherRack._currentWindSpeed = (float(SDL_Pi_WeatherRack._currentWindCount) / float(timeSpan)) * WIND_FACTOR * 1000000.0
            SDL_Pi_WeatherRack._currentWindCount = 0
            SDL_Pi_WeatherRack._startSampleTime = micros()
        return SDL_Pi_WeatherRack._currentWindSpeed

    def setWindMode(self, selectedMode, sampleTime):
        """Select SDL_MODE_SAMPLE or SDL_MODE_DELAY; *sampleTime* is in seconds."""
        SDL_Pi_WeatherRack._sampleTime = sampleTime
        SDL_Pi_WeatherRack._selectedMode = selectedMode
        if (SDL_Pi_WeatherRack._selectedMode == SDL_MODE_SAMPLE):
            self.startWindSample(SDL_Pi_WeatherRack._sampleTime)

    # --- current values ---

    def get_current_rain_total(self):
        """Return rain since the last call (0.2794 mm per bucket tip)."""
        rain_amount = 0.2794 * float(SDL_Pi_WeatherRack._currentRainCount)
        SDL_Pi_WeatherRack._currentRainCount = 0
        return rain_amount

    def current_wind_speed(self):
        """Return the current wind speed in km/h."""
        if (SDL_Pi_WeatherRack._selectedMode == SDL_MODE_SAMPLE):
            SDL_Pi_WeatherRack._currentWindSpeed = self.get_current_wind_speed_when_sampling()
        else:
            # SDL_MODE_DELAY: count pulses for one full sample period, then average.
            SDL_Pi_WeatherRack._currentWindCount = 0
            # BUG FIX: the original called the undefined Arduino-style
            # delay(ms), which raised NameError; time_.sleep() takes
            # seconds and _sampleTime is already in seconds.
            time_.sleep(SDL_Pi_WeatherRack._sampleTime)
            SDL_Pi_WeatherRack._currentWindSpeed = (float(SDL_Pi_WeatherRack._currentWindCount) / float(SDL_Pi_WeatherRack._sampleTime)) * WIND_FACTOR
        return SDL_Pi_WeatherRack._currentWindSpeed

    def get_wind_gust(self):
        """Return the strongest gust (km/h) since the last call, then reset."""
        latestTime = SDL_Pi_WeatherRack._shortestWindTime
        SDL_Pi_WeatherRack._shortestWindTime = 0xffffffff
        time = latestTime / 1000000.0  # microseconds -> seconds
        if (time == 0):
            return 0
        else:
            return (1.0 / float(time)) * WIND_FACTOR

    # --- interrupt routines ---

    def serviceInterruptAnem(self, channel):
        """Anemometer edge handler: count the pulse, track the shortest gap."""
        currentTime = (micros() - SDL_Pi_WeatherRack._lastWindTime)
        SDL_Pi_WeatherRack._lastWindTime = micros()
        if (currentTime > 1000):  # debounce: ignore gaps shorter than 1 ms
            SDL_Pi_WeatherRack._currentWindCount = SDL_Pi_WeatherRack._currentWindCount + 1
            if (currentTime < SDL_Pi_WeatherRack._shortestWindTime):
                SDL_Pi_WeatherRack._shortestWindTime = currentTime

    def serviceInterruptRain(self, channel):
        """Rain-bucket edge handler: count the tip, track the shortest gap."""
        currentTime = (micros() - SDL_Pi_WeatherRack._lastRainTime)
        SDL_Pi_WeatherRack._lastRainTime = micros()
        if (currentTime > 500):  # debounce: ignore gaps shorter than 0.5 ms
            SDL_Pi_WeatherRack._currentRainCount = SDL_Pi_WeatherRack._currentRainCount + 1
            if (currentTime < SDL_Pi_WeatherRack._currentRainMin):
                SDL_Pi_WeatherRack._currentRainMin = currentTime
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
# Generic return type and signature of the optional per-operation `cls`
# response hook each method accepts via **kwargs.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteTablesOperations:
    """RouteTablesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2015_06_15.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # NOTE: auto-generated (AutoRest) operation group - edits are lost on
        # regeneration.  Stores the shared pipeline client and serializers.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        route_table_name: str,
        **kwargs: Any
    ) -> None:
        # Initial DELETE request of the long-running delete; polling is
        # driven by begin_delete.
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202 = delete accepted, 204 = resource already absent.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        route_table_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified route table.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved poller state: fire the initial request now.  The
            # identity `cls` keeps the raw pipeline response for the poller.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                route_table_name=route_table_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete returns no body; only the optional cls hook sees the response.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
    async def get(
        self,
        resource_group_name: str,
        route_table_name: str,
        expand: Optional[str] = None,
        **kwargs: Any
    ) -> "_models.RouteTable":
        """Gets the specified route table.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RouteTable, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2015_06_15.models.RouteTable
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        accept = "application/json, text/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('RouteTable', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        route_table_name: str,
        parameters: "_models.RouteTable",
        **kwargs: Any
    ) -> "_models.RouteTable":
        # Initial PUT request of the create-or-update LRO; polling is driven
        # by begin_create_or_update.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'RouteTable')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 = updated existing table, 201 = created new one; body shape is
        # the same RouteTable either way.
        if response.status_code == 200:
            deserialized = self._deserialize('RouteTable', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('RouteTable', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        route_table_name: str,
        parameters: "_models.RouteTable",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.RouteTable"]:
        """Create or updates a route table in a specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param parameters: Parameters supplied to the create or update route table operation.
        :type parameters: ~azure.mgmt.network.v2015_06_15.models.RouteTable
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2015_06_15.models.RouteTable]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved poller state: fire the initial PUT now.  The identity
            # `cls` keeps the raw pipeline response for the poller.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                route_table_name=route_table_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response body once polling completes.
            deserialized = self._deserialize('RouteTable', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.RouteTableListResult"]:
        """Gets all route tables in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RouteTableListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2015_06_15.models.RouteTableListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        accept = "application/json, text/json"
        def prepare_request(next_link=None):
            # First page uses the templated URL; follow-up pages use the
            # server-provided nextLink verbatim (it already embeds the query).
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('RouteTableListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'} # type: ignore
    def list_all(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.RouteTableListResult"]:
        """Gets all route tables in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RouteTableListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2015_06_15.models.RouteTableListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        accept = "application/json, text/json"
        def prepare_request(next_link=None):
            # Same paging scheme as list(), but scoped to the subscription.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('RouteTableListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'} # type: ignore
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from functools import wraps
import os
import contextlib
from airflow import settings
from airflow.utils.log.logging_mixin import LoggingMixin
# Module-level logger shared by the DB maintenance helpers below.
log = LoggingMixin().log
@contextlib.contextmanager
def create_session():
    """
    Contextmanager that will create and teardown a session.

    Commits on clean exit (after detaching all loaded instances);
    rolls back and re-raises on any error.  The session is always closed.
    """
    session = settings.Session()
    try:
        yield session
        session.expunge_all()
        session.commit()
    except:
        # Bare except on purpose: roll back on *any* exit, including
        # KeyboardInterrupt/SystemExit, then re-raise to the caller.
        session.rollback()
        raise
    finally:
        session.close()
def provide_session(func):
    """
    Function decorator that provides a session if it isn't provided.

    If you want to reuse a session or run the function as part of a
    database transaction, you pass it to the function; if not, this wrapper
    will create one and close it for you.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        session_arg = 'session'
        var_names = func.__code__.co_varnames
        # Positional case: 'session' is one of the wrapped function's
        # variables (arguments come first in co_varnames) and the caller
        # supplied enough positional arguments to reach that slot.
        passed_positionally = (
            session_arg in var_names and
            var_names.index(session_arg) < len(args)
        )
        if passed_positionally or session_arg in kwargs:
            return func(*args, **kwargs)
        with create_session() as session:
            kwargs[session_arg] = session
            return func(*args, **kwargs)
    return wrapper
@provide_session
def merge_conn(conn, session=None):
    """Insert ``conn`` only if no Connection with the same conn_id exists."""
    from airflow import models
    C = models.Connection
    if not session.query(C).filter(C.conn_id == conn.conn_id).first():
        session.add(conn)
        session.commit()
def initdb():
    """Upgrade the metadata DB schema and seed the default rows.

    Runs the Alembic migrations, then inserts one default Connection per
    supported service (merge_conn only adds ids that don't exist yet),
    the known event types, the DAGs found by DagBag, and a sample chart.
    """
    session = settings.Session()
    from airflow import models
    upgradedb()
    # --- default connections -------------------------------------------
    merge_conn(
        models.Connection(
            conn_id='airflow_db', conn_type='mysql',
            host='localhost', login='root', password='',
            schema='airflow'))
    merge_conn(
        models.Connection(
            conn_id='airflow_ci', conn_type='mysql',
            host='localhost', login='root', extra="{\"local_infile\": true}",
            schema='airflow_ci'))
    merge_conn(
        models.Connection(
            conn_id='beeline_default', conn_type='beeline', port="10000",
            host='localhost', extra="{\"use_beeline\": true, \"auth\": \"\"}",
            schema='default'))
    merge_conn(
        models.Connection(
            conn_id='bigquery_default', conn_type='google_cloud_platform',
            schema='default'))
    merge_conn(
        models.Connection(
            conn_id='local_mysql', conn_type='mysql',
            host='localhost', login='airflow', password='airflow',
            schema='airflow'))
    merge_conn(
        models.Connection(
            conn_id='presto_default', conn_type='presto',
            host='localhost',
            schema='hive', port=3400))
    merge_conn(
        models.Connection(
            conn_id='google_cloud_default', conn_type='google_cloud_platform',
            schema='default',))
    merge_conn(
        models.Connection(
            conn_id='hive_cli_default', conn_type='hive_cli',
            schema='default',))
    merge_conn(
        models.Connection(
            conn_id='hiveserver2_default', conn_type='hiveserver2',
            host='localhost',
            schema='default', port=10000))
    merge_conn(
        models.Connection(
            conn_id='metastore_default', conn_type='hive_metastore',
            host='localhost', extra="{\"authMechanism\": \"PLAIN\"}",
            port=9083))
    merge_conn(
        models.Connection(
            conn_id='mysql_default', conn_type='mysql',
            login='root',
            host='localhost'))
    merge_conn(
        models.Connection(
            conn_id='postgres_default', conn_type='postgres',
            login='postgres',
            schema='airflow',
            host='localhost'))
    merge_conn(
        models.Connection(
            conn_id='sqlite_default', conn_type='sqlite',
            host='/tmp/sqlite_default.db'))
    merge_conn(
        models.Connection(
            conn_id='http_default', conn_type='http',
            host='https://www.google.com/'))
    merge_conn(
        models.Connection(
            conn_id='mssql_default', conn_type='mssql',
            host='localhost', port=1433))
    merge_conn(
        models.Connection(
            conn_id='vertica_default', conn_type='vertica',
            host='localhost', port=5433))
    merge_conn(
        models.Connection(
            conn_id='wasb_default', conn_type='wasb',
            extra='{"sas_token": null}'))
    merge_conn(
        models.Connection(
            conn_id='webhdfs_default', conn_type='hdfs',
            host='localhost', port=50070))
    merge_conn(
        models.Connection(
            conn_id='ssh_default', conn_type='ssh',
            host='localhost'))
    merge_conn(
        models.Connection(
            conn_id='fs_default', conn_type='fs',
            extra='{"path": "/"}'))
    merge_conn(
        models.Connection(
            conn_id='aws_default', conn_type='aws',
            extra='{"region_name": "us-east-1"}'))
    merge_conn(
        models.Connection(
            conn_id='spark_default', conn_type='spark',
            host='yarn', extra='{"queue": "root.default"}'))
    merge_conn(
        models.Connection(
            conn_id='druid_ingest_default', conn_type='druid',
            host='druid-overlord', port=8081, extra='{"endpoint": "druid/indexer/v1/task"}'))
    merge_conn(
        models.Connection(
            conn_id='redis_default', conn_type='redis',
            host='localhost', port=6379,
            extra='{"db": 0}'))
    merge_conn(
        models.Connection(
            conn_id='sqoop_default', conn_type='sqoop',
            host='rmdbs', extra=''))
    # The EMR extra below is a raw JSON job-flow template consumed at
    # runtime - it must stay exactly as written.
    merge_conn(
        models.Connection(
            conn_id='emr_default', conn_type='emr',
            extra='''
            { "Name": "default_job_flow_name",
                "LogUri": "s3://my-emr-log-bucket/default_job_flow_location",
                "ReleaseLabel": "emr-4.6.0",
                "Instances": {
                    "InstanceGroups": [
                        {
                            "Name": "Master nodes",
                            "Market": "ON_DEMAND",
                            "InstanceRole": "MASTER",
                            "InstanceType": "r3.2xlarge",
                            "InstanceCount": 1
                        },
                        {
                            "Name": "Slave nodes",
                            "Market": "ON_DEMAND",
                            "InstanceRole": "CORE",
                            "InstanceType": "r3.2xlarge",
                            "InstanceCount": 1
                        }
                    ]
                },
                "Ec2KeyName": "mykey",
                "KeepJobFlowAliveWhenNoSteps": false,
                "TerminationProtected": false,
                "Ec2SubnetId": "somesubnet",
                "Applications":[
                    { "Name": "Spark" }
                ],
                "VisibleToAllUsers": true,
                "JobFlowRole": "EMR_EC2_DefaultRole",
                "ServiceRole": "EMR_DefaultRole",
                "Tags": [
                    {
                        "Key": "app",
                        "Value": "analytics"
                    },
                    {
                        "Key": "environment",
                        "Value": "development"
                    }
                ]
            }
            '''))
    merge_conn(
        models.Connection(
            conn_id='databricks_default', conn_type='databricks',
            host='localhost'))
    merge_conn(
        models.Connection(
            conn_id='qubole_default', conn_type='qubole',
            host= 'localhost'))
    # Known event types
    KET = models.KnownEventType
    if not session.query(KET).filter(KET.know_event_type == 'Holiday').first():
        session.add(KET(know_event_type='Holiday'))
    if not session.query(KET).filter(KET.know_event_type == 'Outage').first():
        session.add(KET(know_event_type='Outage'))
    if not session.query(KET).filter(
            KET.know_event_type == 'Natural Disaster').first():
        session.add(KET(know_event_type='Natural Disaster'))
    if not session.query(KET).filter(
            KET.know_event_type == 'Marketing Campaign').first():
        session.add(KET(know_event_type='Marketing Campaign'))
    session.commit()
    dagbag = models.DagBag()
    # Save individual DAGs in the ORM
    for dag in dagbag.dags.values():
        dag.sync_to_db()
    # Deactivate the unknown ones
    models.DAG.deactivate_unknown_dags(dagbag.dags.keys())
    # --- sample chart ---------------------------------------------------
    Chart = models.Chart
    chart_label = "Airflow task instance by type"
    chart = session.query(Chart).filter(Chart.label == chart_label).first()
    if not chart:
        chart = Chart(
            label=chart_label,
            conn_id='airflow_db',
            chart_type='bar',
            x_is_date=False,
            sql=(
                "SELECT state, COUNT(1) as number "
                "FROM task_instance "
                "WHERE dag_id LIKE 'example%' "
                "GROUP BY state"),
        )
        session.add(chart)
        session.commit()
def upgradedb():
    """Run the Alembic migrations up to the latest revision ('heads')."""
    # alembic adds significant import time, so we import it lazily
    from alembic import command
    from alembic.config import Config
    log.info("Creating tables")
    current_dir = os.path.dirname(os.path.abspath(__file__))
    package_dir = os.path.normpath(os.path.join(current_dir, '..'))
    config = Config(os.path.join(package_dir, 'alembic.ini'))
    config.set_main_option(
        'script_location', os.path.join(package_dir, 'migrations'))
    config.set_main_option('sqlalchemy.url', settings.SQL_ALCHEMY_CONN)
    command.upgrade(config, 'heads')
def resetdb():
    '''
    Clear out the database and rebuild it from scratch via initdb().
    '''
    from airflow import models
    # alembic adds significant import time, so we import it lazily
    from alembic.migration import MigrationContext
    log.info("Dropping tables that exist")
    models.Base.metadata.drop_all(settings.engine)
    mc = MigrationContext.configure(settings.engine)
    # NOTE(review): relies on Alembic's *private* ``_version`` table handle
    # to drop the version-tracking table as well - may break on upgrade.
    if mc._version.exists(settings.engine):
        mc._version.drop(settings.engine)
    initdb()
|
|
"""
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
import datetime
import os
import sys
import time
from decimal import Decimal
# NOTE: this backend is Python 2 code (see the `except ImportError, e`
# syntax below).  Both environment variables must be set *before*
# cx_Oracle is imported or they have no effect on the client library.
# Oracle takes client-side character set encoding from the environment.
os.environ['NLS_LANG'] = '.UTF8'
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
os.environ['ORA_NCHAR_LITERAL_REPLACE'] = 'TRUE'
try:
    import cx_Oracle as Database
except ImportError, e:
    from django.core.exceptions import ImproperlyConfigured
    raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.oracle.client import DatabaseClient
from django.db.backends.oracle.creation import DatabaseCreation
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils.encoding import smart_str, force_unicode
# Re-export the driver's exception classes under the names Django expects.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# Check whether cx_Oracle was compiled with the WITH_UNICODE option. This will
# also be True in Python 3.0.
if int(Database.version.split('.', 1)[0]) >= 5 and not hasattr(Database, 'UNICODE'):
    convert_unicode = force_unicode
else:
    convert_unicode = smart_str
class DatabaseFeatures(BaseDatabaseFeatures):
    # Capability flags describing Oracle's behaviour to the ORM.
    empty_fetchmany_value = ()
    needs_datetime_string_cast = False
    # Oracle stores '' as NULL, so the backend must treat them as equivalent.
    interprets_empty_strings_as_nulls = True
    uses_savepoints = True
    can_return_id_from_insert = True
    allow_sliced_subqueries = False
    supports_subqueries_in_group_by = False
    supports_timezones = False
    supports_bitwise_or = False
    can_defer_constraint_checks = True
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.oracle.compiler"
    def autoinc_sql(self, table, column):
        """Return (sequence_sql, trigger_sql) emulating an auto-increment PK."""
        # To simulate auto-incrementing primary keys in Oracle, we have to
        # create a sequence and a trigger.
        sq_name = get_sequence_name(table)
        tr_name = get_trigger_name(table)
        tbl_name = self.quote_name(table)
        col_name = self.quote_name(column)
        # Create the sequence only if it does not already exist.
        sequence_sql = """
DECLARE
    i INTEGER;
BEGIN
    SELECT COUNT(*) INTO i FROM USER_CATALOG
        WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
    IF i = 0 THEN
        EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
    END IF;
END;
/""" % locals()
        # Fill the PK column from the sequence whenever a row is inserted
        # with a NULL primary key.
        trigger_sql = """
            CREATE OR REPLACE TRIGGER "%(tr_name)s"
            BEFORE INSERT ON %(tbl_name)s
            FOR EACH ROW
            WHEN (new.%(col_name)s IS NULL)
                BEGIN
                    SELECT "%(sq_name)s".nextval
                    INTO :new.%(col_name)s FROM dual;
                END;
            /""" % locals()
        return sequence_sql, trigger_sql
def date_extract_sql(self, lookup_type, field_name):
# http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions42a.htm#1017163
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
return "TO_CHAR(%s, 'D')" % field_name
else:
return "EXTRACT(%s FROM %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
# Oracle uses TRUNC() for both dates and numbers.
# http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions155a.htm#SQLRF06151
if lookup_type == 'day':
sql = 'TRUNC(%s)' % field_name
else:
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type)
return sql
    def convert_values(self, value, field):
        """Coerce a raw cx_Oracle value into the Python type *field* expects."""
        if isinstance(value, Database.LOB):
            value = value.read()
            if field and field.get_internal_type() == 'TextField':
                value = force_unicode(value)
        # Oracle stores empty strings as null. We need to undo this in
        # order to adhere to the Django convention of using the empty
        # string instead of null, but only if the field accepts the
        # empty string.
        if value is None and field and field.empty_strings_allowed:
            value = u''
        # Convert 1 or 0 to True or False
        elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'):
            value = bool(value)
        # Force floats to the correct type
        elif value is not None and field and field.get_internal_type() == 'FloatField':
            value = float(value)
        # Convert floats to decimals
        elif value is not None and field and field.get_internal_type() == 'DecimalField':
            value = util.typecast_decimal(field.format_number(value))
        # cx_Oracle always returns datetime.datetime objects for
        # DATE and TIMESTAMP columns, but Django wants to see a
        # python datetime.date, .time, or .datetime.  We use the type
        # of the Field to determine which to cast to, but it's not
        # always available.
        # As a workaround, we cast to date if all the time-related
        # values are 0, or to time if the date is 1/1/1900.
        # This could be cleaned a bit by adding a method to the Field
        # classes to normalize values from the database (the to_python
        # method is used for validation and isn't what we want here).
        elif isinstance(value, Database.Timestamp):
            # In Python 2.3, the cx_Oracle driver returns its own
            # Timestamp object that we must convert to a datetime class.
            if not isinstance(value, datetime.datetime):
                value = datetime.datetime(value.year, value.month,
                        value.day, value.hour, value.minute, value.second,
                        value.fsecond)
            if field and field.get_internal_type() == 'DateTimeField':
                pass
            elif field and field.get_internal_type() == 'DateField':
                value = value.date()
            elif field and field.get_internal_type() == 'TimeField' or (value.year == 1900 and value.month == value.day == 1):
                value = value.time()
            elif value.hour == value.minute == value.second == value.microsecond == 0:
                value = value.date()
        return value
def datetime_cast_sql(self):
return "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')"
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
    def drop_sequence_sql(self, table):
        # Drop the sequence backing ``table``'s auto-incrementing column;
        # the sequence name is derived by module-level get_sequence_name().
        return "DROP SEQUENCE %s;" % self.quote_name(get_sequence_name(table))
    def fetch_returned_insert_id(self, cursor):
        # The new row's id was captured into the cursor's bind variable by the
        # "RETURNING ... INTO" clause built by return_insert_id().
        return long(cursor._insert_id_var.getvalue())
def field_cast_sql(self, db_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
    def last_insert_id(self, cursor, table_name, pk_name):
        # Oracle has no last-insert-id facility, so read the current value of
        # the sequence that backs this table's primary key.
        sq_name = get_sequence_name(table_name)
        cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
        return cursor.fetchone()[0]
def lookup_cast(self, lookup_type):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_in_list_size(self):
return 1000
def max_name_length(self):
return 30
def prep_for_iexact_query(self, x):
return x
def process_clob(self, value):
if value is None:
return u''
return force_unicode(value.read())
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % util.truncate_name(name.upper(),
self.max_name_length())
return name.upper()
def random_function_sql(self):
return "DBMS_RANDOM.RANDOM"
def regex_lookup_9(self, lookup_type):
raise NotImplementedError("Regexes are not supported in Oracle before version 10g.")
def regex_lookup_10(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
    def regex_lookup(self, lookup_type):
        # If regex_lookup is called before it's been initialized, then create
        # a cursor to initialize it and recur.
        # (Opening a cursor triggers DatabaseWrapper._cursor(), which detects
        # the Oracle version and rebinds ops.regex_lookup to regex_lookup_9 or
        # regex_lookup_10, so the call below dispatches to the right one.)
        from django.db import connection
        connection.cursor()
        return connection.ops.regex_lookup(lookup_type)
    def return_insert_id(self):
        # SQL suffix plus the late-binding variable that will receive the new
        # row id (read back later by fetch_returned_insert_id()).
        return "RETURNING %s INTO %%s", (InsertIdVar(),)
def savepoint_create_sql(self, sid):
return convert_unicode("SAVEPOINT " + self.quote_name(sid))
def savepoint_rollback_sql(self, sid):
return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))
def sql_flush(self, style, tables, sequences):
# Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
# 'TRUNCATE z;'... style SQL statements
if tables:
# Oracle does support TRUNCATE, but it seems to get us into
# FK referential trouble, whereas DELETE FROM table works.
sql = ['%s %s %s;' % \
(style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)))
for table in tables]
# Since we've just deleted all the rows, running our sequence
# ALTER code will reset the sequence to 0.
for sequence_info in sequences:
sequence_name = get_sequence_name(sequence_info['table'])
table_name = self.quote_name(sequence_info['table'])
column_name = self.quote_name(sequence_info['column'] or 'id')
query = _get_sequence_reset_sql() % {'sequence': sequence_name,
'table': table_name,
'column': column_name}
sql.append(query)
return sql
else:
return []
    def sequence_reset_sql(self, style, model_list):
        # Build one sequence-reset PL/SQL block per auto-incrementing column:
        # each model's AutoField plus each auto-created m2m join table's id.
        from django.db import models
        output = []
        query = _get_sequence_reset_sql()
        for model in model_list:
            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    table_name = self.quote_name(model._meta.db_table)
                    sequence_name = get_sequence_name(model._meta.db_table)
                    column_name = self.quote_name(f.column)
                    output.append(query % {'sequence': sequence_name,
                                           'table': table_name,
                                           'column': column_name})
                    # Only one AutoField is allowed per model, so don't
                    # continue to loop
                    break
            for f in model._meta.many_to_many:
                # Only auto-created through tables have a Django-managed
                # sequence; explicit through models are handled as models.
                if not f.rel.through:
                    table_name = self.quote_name(f.m2m_db_table())
                    sequence_name = get_sequence_name(f.m2m_db_table())
                    column_name = self.quote_name('id')
                    output.append(query % {'sequence': sequence_name,
                                           'table': table_name,
                                           'column': column_name})
        return output
def start_transaction_sql(self):
return ''
def tablespace_sql(self, tablespace, inline=False):
return "%sTABLESPACE %s" % ((inline and "USING INDEX " or ""),
self.quote_name(tablespace))
def value_to_db_datetime(self, value):
# Oracle doesn't support tz-aware datetimes
if getattr(value, 'tzinfo', None) is not None:
raise ValueError("Oracle backend does not support timezone-aware datetimes.")
return super(DatabaseOperations, self).value_to_db_datetime(value)
def value_to_db_time(self, value):
if value is None:
return None
if isinstance(value, basestring):
return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
# Oracle doesn't support tz-aware datetimes
if value.tzinfo is not None:
raise ValueError("Oracle backend does not support timezone-aware datetimes.")
return datetime.datetime(1900, 1, 1, value.hour, value.minute,
value.second, value.microsecond)
def year_lookup_bounds_for_date_field(self, value):
first = '%s-01-01'
second = '%s-12-31'
return [first % value, second % value]
def combine_expression(self, connector, sub_expressions):
"Oracle requires special cases for %% and & operators in query expressions"
if connector == '%%':
return 'MOD(%s)' % ','.join(sub_expressions)
elif connector == '&':
return 'BITAND(%s)' % ','.join(sub_expressions)
elif connector == '|':
raise NotImplementedError("Bit-wise or is not supported in Oracle.")
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
class DatabaseWrapper(BaseDatabaseWrapper):
    """Django database wrapper for Oracle via the cx_Oracle driver."""
    vendor = 'oracle'
    # SQL operator templates; LIKE patterns are routed through TRANSLATE so
    # the escape character works across NCHAR character sets.
    operators = {
        'exact': '= %s',
        'iexact': '= UPPER(%s)',
        'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
    }
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Major server version; filled in lazily by _cursor() on first connect.
        self.oracle_version = None
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations()
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)
    def _valid_connection(self):
        # True once a connection object exists (no liveness check is made).
        return self.connection is not None
    def _connect_string(self):
        # Build the cx_Oracle "user/password@dsn" connect string; the DSN is
        # either host/port/SID via makedsn() or a bare TNS name.
        settings_dict = self.settings_dict
        if not settings_dict['HOST'].strip():
            settings_dict['HOST'] = 'localhost'
        if settings_dict['PORT'].strip():
            dsn = Database.makedsn(settings_dict['HOST'],
                                   int(settings_dict['PORT']),
                                   settings_dict['NAME'])
        else:
            dsn = settings_dict['NAME']
        return "%s/%s@%s" % (settings_dict['USER'],
                             settings_dict['PASSWORD'], dsn)
    def _cursor(self):
        # Return a placeholder-translating cursor, connecting lazily on
        # first use and performing one-time session setup.
        cursor = None
        if not self._valid_connection():
            conn_string = convert_unicode(self._connect_string())
            self.connection = Database.connect(conn_string, **self.settings_dict['OPTIONS'])
            cursor = FormatStylePlaceholderCursor(self.connection)
            # Set oracle date to ansi date format. This only needs to execute
            # once when we create a new connection. We also set the Territory
            # to 'AMERICA' which forces Sunday to evaluate to a '1' in TO_CHAR().
            cursor.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS' "
                           "NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF' "
                           "NLS_TERRITORY = 'AMERICA'")
            try:
                self.oracle_version = int(self.connection.version.split('.')[0])
                # There's no way for the DatabaseOperations class to know the
                # currently active Oracle version, so we do some setups here.
                # TODO: Multi-db support will need a better solution (a way to
                # communicate the current version).
                if self.oracle_version <= 9:
                    self.ops.regex_lookup = self.ops.regex_lookup_9
                else:
                    self.ops.regex_lookup = self.ops.regex_lookup_10
            except ValueError:
                pass
            try:
                self.connection.stmtcachesize = 20
            except:
                # Django docs specify cx_Oracle version 4.3.1 or higher, but
                # stmtcachesize is available only in 4.3.2 and up.
                pass
            connection_created.send(sender=self.__class__, connection=self)
        if not cursor:
            cursor = FormatStylePlaceholderCursor(self.connection)
        return cursor
    # Oracle doesn't support savepoint commits. Ignore them.
    def _savepoint_commit(self, sid):
        pass
    def _commit(self):
        # Commit, translating driver errors to Django's exception types while
        # preserving the original traceback (Python 2 three-argument raise).
        if self.connection is not None:
            try:
                return self.connection.commit()
            except Database.IntegrityError, e:
                # In case cx_Oracle implements (now or in a future version)
                # raising this specific exception
                raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
            except Database.DatabaseError, e:
                # cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
                # with the following attributes and values:
                #  code = 2091
                #  message = 'ORA-02091: transaction rolled back
                #            'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
                #               _C00102056) violated - parent key not found'
                # We convert that particular case to our IntegrityError exception
                x = e.args[0]
                if hasattr(x, 'code') and hasattr(x, 'message') \
                   and x.code == 2091 and 'ORA-02291' in x.message:
                    raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
                raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
class OracleParam(object):
    """
    Formats a single bind parameter for cx_Oracle.

    Strings longer than 4000 characters must be bound as CLOBs.  A parameter
    may instead dictate its own bind size via an `input_size` attribute;
    otherwise no explicit input size is used when executing the query.
    """
    def __init__(self, param, cursor, strings_only=False):
        # Late-binding params (objects exposing bind_parameter, e.g.
        # VariableWrapper/InsertIdVar) supply their own bind value.
        if hasattr(param, 'bind_parameter'):
            self.smart_str = param.bind_parameter(cursor)
        else:
            self.smart_str = convert_unicode(param, cursor.charset, strings_only)
        if hasattr(param, 'input_size'):
            # An explicit `input_size` attribute always wins.
            self.input_size = param.input_size
        elif isinstance(param, basestring) and len(param) > 4000:
            # Strings over Oracle's VARCHAR2 limit must be bound as CLOBs.
            self.input_size = Database.CLOB
        else:
            self.input_size = None
class VariableWrapper(object):
    """
    Pass-through adapter for cursor variables.  Wrapping a variable keeps
    OracleParam from stringifying it: the wrapper hands the underlying
    object to Cursor.execute as-is via bind_parameter() and forwards all
    other attribute access to it.
    """
    def __init__(self, var):
        self.var = var
    def bind_parameter(self, cursor):
        return self.var
    def __getattr__(self, key):
        return getattr(self.var, key)
    def __setattr__(self, key, value):
        # 'var' itself lives on the wrapper; everything else is delegated.
        if key != 'var':
            setattr(self.var, key, value)
        else:
            self.__dict__[key] = value
class InsertIdVar(object):
    """
    A late-binding cursor variable that can be passed to Cursor.execute
    as a parameter, in order to receive the id of the row created by an
    insert statement.
    """
    def bind_parameter(self, cursor):
        # Allocate a NUMBER bind variable and remember it on the wrapper
        # cursor so fetch_returned_insert_id() can read it after execute().
        param = cursor.cursor.var(Database.NUMBER)
        cursor._insert_id_var = param
        return param
class FormatStylePlaceholderCursor(object):
    """
    Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
    style. This fixes it -- but note that if you want to use a literal "%s" in
    a query, you'll need to use "%%s".
    We also do automatic conversion between Unicode on the Python side and
    UTF-8 -- for talking to Oracle -- in here.
    """
    charset = 'utf-8'
    def __init__(self, connection):
        self.cursor = connection.cursor()
        # Necessary to retrieve decimal values without rounding error.
        self.cursor.numbersAsStrings = True
        # Default arraysize of 1 is highly sub-optimal.
        self.cursor.arraysize = 100
    def _format_params(self, params):
        # Wrap every parameter so strings are encoded and CLOB sizes noted.
        return tuple([OracleParam(p, self, True) for p in params])
    def _guess_input_sizes(self, params_list):
        # Collect per-position input sizes across all rows; the last row
        # specifying a size for a position wins.
        sizes = [None] * len(params_list[0])
        for params in params_list:
            for i, value in enumerate(params):
                if value.input_size:
                    sizes[i] = value.input_size
        self.setinputsizes(*sizes)
    def _param_generator(self, params):
        # Unwrap OracleParams back to the raw bind values.
        return [p.smart_str for p in params]
    def execute(self, query, params=None):
        if params is None:
            params = []
        else:
            params = self._format_params(params)
        # Replace each '%s' with a named ':argN' placeholder.
        args = [(':arg%d' % i) for i in range(len(params))]
        # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # it does want a trailing ';' but not a trailing '/'. However, these
        # characters must be included in the original query in case the query
        # is being passed to SQL*Plus.
        if query.endswith(';') or query.endswith('/'):
            query = query[:-1]
        query = convert_unicode(query % tuple(args), self.charset)
        self._guess_input_sizes([params])
        try:
            return self.cursor.execute(query, self._param_generator(params))
        except Database.IntegrityError, e:
            raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
        except Database.DatabaseError, e:
            # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
            if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
                raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
            raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
    def executemany(self, query, params=None):
        try:
            args = [(':arg%d' % i) for i in range(len(params[0]))]
        except (IndexError, TypeError):
            # No params given, nothing to do
            return None
        # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # it does want a trailing ';' but not a trailing '/'. However, these
        # characters must be included in the original query in case the query
        # is being passed to SQL*Plus.
        if query.endswith(';') or query.endswith('/'):
            query = query[:-1]
        query = convert_unicode(query % tuple(args), self.charset)
        formatted = [self._format_params(i) for i in params]
        self._guess_input_sizes(formatted)
        try:
            return self.cursor.executemany(query,
                                [self._param_generator(p) for p in formatted])
        except Database.IntegrityError, e:
            raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
        except Database.DatabaseError, e:
            # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
            if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
                raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
            raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
    def fetchone(self):
        # Rows pass through _rowfactory so numbers/strings get Python types.
        row = self.cursor.fetchone()
        if row is None:
            return row
        return _rowfactory(row, self.cursor)
    def fetchmany(self, size=None):
        if size is None:
            size = self.arraysize
        return tuple([_rowfactory(r, self.cursor)
                      for r in self.cursor.fetchmany(size)])
    def fetchall(self):
        return tuple([_rowfactory(r, self.cursor)
                      for r in self.cursor.fetchall()])
    def var(self, *args):
        return VariableWrapper(self.cursor.var(*args))
    def arrayvar(self, *args):
        return VariableWrapper(self.cursor.arrayvar(*args))
    def __getattr__(self, attr):
        # Prefer attributes set on this wrapper, otherwise delegate to the
        # underlying cx_Oracle cursor.
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)
    def __iter__(self):
        return CursorIterator(self.cursor)
class CursorIterator(object):
    """Cursor iterator wrapper that invokes our custom row factory."""
    def __init__(self, cursor):
        self.cursor = cursor
        self.iter = iter(cursor)
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol; every raw row passes through
        # _rowfactory for type conversion.
        return _rowfactory(self.iter.next(), self.cursor)
def _rowfactory(row, cursor):
    # Cast numeric values as the appropriate Python type based upon the
    # cursor description, and convert strings to unicode.
    casted = []
    for value, desc in zip(row, cursor.description):
        if value is not None and desc[1] is Database.NUMBER:
            # The cursor sets numbersAsStrings, so NUMBER values arrive as
            # strings here; desc[4:6] are cx_Oracle's (precision, scale).
            precision, scale = desc[4:6]
            if scale == -127:
                if precision == 0:
                    # NUMBER column: decimal-precision floating point
                    # This will normally be an integer from a sequence,
                    # but it could be a decimal value.
                    if '.' in value:
                        value = Decimal(value)
                    else:
                        value = int(value)
                else:
                    # FLOAT column: binary-precision floating point.
                    # This comes from FloatField columns.
                    value = float(value)
            elif precision > 0:
                # NUMBER(p,s) column: decimal-precision fixed point.
                # This comes from IntField and DecimalField columns.
                if scale == 0:
                    value = int(value)
                else:
                    value = Decimal(value)
            elif '.' in value:
                # No type information. This normally comes from a
                # mathematical expression in the SELECT list. Guess int
                # or Decimal based on whether it has a decimal point.
                value = Decimal(value)
            else:
                value = int(value)
        elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
                         Database.LONG_STRING):
            value = to_unicode(value)
        casted.append(value)
    return tuple(casted)
def to_unicode(s):
    """Return ``s`` decoded to unicode if it is a string; otherwise unchanged."""
    return force_unicode(s) if isinstance(s, basestring) else s
def _get_sequence_reset_sql():
    # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
    # PL/SQL block template: steps the sequence forward until it passes the
    # table's current MAX of the pk column.  %(sequence)s, %(table)s and
    # %(column)s are interpolated by the callers (sql_flush and
    # sequence_reset_sql).  The trailing '/' is for SQL*Plus; the cursor
    # wrapper strips it before sending the statement to cx_Oracle.
    return """
        DECLARE
            table_value integer;
            seq_value integer;
        BEGIN
            SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
            SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
                   WHERE sequence_name = '%(sequence)s';
            WHILE table_value > seq_value LOOP
                SELECT "%(sequence)s".nextval INTO seq_value FROM dual;
            END LOOP;
        END;
        /"""
def get_sequence_name(table):
    """Return the uppercase name of the sequence backing ``table``'s pk."""
    # Reserve 3 characters for the '_SQ' suffix before truncating.
    limit = DatabaseOperations().max_name_length() - 3
    return '%s_SQ' % util.truncate_name(table, limit).upper()
def get_trigger_name(table):
    """Return the uppercase name of the trigger associated with ``table``."""
    # Reserve 3 characters for the '_TR' suffix before truncating.
    limit = DatabaseOperations().max_name_length() - 3
    return '%s_TR' % util.truncate_name(table, limit).upper()
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
import re
import subprocess
from twitter.common.collections import maybe_list
from pants.base.build_environment import get_buildroot
from pants.build_graph.intermediate_target_factory import hash_target
from pants.ivy.ivy_subsystem import IvySubsystem
from pants_test.backend.project_info.tasks.resolve_jars_test_mixin import ResolveJarsTestMixin
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_engine
from pants_test.subsystem.subsystem_util import global_subsystem_instance
class ExportIntegrationTest(ResolveJarsTestMixin, PantsRunIntegrationTest):
  """Integration tests for the `./pants export` console task's JSON output."""
  # Flags that additionally resolve the 'sources' and 'javadocs' ivy confs.
  _confs_args = [
    '--export-libraries-sources',
    '--export-libraries-javadocs',
  ]
  def run_export(self, test_target, workdir, load_libs=False, only_default=False, extra_args=None):
    """Runs ./pants export ... and returns its json output.
    :param string|list test_target: spec of the targets to run on.
    :param string workdir: working directory to run pants with.
    :param bool load_libs: whether to load external libraries (of any conf).
    :param bool only_default: if loading libraries, whether to only resolve the default conf, or to
      additionally resolve sources and javadocs.
    :param list extra_args: list of extra arguments for the pants invocation.
    :return: the json output of the console task.
    :rtype: dict
    """
    export_out_file = os.path.join(workdir, 'export_out.txt')
    args = ['export',
            '--output-file={out_file}'.format(out_file=export_out_file)] + maybe_list(test_target)
    libs_args = ['--no-export-libraries'] if not load_libs else self._confs_args
    if load_libs and only_default:
      libs_args = []
    pants_run = self.run_pants_with_workdir(args + libs_args + (extra_args or []), workdir)
    self.assert_success(pants_run)
    self.assertTrue(os.path.exists(export_out_file),
                    msg='Could not find export output file in {out_file}'
                        .format(out_file=export_out_file))
    with open(export_out_file) as json_file:
      json_data = json.load(json_file)
    if not load_libs:
      # Without --export-libraries there must be no 'libraries' key at all.
      self.assertIsNone(json_data.get('libraries'))
    return json_data
  def evaluate_subtask(self, targets, workdir, load_extra_confs, extra_args, expected_jars):
    # Hook required by ResolveJarsTestMixin: run export and check that every
    # expected jar is listed and actually present on disk.
    json_data = self.run_export(targets, workdir, load_libs=True, only_default=not load_extra_confs,
                                extra_args=extra_args)
    for jar in expected_jars:
      self.assertIn(jar, json_data['libraries'])
      for path in json_data['libraries'][jar].values():
        self.assertTrue(os.path.exists(path), 'Expected jar at {} to actually exist.'.format(path))
  def test_export_code_gen(self):
    with self.temporary_workdir() as workdir:
      test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
      json_data = self.run_export(test_target, workdir, load_libs=True)
      thrift_target_name = ('examples.src.thrift.org.pantsbuild.example.precipitation'
                            '.precipitation-java')
      # The synthetic codegen target lives under the workdir, so match it
      # with a regex rather than an exact address.
      codegen_target_regex = os.path.join(os.path.relpath(workdir, get_buildroot()),
                                          'gen/thrift-java/[^/]*/[^/:]*/[^/:]*:{0}'.format(thrift_target_name))
      p = re.compile(codegen_target_regex)
      print(json_data.get('targets').keys())
      self.assertTrue(any(p.match(target) for target in json_data.get('targets').keys()))
  def test_export_json_transitive_jar(self):
    with self.temporary_workdir() as workdir:
      test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
      json_data = self.run_export(test_target, workdir, load_libs=True)
      targets = json_data.get('targets')
      self.assertIn('org.hamcrest:hamcrest-core:1.3', targets[test_target]['libraries'])
  def test_export_jar_path_with_excludes(self):
    with self.temporary_workdir() as workdir:
      test_target = 'testprojects/src/java/org/pantsbuild/testproject/exclude:foo'
      json_data = self.run_export(test_target, workdir, load_libs=True)
      self.assertIsNone(json_data
                        .get('libraries')
                        .get('com.typesafe.sbt:incremental-compiler:0.13.7'))
      foo_target = (json_data
                    .get('targets')
                    .get('testprojects/src/java/org/pantsbuild/testproject/exclude:foo'))
      self.assertTrue('com.typesafe.sbt:incremental-compiler' in foo_target.get('excludes'))
  def test_export_jar_path_with_excludes_soft(self):
    with self.temporary_workdir() as workdir:
      test_target = 'testprojects/src/java/org/pantsbuild/testproject/exclude:'
      json_data = self.run_export(test_target,
                                  workdir,
                                  load_libs=True,
                                  extra_args=['--resolve-ivy-soft-excludes'])
      self.assertIsNotNone(json_data
                           .get('libraries')
                           .get('com.martiansoftware:nailgun-server:0.9.1'))
      self.assertIsNotNone(json_data.get('libraries').get('org.pantsbuild:jmake:1.3.8-10'))
      foo_target = (json_data
                    .get('targets')
                    .get('testprojects/src/java/org/pantsbuild/testproject/exclude:foo'))
      self.assertTrue('com.typesafe.sbt:incremental-compiler' in foo_target.get('excludes'))
      self.assertTrue('org.pantsbuild' in foo_target.get('excludes'))
  # This test fails when the `PANTS_IVY_CACHE_DIR` is set to something that isn't
  # the default location. The set cache_dir likely needs to be plumbed down
  # to the sub-invocation of pants.
  # https://github.com/pantsbuild/pants/issues/3126
  def test_export_jar_path(self):
    with self.temporary_workdir() as workdir:
      test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
      json_data = self.run_export(test_target, workdir, load_libs=True)
      ivy_subsystem = global_subsystem_instance(IvySubsystem)
      ivy_cache_dir = ivy_subsystem.get_options().cache_dir
      common_lang_lib_info = json_data.get('libraries').get('junit:junit:4.12')
      self.assertIsNotNone(common_lang_lib_info)
      self.assertEquals(
        common_lang_lib_info.get('default'),
        os.path.join(ivy_cache_dir, 'junit/junit/jars/junit-4.12.jar')
      )
      self.assertEquals(
        common_lang_lib_info.get('javadoc'),
        os.path.join(ivy_cache_dir,
                     'junit/junit/javadocs/junit-4.12-javadoc.jar')
      )
      self.assertEquals(
        common_lang_lib_info.get('sources'),
        os.path.join(ivy_cache_dir,
                     'junit/junit/sources/junit-4.12-sources.jar')
      )
  def test_dep_map_for_java_sources(self):
    with self.temporary_workdir() as workdir:
      test_target = 'examples/src/scala/org/pantsbuild/example/scala_with_java_sources'
      json_data = self.run_export(test_target, workdir)
      targets = json_data.get('targets')
      self.assertIn('examples/src/java/org/pantsbuild/example/java_sources:java_sources', targets)
  def test_sources_and_javadocs(self):
    with self.temporary_workdir() as workdir:
      test_target = 'examples/src/scala/org/pantsbuild/example/scala_with_java_sources'
      json_data = self.run_export(test_target, workdir, load_libs=True)
      scala_lang_lib = json_data.get('libraries').get('org.scala-lang:scala-library:2.11.8')
      self.assertIsNotNone(scala_lang_lib)
      self.assertIsNotNone(scala_lang_lib['default'])
      self.assertIsNotNone(scala_lang_lib['sources'])
      self.assertIsNotNone(scala_lang_lib['javadoc'])
  # This test fails when the `PANTS_IVY_CACHE_DIR` is set to something that isn't
  # the default location. The set cache_dir likely needs to be plumbed down
  # to the sub-invocation of pants.
  # See https://github.com/pantsbuild/pants/issues/3126
  def test_ivy_classifiers(self):
    with self.temporary_workdir() as workdir:
      test_target = 'testprojects/tests/java/org/pantsbuild/testproject/ivyclassifier:ivyclassifier'
      json_data = self.run_export(test_target, workdir, load_libs=True)
      ivy_subsystem = global_subsystem_instance(IvySubsystem)
      ivy_cache_dir = ivy_subsystem.get_options().cache_dir
      avro_lib_info = json_data.get('libraries').get('org.apache.avro:avro:1.7.7')
      self.assertIsNotNone(avro_lib_info)
      self.assertEquals(
        avro_lib_info.get('default'),
        os.path.join(ivy_cache_dir, 'org.apache.avro/avro/jars/avro-1.7.7.jar')
      )
      self.assertEquals(
        avro_lib_info.get('tests'),
        os.path.join(ivy_cache_dir, 'org.apache.avro/avro/jars/avro-1.7.7-tests.jar')
      )
      self.assertEquals(
        avro_lib_info.get('javadoc'),
        os.path.join(ivy_cache_dir, 'org.apache.avro/avro/javadocs/avro-1.7.7-javadoc.jar')
      )
      self.assertEquals(
        avro_lib_info.get('sources'),
        os.path.join(ivy_cache_dir, 'org.apache.avro/avro/sources/avro-1.7.7-sources.jar')
      )
  def test_distributions_and_platforms(self):
    with self.temporary_workdir() as workdir:
      test_target = 'examples/src/java/org/pantsbuild/example/hello/simple'
      json_data = self.run_export(test_target, workdir, load_libs=False, extra_args=[
        '--jvm-platform-default-platform=java7',
        '--jvm-platform-platforms={'
        ' "java7": {"source": "1.7", "target": "1.7", "args": [ "-X123" ]},'
        ' "java8": {"source": "1.8", "target": "1.8", "args": [ "-X456" ]}'
        '}',
        '--jvm-distributions-paths={'
        ' "macos": [ "/Library/JDK" ],'
        ' "linux": [ "/usr/lib/jdk7", "/usr/lib/jdk8"]'
        '}'
      ])
      self.assertFalse('python_setup' in json_data)
      target_name = 'examples/src/java/org/pantsbuild/example/hello/simple:simple'
      targets = json_data.get('targets')
      self.assertEquals('java7', targets[target_name]['platform'])
      self.assertEquals(
        {
          'default_platform' : 'java7',
          'platforms': {
            'java7': {
              'source_level': '1.7',
              'args': ['-X123'],
              'target_level': '1.7'},
            'java8': {
              'source_level': '1.8',
              'args': ['-X456'],
              'target_level': '1.8'},
          }
        },
        json_data['jvm_platforms'])
  def test_test_platform(self):
    with self.temporary_workdir() as workdir:
      test_target = 'testprojects/tests/java/org/pantsbuild/testproject/testjvms:eight-test-platform'
      json_data = self.run_export(test_target, workdir)
      self.assertEquals('java7', json_data['targets'][test_target]['platform'])
      self.assertEquals('java8', json_data['targets'][test_target]['test_platform'])
  @ensure_engine
  def test_intellij_integration(self):
    with self.temporary_workdir() as workdir:
      exported_file = os.path.join(workdir, "export_file.json")
      p = subprocess.Popen(['build-support/pants-intellij.sh', '--export-output-file=' + exported_file],
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      p.communicate()
      self.assertEqual(p.returncode, 0)
      with open(exported_file) as data_file:
        json_data = json.load(data_file)
      python_setup = json_data['python_setup']
      self.assertIsNotNone(python_setup)
      self.assertIsNotNone(python_setup['interpreters'])
      default_interpreter = python_setup['default_interpreter']
      self.assertIsNotNone(default_interpreter)
      self.assertIsNotNone(python_setup['interpreters'][default_interpreter])
      self.assertTrue(os.path.exists(python_setup['interpreters'][default_interpreter]['binary']))
      self.assertTrue(os.path.exists(python_setup['interpreters'][default_interpreter]['chroot']))
      python_target = json_data['targets']['src/python/pants/backend/python/targets:targets']
      self.assertIsNotNone(python_target)
      self.assertEquals(default_interpreter, python_target['python_interpreter'])
  def test_intransitive_and_scope(self):
    with self.temporary_workdir() as workdir:
      test_path = 'testprojects/maven_layout/provided_patching/one/src/main/java'
      test_target = '{}:common'.format(test_path)
      json_data = self.run_export(test_target, workdir)
      # The synthetic target's name embeds a hash of (address, scope).
      h = hash_target('{}:shadow'.format(test_path), 'provided')
      synthetic_target = '{}:shadow-unstable-provided-{}'.format(test_path, h)
      self.assertEquals(False, json_data['targets'][synthetic_target]['transitive'])
      self.assertEquals('compile test', json_data['targets'][synthetic_target]['scope'])
  def test_export_is_target_roots(self):
    with self.temporary_workdir() as workdir:
      test_target = 'examples/tests/java/org/pantsbuild/example/::'
      json_data = self.run_export(test_target, workdir, load_libs=False)
      for target_address, attributes in json_data['targets'].items():
        # Make sure all targets under `test_target`'s directory are target roots.
        self.assertEqual(
          attributes['is_target_root'],
          target_address.startswith("examples/tests/java/org/pantsbuild/example")
        )
|
|
from direct.showbase.InputStateGlobal import inputState
from direct.task.Task import Task
from panda3d.core import *
from . import GravityWalker
# Module-wide flag controlling whether battle (strafe) controls are active.
BattleStrafe = 0
def SetStrafe(status):
    """Force the module-wide battle-strafe flag to ``status``."""
    global BattleStrafe
    BattleStrafe = status
def ToggleStrafe():
    """Invert the module-wide battle-strafe flag."""
    global BattleStrafe
    BattleStrafe = not BattleStrafe
class BattleWalker(GravityWalker.GravityWalker):
def __init__(self):
GravityWalker.GravityWalker.__init__(self)
self.slideSpeed = 0
self.advanceSpeed = 0
def getSpeeds(self):
return (self.speed, self.rotationSpeed, self.slideSpeed, self.advanceSpeed)
    def handleAvatarControls(self, task):
        """
        Check on the arrow keys and update the avatar.

        Runs once per frame as a task; reads the input state, integrates the
        avatar's position/heading over the frame's dt, and returns Task.cont
        so the task keeps running.
        """
        # get the button states:
        run = inputState.isSet("run")
        forward = inputState.isSet("forward")
        reverse = inputState.isSet("reverse")
        turnLeft = inputState.isSet("turnLeft")
        turnRight = inputState.isSet("turnRight")
        slideLeft = inputState.isSet("slideLeft")
        slideRight = inputState.isSet("slideRight")
        jump = inputState.isSet("jump")
        # Check for Auto-Run
        if base.localAvatar.getAutoRun():
            forward = 1
            reverse = 0
        # Determine what the speeds are based on the buttons:
        self.speed=(forward and self.avatarControlForwardSpeed or
                    reverse and -self.avatarControlReverseSpeed)
        # Slide speed is a scaled down version of forward speed
        self.slideSpeed=(slideLeft and -self.avatarControlForwardSpeed or
                         slideRight and self.avatarControlForwardSpeed) * 0.5
        # Turning is disabled while sliding.
        self.rotationSpeed=not (slideLeft or slideRight) and (
                (turnLeft and self.avatarControlRotateSpeed) or
                (turnRight and -self.avatarControlRotateSpeed))
        debugRunning = inputState.isSet("debugRunning")
        if(debugRunning):
            self.speed*=base.debugRunningMultiplier
            self.slideSpeed*=base.debugRunningMultiplier
            self.rotationSpeed*=1.25
        if self.needToDeltaPos:
            self.setPriorParentVector()
            self.needToDeltaPos = 0
        if self.wantDebugIndicator:
            self.displayDebugInfo()
        if self.lifter.isOnGround():
            if self.isAirborne:
                # Just landed: report how hard, and delay the next jump.
                self.isAirborne = 0
                assert self.debugPrint("isAirborne 0 due to isOnGround() true")
                impact = self.lifter.getImpactVelocity()
                if impact < -30.0:
                    messenger.send("jumpHardLand")
                    self.startJumpDelay(0.3)
                else:
                    messenger.send("jumpLand")
                    if impact < -5.0:
                        self.startJumpDelay(0.2)
                    # else, ignore the little potholes.
            assert self.isAirborne == 0
            self.priorParent = Vec3.zero()
            if jump and self.mayJump:
                # The jump button is down and we're close
                # enough to the ground to jump.
                self.lifter.addVelocity(self.avatarControlJumpForce)
                messenger.send("jumpStart")
                self.isAirborne = 1
                assert self.debugPrint("isAirborne 1 due to jump")
        else:
            if self.isAirborne == 0:
                assert self.debugPrint("isAirborne 1 due to isOnGround() false")
            self.isAirborne = 1
        self.__oldPosDelta = self.avatarNodePath.getPosDelta(render)
        # How far did we move based on the amount of time elapsed?
        self.__oldDt = ClockObject.getGlobalClock().getDt()
        dt=self.__oldDt
        # Check to see if we're moving at all:
        self.moving = self.speed or self.slideSpeed or self.rotationSpeed or (self.priorParent!=Vec3.zero())
        if self.moving:
            distance = dt * self.speed
            slideDistance = dt * self.slideSpeed
            rotation = dt * self.rotationSpeed
            # Take a step in the direction of our previous heading.
            if distance or slideDistance or self.priorParent != Vec3.zero():
                # rotMat is the rotation matrix corresponding to
                # our previous heading.
                rotMat=Mat3.rotateMatNormaxis(self.avatarNodePath.getH(), Vec3.up())
                if self.isAirborne:
                    forward = Vec3.forward()
                else:
                    # On the ground, move parallel to the surface by deriving
                    # forward from the contact normal.
                    contact = self.lifter.getContactNormal()
                    forward = contact.cross(Vec3.right())
                    # Consider commenting out this normalize. If you do so
                    # then going up and down slops is a touch slower and
                    # steeper terrain can cut the movement in half. Without
                    # the normalize the movement is slowed by the cosine of
                    # the slope (i.e. it is multiplied by the sign as a
                    # side effect of the cross product above).
                    forward.normalize()
                self.vel=Vec3(forward * distance)
                if slideDistance:
                    if self.isAirborne:
                        right = Vec3.right()
                    else:
                        right = forward.cross(contact)
                        # See note above for forward.normalize()
                        right.normalize()
                    self.vel=Vec3(self.vel + (right * slideDistance))
                self.vel=Vec3(rotMat.xform(self.vel))
                # priorParent carries velocity inherited from a moving platform.
                step=self.vel + (self.priorParent * dt)
                self.avatarNodePath.setFluidPos(Point3(
                        self.avatarNodePath.getPos()+step))
            self.avatarNodePath.setH(self.avatarNodePath.getH()+rotation)
        else:
            self.vel.set(0.0, 0.0, 0.0)
        if self.moving or jump:
            messenger.send("avatarMoving")
        return Task.cont
    # NOTE(review): this whole block is disabled with ``if 0:`` -- it is a
    # dead, older strafe-style variant of handleAvatarControls kept for
    # reference.  Indentation below was reconstructed from the control flow
    # of the live version above; confirm against upstream Panda3D before
    # ever re-enabling.
    if 0:
        def handleAvatarControls(self, task):
            """Poll the input state and update the avatar for one frame,
            keeping it faced toward its current battle target while moving.

            Falls back to GravityWalker.handleAvatarControls when there is
            no usable target.  Returns Task.cont so the task keeps running.
            """
            # If targetNp is not available, revert back to GravityWalker.handleAvatarControls.
            # This situation occurs when the target dies, but we aren't switched out of
            # battle walker control mode.
            targetNp = self.avatarNodePath.currentTarget
            if not BattleStrafe or targetNp == None or targetNp.isEmpty():
                return GravityWalker.GravityWalker.handleAvatarControls(self, task)
            # get the button states:
            run = inputState.isSet("run")
            forward = inputState.isSet("forward")
            reverse = inputState.isSet("reverse")
            turnLeft = inputState.isSet("turnLeft")
            turnRight = inputState.isSet("turnRight")
            slide = inputState.isSet("slide")
            jump = inputState.isSet("jump")
            # Determine what the speeds are based on the buttons:
            self.advanceSpeed=(forward and self.avatarControlForwardSpeed or
                               reverse and -self.avatarControlReverseSpeed)
            if run and self.advanceSpeed>0.0:
                self.advanceSpeed*=2.0 #*#
            # Should fSlide be renamed slideButton?
            # Turn buttons become sideways (strafe) motion in this mode.
            self.slideSpeed=.15*(turnLeft and -self.avatarControlForwardSpeed or
                                 turnRight and self.avatarControlForwardSpeed)
            print('slideSpeed: %s' % self.slideSpeed)  # NOTE(review): debug print left in
            self.rotationSpeed=0
            self.speed=0
            debugRunning = inputState.isSet("debugRunning")
            if debugRunning:
                self.advanceSpeed*=4.0
                self.slideSpeed*=4.0
                self.rotationSpeed*=1.25
            if self.needToDeltaPos:
                self.setPriorParentVector()
                self.needToDeltaPos = 0
            if self.wantDebugIndicator:
                self.displayDebugInfo()
            if self.lifter.isOnGround():
                if self.isAirborne:
                    # Just landed this frame: classify the landing by impact speed.
                    self.isAirborne = 0
                    assert self.debugPrint("isAirborne 0 due to isOnGround() true")
                    impact = self.lifter.getImpactVelocity()
                    if impact < -30.0:
                        messenger.send("jumpHardLand")
                        self.startJumpDelay(0.3)
                    else:
                        messenger.send("jumpLand")
                        if impact < -5.0:
                            self.startJumpDelay(0.2)
                        # else, ignore the little potholes.
                assert self.isAirborne == 0
                self.priorParent = Vec3.zero()
                if jump and self.mayJump:
                    # The jump button is down and we're close
                    # enough to the ground to jump.
                    self.lifter.addVelocity(self.avatarControlJumpForce)
                    messenger.send("jumpStart")
                    self.isAirborne = 1
                    assert self.debugPrint("isAirborne 1 due to jump")
            else:
                if self.isAirborne == 0:
                    assert self.debugPrint("isAirborne 1 due to isOnGround() false")
                self.isAirborne = 1
            self.__oldPosDelta = self.avatarNodePath.getPosDelta(render)
            # How far did we move based on the amount of time elapsed?
            self.__oldDt = ClockObject.getGlobalClock().getDt()
            dt=self.__oldDt
            # Before we do anything with position or orientation, make the avatar
            # face it's target.  Only allow rMax degrees rotation per frame, so
            # we don't get an unnatural spinning effect
            curH = self.avatarNodePath.getH()
            self.avatarNodePath.headsUp(targetNp)
            newH = self.avatarNodePath.getH()
            delH = reduceAngle(newH-curH)
            rMax = 10
            if delH < -rMax:
                self.avatarNodePath.setH(curH-rMax)
                self.rotationSpeed=-self.avatarControlRotateSpeed
            elif delH > rMax:
                self.avatarNodePath.setH(curH+rMax)
                self.rotationSpeed=self.avatarControlRotateSpeed
            # Check to see if we're moving at all:
            self.moving = self.speed or self.slideSpeed or self.rotationSpeed or (self.priorParent!=Vec3.zero())
            if self.moving:
                distance = dt * self.speed
                slideDistance = dt * self.slideSpeed
                print('slideDistance: %s' % slideDistance)  # NOTE(review): debug print left in
                rotation = dt * self.rotationSpeed
                # Take a step in the direction of our previous heading.
                self.vel=Vec3(Vec3.forward() * distance +
                              Vec3.right() * slideDistance)
                if self.vel != Vec3.zero() or self.priorParent != Vec3.zero():
                    if 1:
                        # rotMat is the rotation matrix corresponding to
                        # our previous heading.
                        rotMat=Mat3.rotateMatNormaxis(self.avatarNodePath.getH(), Vec3.up())
                        step=(self.priorParent * dt) + rotMat.xform(self.vel)
                        self.avatarNodePath.setFluidPos(Point3(
                            self.avatarNodePath.getPos()+step))
                # NOTE(review): heading update presumably applies even with zero
                # velocity so pure rotation still turns the avatar -- confirm.
                self.avatarNodePath.setH(self.avatarNodePath.getH()+rotation)
            else:
                self.vel.set(0.0, 0.0, 0.0)
            """
            # Check to see if we're moving at all:
            self.moving = self.advanceSpeed or self.slideSpeed or self.rotationSpeed or (self.priorParent!=Vec3.zero())
            if self.moving:
                distance = dt * self.advanceSpeed
                slideDistance = dt * self.slideSpeed
                rotation = dt * self.rotationSpeed
                # Prevent avatar from getting too close to target
                d = self.avatarNodePath.getPos(targetNp)
                # TODO: make min distance adjust for current weapon
                if (d[0]*d[0]+d[1]*d[1] < 6.0 and distance > 0):
                    # move the avatar sideways instead of forward
                    slideDistance += .2
                    distance = 0
                # Take a step in the direction of our previous heading.
                self.vel=Vec3(Vec3.forward() * distance +
                              Vec3.right() * slideDistance)
                if self.vel != Vec3.zero() or self.priorParent != Vec3.zero():
                    # rotMat is the rotation matrix corresponding to
                    # our previous heading.
                    rotMat=Mat3.rotateMatNormaxis(self.avatarNodePath.getH(), Vec3.up())
                    step=rotMat.xform(self.vel) + (self.priorParent * dt)
                    self.avatarNodePath.setFluidPos(Point3(
                        self.avatarNodePath.getPos()+step))
                    self.avatarNodePath.setH(self.avatarNodePath.getH()+rotation)
            else:
                self.vel.set(0.0, 0.0, 0.0)
            """
            if self.moving or jump:
                messenger.send("avatarMoving")
            return Task.cont
|
|
from contextlib import contextmanager
import datetime
from functools import partial
import inspect
import re
from nose.tools import ( # noqa
assert_almost_equal,
assert_almost_equals,
assert_dict_contains_subset,
assert_false,
assert_greater,
assert_greater_equal,
assert_in,
assert_is,
assert_is_instance,
assert_is_none,
assert_is_not,
assert_is_not_none,
assert_less,
assert_less_equal,
assert_multi_line_equal,
assert_not_almost_equal,
assert_not_almost_equals,
assert_not_equal,
assert_not_equals,
assert_not_in,
assert_not_is_instance,
assert_raises,
assert_raises_regexp,
assert_regexp_matches,
assert_true,
assert_tuple_equal,
)
import numpy as np
import pandas as pd
from pandas.util.testing import (
assert_frame_equal,
assert_panel_equal,
assert_series_equal,
assert_index_equal,
)
from six import iteritems, viewkeys, PY2
from toolz import dissoc, keyfilter
import toolz.curried.operator as op
from zipline.testing.core import ensure_doctest
from zipline.dispatch import dispatch
from zipline.lib.adjustment import Adjustment
from zipline.lib.labelarray import LabelArray
from zipline.utils.functional import dzip_exact, instance
from zipline.utils.math_utils import tolerant_equals
@instance
@ensure_doctest
class wildcard(object):
    """An object that compares equal to any other object.

    This is useful when using :func:`~zipline.testing.predicates.assert_equal`
    with a large recursive structure and some fields to be ignored.

    Examples
    --------
    >>> wildcard == 5
    True
    >>> wildcard == 'ayy'
    True

    # reflected
    >>> 5 == wildcard
    True
    >>> 'ayy' == wildcard
    True
    """
    # NOTE(review): ``@instance`` presumably rebinds this name to a singleton
    # instance (the doctests compare ``wildcard`` itself) -- confirm against
    # zipline.utils.functional.instance.
    # ``staticmethod`` drops ``self`` so the single argument is the other
    # operand of the comparison.
    @staticmethod
    def __eq__(other):
        return True

    @staticmethod
    def __ne__(other):
        return False

    def __repr__(self):
        return '<%s>' % type(self).__name__

    __str__ = __repr__
def keywords(func):
    """Get the argument names of a function.

    >>> def f(x, y=2):
    ...     pass
    >>> keywords(f)
    ['x', 'y']

    Parameters
    ----------
    func : type, functools.partial, or callable
        Classes are resolved through ``__init__``; partials through the
        wrapped function.

    Returns
    -------
    args : list[str]
        The positional argument names of ``func``.

    Notes
    -----
    Taken from odo.utils
    """
    if isinstance(func, type):
        return keywords(func.__init__)
    elif isinstance(func, partial):
        return keywords(func.func)
    # ``inspect.getargspec`` was removed in Python 3.11; prefer
    # ``getfullargspec`` when it exists (Python 3) and fall back only on
    # legacy interpreters.  ``or`` short-circuits, so the removed attribute
    # is never touched on modern Pythons.
    argspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    return argspec(func).args
def filter_kwargs(f, kwargs):
    """Return a dict of valid kwargs for `f` from a subset of `kwargs`

    Examples
    --------
    >>> def f(a, b=1, c=2):
    ...     return a + b + c
    ...
    >>> raw_kwargs = dict(a=1, b=3, d=4)
    >>> f(**raw_kwargs)
    Traceback (most recent call last):
        ...
    TypeError: f() got an unexpected keyword argument 'd'
    >>> kwargs = filter_kwargs(f, raw_kwargs)
    >>> f(**kwargs)
    6

    Notes
    -----
    Taken from odo.utils
    """
    # Keep only the entries whose key names a real parameter of ``f``.
    accepted = keywords(f)
    return {name: value for name, value in kwargs.items() if name in accepted}
def _s(word, seq, suffix='s'):
"""Adds a suffix to ``word`` if some sequence has anything other than
exactly one element.
word : str
The string to add the suffix to.
seq : sequence
The sequence to check the length of.
suffix : str, optional.
The suffix to add to ``word``
Returns
-------
maybe_plural : str
``word`` with ``suffix`` added if ``len(seq) != 1``.
"""
return word + (suffix if len(seq) != 1 else '')
def _fmt_path(path):
"""Format the path for final display.
Parameters
----------
path : iterable of str
The path to the values that are not equal.
Returns
-------
fmtd : str
The formatted path to put into the error message.
"""
if not path:
return ''
return 'path: _' + ''.join(path)
def _fmt_msg(msg):
"""Format the message for final display.
Parameters
----------
msg : str
The message to show to the user to provide additional context.
returns
-------
fmtd : str
The formatted message to put into the error message.
"""
if not msg:
return ''
return msg + '\n'
def _safe_cls_name(cls):
try:
return cls.__name__
except AttributeError:
return repr(cls)
def assert_is_subclass(subcls, cls, msg=''):
    """Assert that ``subcls`` is a subclass of ``cls``.

    Parameters
    ----------
    subcls : type
        The type to check.
    cls : type
        The type to check ``subcls`` against.
    msg : str, optional
        An extra assertion message to print if this fails.
    """
    # ``assert`` is kept (rather than an explicit raise) so behavior under
    # ``python -O`` matches the rest of this module; the message tuple is
    # only evaluated on failure.
    template = '%s is not a subclass of %s\n%s'
    assert issubclass(subcls, cls), template % (
        _safe_cls_name(subcls),
        _safe_cls_name(cls),
        msg,
    )
def assert_regex(result, expected, msg=''):
    """Assert that ``expected`` matches somewhere in ``result``.

    Parameters
    ----------
    result : str
        The string to search.
    expected : str or compiled regex
        The pattern to search for in ``result``.
    msg : str, optional
        An extra assertion message to print if this fails.
    """
    match = re.search(expected, result)
    assert match, '%s%r not found in %r' % (_fmt_msg(msg), expected, result)
@contextmanager
def assert_raises_regex(exc, pattern, msg=''):
    """Assert that some exception is raised in a context and that the message
    matches some pattern.

    Parameters
    ----------
    exc : type or tuple[type]
        The exception type or types to expect.
    pattern : str or compiled regex
        The pattern to search for in the str of the raised exception.
    msg : str, optional
        An extra assertion message to print if this fails.
    """
    raised = False
    try:
        yield
    except exc as e:
        # Matching exception type: check its message against the pattern.
        raised = True
        text = str(e)
        assert re.search(pattern, text), (
            '%s%r not found in %r' % (_fmt_msg(msg), pattern, text)
        )
    # Unrelated exceptions propagate out of the ``try`` untouched, so we
    # only get here when the body finished or ``exc`` was handled above.
    if not raised:
        raise AssertionError('%s%s was not raised' % (_fmt_msg(msg), exc))
@dispatch(object, object)
def assert_equal(result, expected, path=(), msg='', **kwargs):
    """Assert that two objects are equal using the ``==`` operator.

    This is the fallback case of the multiply-dispatched ``assert_equal``;
    more specific type pairs are registered below with
    ``assert_equal.register``.

    Parameters
    ----------
    result : object
        The result that came from the function under test.
    expected : object
        The expected result.

    Raises
    ------
    AssertionError
        Raised when ``result`` is not equal to ``expected``.
    """
    # The message is only built on failure (assert messages are lazy).
    assert result == expected, '%s%s != %s\n%s' % (
        _fmt_msg(msg),
        result,
        expected,
        _fmt_path(path),
    )
@assert_equal.register(float, float)
def assert_float_equal(result,
                       expected,
                       path=(),
                       msg='',
                       float_rtol=10e-7,
                       float_atol=10e-7,
                       float_equal_nan=True,
                       **kwargs):
    """Assert two floats are equal within relative tolerance ``float_rtol``
    and absolute tolerance ``float_atol``.

    By default ``nan`` compares equal to ``nan``
    (``float_equal_nan=True``).  Note the default tolerances ``10e-7``
    are 1e-6, not 1e-7.
    """
    assert tolerant_equals(
        result,
        expected,
        rtol=float_rtol,
        atol=float_atol,
        equal_nan=float_equal_nan,
    ), '%s%s != %s with rtol=%s and atol=%s%s\n%s' % (
        _fmt_msg(msg),
        result,
        expected,
        float_rtol,
        float_atol,
        (' (with nan != nan)' if not float_equal_nan else ''),
        _fmt_path(path),
    )
def _check_sets(result, expected, msg, path, type_):
"""Compare two sets. This is used to check dictionary keys and sets.
Parameters
----------
result : set
expected : set
msg : str
path : tuple
type : str
The type of an element. For dict we use ``'key'`` and for set we use
``'element'``.
"""
if result != expected:
if result > expected:
diff = result - expected
msg = 'extra %s in result: %r' % (_s(type_, diff), diff)
elif result < expected:
diff = expected - result
msg = 'result is missing %s: %r' % (_s(type_, diff), diff)
else:
in_result = result - expected
in_expected = expected - result
msg = '%s only in result: %s\n%s only in expected: %s' % (
_s(type_, in_result),
in_result,
_s(type_, in_expected),
in_expected,
)
raise AssertionError(
'%s%ss do not match\n%s' % (
_fmt_msg(msg),
type_,
_fmt_path(path),
),
)
@assert_equal.register(dict, dict)
def assert_dict_equal(result, expected, path=(), msg='', **kwargs):
    """Assert two dicts are equal: identical key sets and, for every key,
    values that compare equal under ``assert_equal``.

    All value failures are collected and reported in a single
    AssertionError instead of stopping at the first mismatch.
    """
    # Key-set mismatch raises immediately with a key-oriented message.
    _check_sets(
        viewkeys(result),
        viewkeys(expected),
        msg,
        path + ('.%s()' % ('viewkeys' if PY2 else 'keys'),),
        'key',
    )
    failures = []
    for k, (resultv, expectedv) in iteritems(dzip_exact(result, expected)):
        try:
            assert_equal(
                resultv,
                expectedv,
                path=path + ('[%r]' % (k,),),
                msg=msg,
                **kwargs
            )
        except AssertionError as e:
            failures.append(str(e))
    if failures:
        raise AssertionError('\n'.join(failures))
@assert_equal.register(list, list)
@assert_equal.register(tuple, tuple)
def assert_sequence_equal(result, expected, path=(), msg='', **kwargs):
    """Assert two same-type sequences are equal: equal lengths and
    elementwise equality under ``assert_equal``."""
    result_len = len(result)
    expected_len = len(expected)
    assert result_len == expected_len, (
        '%s%s lengths do not match: %d != %d\n%s' % (
            _fmt_msg(msg),
            type(result).__name__,
            result_len,
            expected_len,
            _fmt_path(path),
        )
    )
    for n, (resultv, expectedv) in enumerate(zip(result, expected)):
        assert_equal(
            resultv,
            expectedv,
            path=path + ('[%d]' % n,),
            msg=msg,
            **kwargs
        )
@assert_equal.register(set, set)
def assert_set_equal(result, expected, path=(), msg='', **kwargs):
    """Assert two sets contain exactly the same elements."""
    _check_sets(
        result,
        expected,
        msg,
        path,
        'element',
    )
@assert_equal.register(np.ndarray, np.ndarray)
def assert_array_equal(result,
                       expected,
                       path=(),
                       msg='',
                       array_verbose=True,
                       array_decimal=None,
                       **kwargs):
    """Assert two ndarrays are equal, delegating to numpy's testing helpers.

    When ``array_decimal`` is given, compare with
    ``assert_array_almost_equal`` to that many decimal places; otherwise
    require exact equality.  The dispatch ``path`` is appended to numpy's
    error message on failure.
    """
    f = (
        np.testing.assert_array_equal
        if array_decimal is None else
        partial(np.testing.assert_array_almost_equal, decimal=array_decimal)
    )
    try:
        f(
            result,
            expected,
            verbose=array_verbose,
            err_msg=msg,
        )
    except AssertionError as e:
        raise AssertionError('\n'.join((str(e), _fmt_path(path))))
@assert_equal.register(LabelArray, LabelArray)
def assert_labelarray_equal(result, expected, path=(), **kwargs):
    """Assert two LabelArrays are equal by comparing their categories and
    their integer code arrays separately."""
    assert_equal(
        result.categories,
        expected.categories,
        path=path + ('.categories',),
        **kwargs
    )
    assert_equal(
        result.as_int_array(),
        expected.as_int_array(),
        path=path + ('.as_int_array()',),
        **kwargs
    )
def _register_assert_equal_wrapper(type_, assert_eq):
    """Register a new check for an ndframe object.

    Parameters
    ----------
    type_ : type
        The class to register an ``assert_equal`` dispatch for.
    assert_eq : callable[type_, type_]
        The function which checks that if the two ndframes are equal.

    Returns
    -------
    assert_ndframe_equal : callable[type_, type_]
        The wrapped function registered with ``assert_equal``.
    """
    @assert_equal.register(type_, type_)
    def assert_ndframe_equal(result, expected, path=(), msg='', **kwargs):
        try:
            assert_eq(
                result,
                expected,
                # Forward only the kwargs the pandas helper accepts.
                **filter_kwargs(assert_eq, kwargs)
            )
        except AssertionError as e:
            # Prepend our msg and append the dispatch path for context.
            raise AssertionError(
                _fmt_msg(msg) + '\n'.join((str(e), _fmt_path(path))),
            )
    return assert_ndframe_equal
# Wrap the pandas ``assert_*_equal`` helpers so they participate in the
# ``assert_equal`` dispatch and report ``path``/``msg`` context on failure.
# These assignments deliberately shadow the raw pandas imports of the same
# names at the top of the module.
assert_frame_equal = _register_assert_equal_wrapper(
    pd.DataFrame,
    assert_frame_equal,
)
assert_panel_equal = _register_assert_equal_wrapper(
    pd.Panel,
    assert_panel_equal,
)
assert_series_equal = _register_assert_equal_wrapper(
    pd.Series,
    assert_series_equal,
)
assert_index_equal = _register_assert_equal_wrapper(
    pd.Index,
    assert_index_equal,
)
@assert_equal.register(pd.Categorical, pd.Categorical)
def assert_categorical_equal(result, expected, path=(), msg='', **kwargs):
    """Assert two pandas Categoricals are equal by comparing their
    categories and their codes separately."""
    assert_equal(
        result.categories,
        expected.categories,
        path=path + ('.categories',),
        msg=msg,
        **kwargs
    )
    assert_equal(
        result.codes,
        expected.codes,
        path=path + ('.codes',),
        msg=msg,
        **kwargs
    )
@assert_equal.register(Adjustment, Adjustment)
def assert_adjustment_equal(result, expected, path=(), **kwargs):
    """Assert two Adjustments are equal attribute-by-attribute."""
    for attr in ('first_row', 'last_row', 'first_col', 'last_col', 'value'):
        assert_equal(
            getattr(result, attr),
            getattr(expected, attr),
            path=path + ('.' + attr,),
            **kwargs
        )
@assert_equal.register(
    (datetime.datetime, np.datetime64),
    (datetime.datetime, np.datetime64),
)
def assert_timestamp_and_datetime_equal(result,
                                        expected,
                                        path=(),
                                        msg='',
                                        allow_datetime_coercions=False,
                                        compare_nat_equal=True,
                                        **kwargs):
    """
    Branch for comparing python datetime (which includes pandas Timestamp) and
    np.datetime64 as equal.

    Returns raises unless ``allow_datetime_coercions`` is passed as True.
    """
    # Mixing the two types is an error unless explicitly allowed.
    assert allow_datetime_coercions or type(result) == type(expected), (
        "%sdatetime types (%s, %s) don't match and "
        "allow_datetime_coercions was not set.\n%s" % (
            _fmt_msg(msg),
            type(result),
            type(expected),
            _fmt_path(path),
        )
    )
    # Coerce both sides to pd.Timestamp so the comparison is uniform.
    result = pd.Timestamp(result)
    expected = pd.Timestamp(expected)
    if compare_nat_equal and pd.isnull(result) and pd.isnull(expected):
        return
    # Delegate to the base (object, object) comparison after coercion.
    assert_equal.dispatch(object, object)(
        result,
        expected,
        path=path,
        **kwargs
    )
@assert_equal.register(slice, slice)
def assert_slice_equal(result, expected, path=(), msg=''):
    """Assert that two slices have equal ``start``, ``stop``, and ``step``.

    Parameters
    ----------
    result : slice
        The slice that came from the function under test.
    expected : slice
        The expected slice.
    path : tuple, optional
        The path of lookups leading to this comparison.
    msg : str, optional
        An extra assertion message to print if this fails.

    Raises
    ------
    AssertionError
        Raised when any of the three fields differ; all differing fields
        are reported together.
    """
    # BUG FIX: the error messages previously interpolated ``result.stop``
    # where the expected start/stop/step values belonged, producing
    # misleading diagnostics like "starts are not equal: 1 != 5" with the
    # wrong right-hand side.
    diff_start = (
        ('starts are not equal: %s != %s' % (result.start, expected.start))
        if result.start != expected.start else
        ''
    )
    diff_stop = (
        ('stops are not equal: %s != %s' % (result.stop, expected.stop))
        if result.stop != expected.stop else
        ''
    )
    diff_step = (
        ('steps are not equal: %s != %s' % (result.step, expected.step))
        if result.step != expected.step else
        ''
    )
    diffs = diff_start, diff_stop, diff_step
    assert not any(diffs), '%s%s\n%s' % (
        _fmt_msg(msg),
        '\n'.join(filter(None, diffs)),
        _fmt_path(path),
    )
def assert_isidentical(result, expected, msg=''):
    """Assert that ``result.isidentical(expected)`` is true.

    Intended for objects that expose an ``isidentical`` method rather than
    relying on ``==``.
    """
    identical = result.isidentical(expected)
    assert identical, (
        '%s%s is not identical to %s' % (_fmt_msg(msg), result, expected)
    )
# Optionally fold datashape's dshape comparisons into ``assert_equal``.
# ``dissoc`` drops datashape's (object, object) base case so this module's
# own fallback keeps precedence.
try:
    # pull the dshape cases in
    from datashape.util.testing import assert_dshape_equal
except ImportError:
    # datashape is an optional dependency; silently skip when absent.
    pass
else:
    assert_equal.funcs.update(
        dissoc(assert_dshape_equal.funcs, (object, object)),
    )
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2015, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Subcommands are now implemented using a modular design that allows extending
jug with additional commands.
This API is currently in experimental stage and may change in the future.
The following serves as an example of how to extend jug's commands.
Lets assume you wanted to create a custom report and have it available as::
$ jug my-fancy-report
One way to achieve this is to add the following code to ``~/.config/jug/jug_user_commands.py``::
from jug.subcommands import SubCommand
class FancyReport(SubCommand):
"Produces a fancy report of my results"
name = "my-fancy-report"
def run(self, *args, **kwargs):
...
fancy_report = FancyReport()
The first line of the class docstring is important as it will be shown in jug's
usage help page. The name attribute is also required and should be the name of
your subcommand on the command-line.
The body of the method ``run()`` defines what should happen when you
call the subcommand ``jug my-fancy-report``.
The ``run`` function will receive the following objects::
* ``options`` - object representing command-line and user options
* ``store`` - backend object responsible for handling jobs
* ``jugspace`` - a namespace of jug internal variables (better not touch)
additional objects may be introduced in the future so make sure your function
uses ``*args, **kwargs`` to maintain compatibility.
Finally, in order to register the subcommand, you must instantiate the subcommand.
If your subcommand needs configurable options you can expose them via command-line
by defining two additional methods::
class FancyReport(SubCommand):
...
def parse(self, parser):
parser.add_argument('--tofile', action='store',
dest='report_tofile',
help='Name of file to use for report')
def parse_defaults(self):
return {
"report_tofile": "report.txt",
}
fancy_report = FancyReport()
The first method configures argparse arguments that will be available as
``jug my-fancy-report --tofile myreport.txt``. These will also be available to
the ``run()`` method as part of the ``options`` object.
The second defines default values in case of omission. The ``key`` should match
the ``dest=`` attribute of ``add_argument()`` and the ``value`` should be any object
to be used by your ``run()`` method. Note that the value received in the command-line
will be automatically converted to the same type as this default (i.e. if your default
is ``True`` any ``--tofile john`` would result in ``bool("john") -> True``).
For more information on parser configuration refer to ``argparse``'s documentation.
NOTE: A few words of caution, we cannot rely on ``argparse``'s ``default=`` option since
it doesn't allow distinguishing between user supplied and built-in (default) values.
For the same reason, don't use ``action=`` with ``store_true`` or ``store_false``
instead use ``store_const`` and ``const=`` with ``True`` or ``False``.
Failing to do so will cause any matching setting on ``jugrc`` to not have any effect.
"""
__all__ = [
'cmdapi',
'SubCommand',
'SubCommandError',
'NoSuchCommandError',
]
import importlib
import logging
import os
import pkgutil
import sys
import traceback
from ..options import Options
from ..jug_version import CITATION
from abc import ABCMeta, abstractmethod, abstractproperty
from six import add_metaclass
# Raised when a subcommand implementation is malformed (e.g. missing
# docstring or a ``run()`` with the wrong signature).
class SubCommandError(Exception):
    "Exception raised when a subcommand doesn't respect the API"
class NoSuchCommandError(Exception):
    # Docstring fixed: it was a copy-paste of SubCommandError's.
    "Exception raised when a requested subcommand is not registered"
def _get_helptext(command):
"First line of the docstring is to be shown on the help/usage text"
try:
return command.__doc__.splitlines()[0]
except AttributeError:
raise SubCommandError("Command '%s' is missing a documentation string" % (command,))
def _invalid_module(module, e):
    """Raise SubCommandError describing a subcommand that violates the API.

    Always raises; never returns.
    """
    # The message text is shown verbatim to the user.
    raise SubCommandError(
        """Invalid subcommand structure.
Please make sure that the subcommand(s) '%s' conform(s) to the API.
help(jug.subcommands) for more information
Original error was:
%s
""" % (module, e))
@add_metaclass(ABCMeta)
class SubCommand:
    """Define a subcommand and its command-line options

    Subclasses must provide ``name`` and ``run()``.  Instantiating a
    subclass registers it with the module-level ``cmdapi`` manager (see
    the module docstring for the full extension protocol).
    """
    def __init__(self):
        # Registration happens at instantiation time as a side effect.
        cmdapi._register(self.name, self)
        cmdapi.update_defaults(self.name)

    def __call__(self, *args, **kwargs):
        # Calling the instance is equivalent to calling ``run``.
        return self.run(*args, **kwargs)

    @abstractproperty
    def name(self):
        # The command-line name of the subcommand (e.g. "my-fancy-report").
        pass

    def parse(self, parser):
        """Define command line options using parser.add_argument()

        The parser object is an argparser subparser group.

        Anything returned by this method is ignored
        """
        pass

    def parse_defaults(self):
        """Define default values for parser options

        Should return a dictionary mapping ``dest=`` targets to their default.
        """
        pass

    @abstractmethod
    def run(self, *args, **kwargs):
        """Re-define to specify what your subcommand is supposed to do

        This code will receive the following arguments:

        * ``options`` - object representing command-line and user options
        * ``store`` - backend object responsible for handling jobs
        * ``jugspace`` - a namespace of jug internal variables (better not touch)

        Anything returned by this method is ignored
        """
        pass
class SubCommandDict(dict):
    """Mapping of command name -> SubCommand that lazily imports command
    modules on the first lookup of a missing name."""
    def __getitem__(self, command):
        if command not in self:
            # Importing a command module registers it here as a side effect
            # (via SubCommand.__init__ -> cmdapi._register).
            self.load_commands(command)
        return super(SubCommandDict, self).__getitem__(command)

    def load_commands(self, stop_on_command=None):
        """Load all modules in jug's subcommands and user's jug folder

        If stop_on_command is given the function will return True as soon as
        a matching command is found
        """
        for _, name, _ in pkgutil.iter_modules(__path__):
            module = __name__ + '.' + name
            self._try_import(module)
            if stop_on_command in self:
                return True
        self._load_user_commands()

    def _try_import(self, module):
        # A broken command module must not kill jug: warn and continue.
        try:
            importlib.import_module(module)
        except Exception as e:
            logging.warning("Couldn't load subcommand '%s' with error '%s'", module, e)

    def _load_user_commands(self, user_path="~/.config/jug/"):
        """Import ``jug_user_commands.py`` from the user's config directory,
        if present, adding that directory to ``sys.path`` first."""
        user_path = os.path.expanduser(user_path)
        logging.debug("Loading user commands from '%s'", user_path)
        if os.path.isdir(user_path):
            if user_path not in sys.path:
                logging.debug("Adding path '%s' to PYTHONPATH", user_path)
                sys.path.insert(0, user_path)
            user_commands = os.path.join(user_path, "jug_user_commands.py")
            if os.path.isfile(user_commands):
                self._try_import("jug_user_commands")
class SubCommandManager:
    """Registry and dispatcher for jug subcommands.

    Holds the name -> SubCommand mapping and the accumulated default
    options contributed by each registered command.
    """
    def __init__(self):
        self._commands = SubCommandDict()
        self.default_options = Options(None)

    def _register(self, name, cmd_instance):
        # Called from SubCommand.__init__; the last registration wins.
        if name in self._commands and self._commands[name] != cmd_instance:
            logging.warning("Jug: command: '%s' will be overriden with code from '%s'",
                            name, cmd_instance.__class__.__name__)
        self._commands[name] = cmd_instance

    def update_defaults(self, name):
        """Merge command ``name``'s ``parse_defaults()`` into the global
        default options."""
        cmd = self.get(name)
        opts = cmd.parse_defaults()
        if opts is not None:
            self.default_options.update(opts)

    def get(self, command):
        """Return the SubCommand registered under ``command``.

        Raises NoSuchCommandError if it cannot be found or loaded.
        """
        try:
            return self._commands[command]
        except KeyError:
            raise NoSuchCommandError("Unknown subcommand '%s'" % (command,))

    def run(self, command, *args, **kwargs):
        """Execute subcommand
        """
        try:
            cmd = self.get(command)
        except NoSuchCommandError as e:
            # usage() exits the process by default.
            self.usage(error=e)
        try:
            return cmd(*args, **kwargs)
        except TypeError as e:
            # A TypeError about keywords usually means the subcommand's
            # run() signature does not follow the documented API.
            if "unexpected keyword" in str(e):
                _invalid_module(command, e)
            traceback.print_exc(file=sys.stderr)
            self.usage(error=e)

    def usage(self, error='', exit=True, _print=True, *args, **kwargs):
        "Shows help/usage information"
        usage_text = ['''\
jug SUBCOMMAND [JUGFILE] [OPTIONS...]
Docs: https://jug.readthedocs.io/
Copyright: 2008-2017, Luis Pedro Coelho
Citation: http://doi.org/10.5334/jors.161
If you use Jug for generating results for a peer-reviewed publication, please
cite:
Coelho, L.P., (2017). Jug: Software for Parallel Reproducible Computation in
Python. Journal of Open Research Software. 5(1), p.30.
http://doi.org/10.5334/jors.161
Subcommands
-----------
''']
        # Force all commands to load so the listing is complete.
        self._commands.load_commands()
        for name, cmd in sorted(self._commands.items()):
            usage_text.append(" %-15s %s" % (name + ":", _get_helptext(cmd)))
        usage_text.append("\nhelp:")
        usage_text.append(" Use 'jug <subcommand> --help' for subcommand specific options")
        if error:
            usage_text.append("")
            usage_text.append(str(error))
        message = "\n".join(usage_text) + "\n \n"
        if _print:
            sys.stdout.write(message)
        if exit:
            sys.exit(1)
        return message

    def get_subcommand_parsers(self, subparsers):
        """Create one argparse subparser per command, letting each command
        add its own argument group via ``parse()``."""
        self._commands.load_commands()
        parsers = []
        for name, cmd in sorted(self._commands.items()):
            parser = subparsers.add_parser(
                name,
                # This is necessary to have all the same output on all subparsers
                usage=self.usage(_print=False, exit=False),
            )
            parsers.append(parser)
            group = parser.add_argument_group(name)
            cmd.parse(group)
        return parsers
def maybe_print_citation_info(options):
    '''Unless options.will_cite, prints citation information'''
    if options.will_cite:
        return
    for line in (
            "If you use Jug in a published research paper please cite",
            CITATION,
            '',
            'Use the --will-cite option to suppress this message',
            '(Or set will_cite = True in the Jug configuration file)',
            '',
            ):
        print(line)
# Module-level singleton; SubCommand subclasses register themselves against
# this instance when instantiated.
cmdapi = SubCommandManager()
|
|
#!/usr/bin/env python
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import unittest
from spyne.test import FakeApp
from spyne.interface import Interface
from spyne.interface.wsdl import Wsdl11
from spyne.protocol.xml import XmlDocument
from spyne.model.fault import Fault
class FaultTests(unittest.TestCase):
    """Tests for spyne Fault construction, XML round-tripping, and schema
    generation.

    Python 3 compatibility fixes applied:
    * ``failIf``/``failUnless`` (deprecated aliases removed in Python 3.12)
      replaced with ``assertFalse``/``assertTrue``.
    * ``element.getchildren()`` (removed from ElementTree) replaced with
      ``list(element)``.
    * ``schema.elements.values()[0]`` (invalid on Python 3 dict views)
      replaced with ``list(...)[0]``.
    """

    def test_ctor_defaults(self):
        fault = Fault()
        self.assertEqual(fault.faultcode, 'Server')
        self.assertEqual(fault.faultstring, 'Fault')
        self.assertEqual(fault.faultactor, '')
        self.assertEqual(fault.detail, None)
        self.assertEqual(repr(fault), "Fault(Server: 'Fault')")

    def test_ctor_faultcode_w_senv_prefix(self):
        fault = Fault(faultcode='Other')
        self.assertEqual(fault.faultcode, 'Other')
        self.assertEqual(repr(fault), "Fault(Other: 'Fault')")

    def test_ctor_explicit_faultstring(self):
        fault = Fault(faultstring='Testing')
        self.assertEqual(fault.faultstring, 'Testing')
        self.assertEqual(repr(fault), "Fault(Server: 'Testing')")

    def test_ctor_no_faultstring_overridden_get_type_name(self):
        # With no explicit faultstring, Fault falls back to get_type_name().
        class Derived(Fault):
            def get_type_name(self):
                return 'Overridden'
        fault = Derived()
        self.assertEqual(fault.faultstring, 'Overridden')
        self.assertEqual(repr(fault), "Fault(Server: 'Overridden')")

    def test_to_parent_wo_detail(self):
        from lxml.etree import Element
        import spyne.const.xml_ns
        ns_soap_env = spyne.const.xml_ns.soap11_env
        soap_env = spyne.const.xml_ns.const_prefmap[spyne.const.xml_ns.soap11_env]
        element = Element('testing')
        fault = Fault()
        cls = Fault
        XmlDocument().to_parent(None, cls, fault, element, 'urn:ignored')
        # list(element) replaces the removed getchildren().
        (child,) = list(element)
        self.assertEqual(child.tag, '{%s}Fault' % ns_soap_env)
        self.assertEqual(child.find('faultcode').text, '%s:Server' % soap_env)
        self.assertEqual(child.find('faultstring').text, 'Fault')
        self.assertEqual(child.find('faultactor').text, '')
        self.assertFalse(child.findall('detail'))

    def test_to_parent_w_detail(self):
        from lxml.etree import Element
        element = Element('testing')
        detail = Element('something')
        fault = Fault(detail=detail)
        cls = Fault
        XmlDocument().to_parent(None, cls, fault, element, 'urn:ignored')
        (child,) = list(element)
        # The detail element must be embedded by reference, not copied.
        self.assertTrue(child.find('detail').find('something') is detail)

    def test_from_xml_wo_detail(self):
        from lxml.etree import Element
        from lxml.etree import SubElement
        import spyne.const.xml_ns
        ns_soap_env = spyne.const.xml_ns.soap11_env
        soap_env = spyne.const.xml_ns.const_prefmap[spyne.const.xml_ns.soap11_env]
        element = Element('{%s}Fault' % ns_soap_env)
        fcode = SubElement(element, 'faultcode')
        fcode.text = '%s:other' % soap_env
        fstr = SubElement(element, 'faultstring')
        fstr.text = 'Testing'
        actor = SubElement(element, 'faultactor')
        actor.text = 'phreddy'
        fault = XmlDocument().from_element(None, Fault, element)
        self.assertEqual(fault.faultcode, '%s:other' % soap_env)
        self.assertEqual(fault.faultstring, 'Testing')
        self.assertEqual(fault.faultactor, 'phreddy')
        self.assertEqual(fault.detail, None)

    def test_from_xml_w_detail(self):
        from lxml.etree import Element
        from lxml.etree import SubElement
        import spyne.const.xml_ns
        ns_soap_env = spyne.const.xml_ns.soap11_env
        element = Element('{%s}Fault' % ns_soap_env)
        fcode = SubElement(element, 'faultcode')
        fcode.text = 'soap11env:other'
        fstr = SubElement(element, 'faultstring')
        fstr.text = 'Testing'
        actor = SubElement(element, 'faultactor')
        actor.text = 'phreddy'
        detail = SubElement(element, 'detail')
        fault = XmlDocument().from_element(None, Fault, element)
        self.assertTrue(fault.detail is detail)

    def test_add_to_schema_no_extends(self):
        import spyne.const.xml_ns
        ns_xsd = spyne.const.xml_ns.xsd
        class cls(Fault):
            __namespace__ = 'ns'
            @classmethod
            def get_type_name_ns(self, app):
                return 'testing:My'
        interface = Interface(FakeApp())
        interface.add_class(cls)
        pref = cls.get_namespace_prefix(interface)
        wsdl = Wsdl11(interface)
        wsdl.build_interface_document('prot://addr')
        schema = wsdl.get_schema_info(pref)
        self.assertEqual(len(schema.types), 1)
        c_cls = interface.classes['{ns}cls']
        c_elt = schema.types[0]
        self.assertTrue(c_cls is cls)
        self.assertEqual(c_elt.tag, '{%s}complexType' % ns_xsd)
        self.assertEqual(c_elt.get('name'), 'cls')
        self.assertEqual(len(schema.elements), 1)
        # Materialize the view before indexing (py3 dict views don't index).
        e_elt = list(schema.elements.values())[0]
        self.assertEqual(e_elt.tag, '{%s}element' % ns_xsd)
        self.assertEqual(e_elt.get('name'), 'cls')
        self.assertEqual(e_elt.get('type'), 'testing:My')
        self.assertEqual(len(e_elt), 0)

    def test_add_to_schema_w_extends(self):
        import spyne.const.xml_ns
        ns_xsd = spyne.const.xml_ns.xsd
        class base(Fault):
            __namespace__ = 'ns'
            @classmethod
            def get_type_name_ns(self, app):
                return 'testing:Base'
        class cls(Fault):
            __namespace__ = 'ns'
            @classmethod
            def get_type_name_ns(self, app):
                return 'testing:My'
        interface = Interface(FakeApp())
        interface.add_class(cls)
        pref = cls.get_namespace_prefix(interface)
        wsdl = Wsdl11(interface)
        wsdl.build_interface_document('prot://addr')
        schema = wsdl.get_schema_info(pref)
        self.assertEqual(len(schema.types), 1)
        self.assertEqual(len(interface.classes), 1)
        c_cls = next(iter(interface.classes.values()))
        c_elt = next(iter(schema.types.values()))
        self.assertTrue(c_cls is cls)
        self.assertEqual(c_elt.tag, '{%s}complexType' % ns_xsd)
        self.assertEqual(c_elt.get('name'), 'cls')
        from lxml import etree
        print(etree.tostring(c_elt, pretty_print=True))
        self.assertEqual(len(c_elt), 0)
class DummySchemaEntries:
    """Minimal stand-in for a schema-entry collector used in tests.

    Records every (class, node) pair handed to it so assertions can
    later inspect what the code under test tried to register.
    """

    def __init__(self, app):
        self.app = app
        self._complex_types = list()
        self._elements = list()

    def add_complex_type(self, cls, ct):
        """Remember a complexType registration as a (cls, ct) pair."""
        self._complex_types.append((cls, ct))

    def add_element(self, cls, elt):
        """Remember an element registration as a (cls, elt) pair."""
        self._elements.append((cls, elt))
if __name__ == '__main__': #pragma NO COVERAGE
    # allow running this test module directly from the command line
    unittest.main()
|
|
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from os import path
from colorsys import hsv_to_rgb, rgb_to_hsv
from collections import OrderedDict
from random import shuffle
import numpy
from pysces.PyscesModelMap import ModelMap
from pysces import Scanner
import pysces
from matplotlib.pyplot import get_cmap
from .. import modeltools
from ..latextools import LatexExpr
from ..utils.plotting import ScanFig, LineData, Data2D
from ..utils.misc import silence_print
from ..utils.misc import DotDict
from ..utils.misc import formatter_factory
exportLAWH = silence_print(pysces.write.exportLabelledArrayWithHeader)
__all__ = ['RateChar']
def strip_nan_from_scan(array_like):
    """Trim leading and trailing rows whose second column is NaN.

    Column 0 (the scan input) is assumed to contain valid data
    throughout, so only the first results column is inspected.
    Interior NaN rows between the first and last valid rows are kept.
    Raises ValueError when no valid row exists.
    """
    valid = [not numpy.isnan(value) for value in array_like[:, 1]]
    first = valid.index(True)
    last = len(valid) - valid[::-1].index(True)
    return array_like[first:last, :]
class RateChar(object):
    """Rate-characteristic analysis driver for a PySCeS model.

    For each analysed species a log-spaced parameter scan is performed
    around its steady state; the results are attached to
    ``self.<species>`` as ``RateCharData`` objects.
    """

    def __init__(self, mod, min_concrange_factor=100,
                 max_concrange_factor=100,
                 scan_points=256,
                 auto_load=False):
        # mod: a loaded pysces model; quieted so scans do not spam stdout
        super(RateChar, self).__init__()
        self.mod = mod
        self.mod.SetQuiet()
        self._model_map = ModelMap(mod)
        self.mod.doState()
        self._analysis_method = 'ratechar'
        self._working_dir = modeltools.make_path(self.mod,
                                                 self._analysis_method)
        # default scan window: ss/min_factor .. ss*max_factor
        self._min_concrange_factor = min_concrange_factor
        self._max_concrange_factor = max_concrange_factor
        self._scan_points = scan_points
        self._ltxe = LatexExpr(self.mod)
        # placeholders; filled by do_ratechar() or load_session()
        for species in self.mod.species:
            setattr(self, species, None)
        if auto_load:
            self.load_session()
    def do_ratechar(self, fixed='all',
                    scan_min=None,
                    scan_max=None,
                    min_concrange_factor=None,
                    max_concrange_factor=None,
                    scan_points=None,
                    solver=0,
                    auto_save=False):
        """Run a rate-characteristic scan for one, several or all species.

        ``fixed`` may be 'all', a single species name, or a list/tuple of
        species names. Scan bounds come from scan_min/scan_max when
        given, otherwise from the concentration-range factors (see
        _min_max_chooser). Results are attached to ``self.<species>``
        as RateCharData objects. Raises AssertionError for an unknown
        species name.
        """
        # this function wraps _do_scan functionality in a user friendly bubble
        if fixed == 'all':
            to_scan = self.mod.species
        elif type(fixed) is list or type(fixed) is tuple:
            for each in fixed:
                assert each in self.mod.species, 'Invalid species'
            to_scan = fixed
        else:
            assert fixed in self.mod.species, 'Invalid species'
            to_scan = [fixed]
        for i in to_scan:
            each = str(i)  # fix for Python 2 compatibility
            fixed_mod, fixed_ss = self._fix_at_ss(each)
            scan_start = self._min_max_chooser(fixed_ss,
                                               scan_min,
                                               min_concrange_factor,
                                               'min')
            scan_end = self._min_max_chooser(fixed_ss,
                                             scan_max,
                                             max_concrange_factor,
                                             'max')
            # here there could be a situation where a scan_min > scan_max
            # I wonder what will happen....
            if not scan_points:
                scan_points = self._scan_points
            column_names, results = self._do_scan(fixed_mod,
                                                  each,
                                                  scan_start,
                                                  scan_end,
                                                  scan_points)
            # drop leading/trailing rows where the scan failed (NaNs)
            cleaned_results = strip_nan_from_scan(results)
            rcd = RateCharData(fixed_ss,
                               fixed_mod,
                               self.mod,
                               column_names,
                               cleaned_results,
                               self._model_map,
                               self._ltxe)
            setattr(self, each, rcd)
        if auto_save:
            self.save_session()
def _min_max_chooser(self, ss, point, concrange, min_max):
# chooses a minimum or maximum point based
# on the information given by a user
# ie if a specific min/max point is given - use that
# if only concentration range is given -use that
# if nothing is given - use the defualt conc_range_factor
# pretty simple stuff
if point:
the_point = point
if not point and concrange:
if min_max == 'min':
the_point = ss / concrange
elif min_max == 'max':
the_point = ss * concrange
if not point and not concrange:
if min_max == 'min':
the_point = ss / self._min_concrange_factor
elif min_max == 'max':
the_point = ss * self._max_concrange_factor
return the_point
    @silence_print
    def _do_scan(self,
                 fixed_mod,
                 fixed,
                 scan_min,
                 scan_max,
                 scan_points,
                 solver=0):
        """Log-scan ``fixed`` on the clamped model, recording the demand
        and supply flux blocks.

        Returns (column_names, results_array); column 0 of the array is
        the scan input itself.
        """
        # do scan is a simplified interface to pysces.Scanner
        # more intuitive than Scan1 (functional vs OO??)
        # returns the names of the scanned blocks together with
        # the results of the scan
        assert solver in (0, 1, 2), 'Solver mode can only be one of 0, 1 or 2'
        fixed_mod.mode_solver = solver
        # reactions that consume ``fixed`` form its demand blocks...
        demand_blocks = [
            'J_' + r for r in getattr(self._model_map, fixed).isSubstrateOf()]
        demand_blocks = [str(i) for i in demand_blocks]
        # ...and reactions that produce it form its supply blocks
        supply_blocks = [
            'J_' + r for r in getattr(self._model_map, fixed).isProductOf()]
        supply_blocks = [str(i) for i in supply_blocks]
        user_output = [fixed] + demand_blocks + supply_blocks
        scanner = Scanner(fixed_mod)
        scanner.quietRun = True
        scanner.addScanParameter(
            fixed, scan_min, scan_max, scan_points, log=True)
        scanner.addUserOutput(*user_output)
        scanner.Run()
        return user_output, scanner.UserOutputResults
    @silence_print
    def _fix_at_ss(self, fixed):
        """Clamp species ``fixed`` at its steady-state value.

        Returns (fixed_mod, fixed_ss): a model copy with the species
        fixed, and the steady-state concentration it was fixed at.
        """
        # fixes the metabolite at the steady_state
        # (calls psctb.modeltools.fix_metabolite)
        # and returns both the ss value and the fixed model
        self.mod.doState()
        fixed_ss = getattr(self.mod, fixed + '_ss')
        fixed_mod = modeltools.fix_metabolite(self.mod, fixed)
        fixed_mod.SetQuiet()
        # i don't like this approach at all, too many possible unintended side
        # effects
        # setattr(fixed_mod, fixed, fixed_ss)
        # setattr(fixed_mod, 'fixed', fixed)
        # setattr(fixed_mod, 'fixed_ss', fixed_ss)
        fixed_mod.doState()
        return fixed_mod, fixed_ss
def save_session(self, file_name=None):
file_name = modeltools.get_file_path(working_dir=self._working_dir,
internal_filename='save_data',
fmt='npz',
file_name=file_name,
write_suffix=False)
to_save = {}
for species in self.mod.species:
species_object = getattr(self, species)
try:
column_array = numpy.array(species_object._column_names)
scan_results = species_object._scan_results
to_save['col_{0}'.format(species)] = column_array
to_save['res_{0}'.format(species)] = scan_results
except:
pass
numpy.savez(file_name, **to_save)
def save_results(self, folder=None, separator=',',format='%f'):
base_folder = folder
for species in self.mod.species:
if folder:
folder = path.join(base_folder, species)
getattr(self, species).save_all_results(folder=folder,
separator=separator)
def load_session(self, file_name=None):
file_name = modeltools.get_file_path(working_dir=self._working_dir,
internal_filename='save_data',
fmt='npz',
file_name=file_name,
write_suffix=False)
loaded_data = {}
try:
with numpy.load(file_name) as data_file:
for k, v in data_file.items():
loaded_data[k] = v
except IOError as e:
raise e
for species in self.mod.species:
try:
column_names = [str(each) for each in
list(loaded_data['col_{0}'.format(species)])]
scan_results = loaded_data['res_{0}'.format(species)]
fixed_species = species
fixed_mod, fixed_ss = self._fix_at_ss(fixed_species)
rcd = RateCharData(fixed_ss=fixed_ss,
fixed_mod=fixed_mod,
basemod=self.mod, column_names=column_names,
scan_results=scan_results,
model_map=self._model_map, ltxe=self._ltxe)
setattr(self, fixed_species, rcd)
except:
pass
class RateCharData(object):
    """Scan results and MCA coefficients for one fixed species.

    Holds the raw scan in ``scan_results`` (a DotDict) and the
    steady-state coefficient values in ``mca_results``; also builds the
    LineData objects used for plotting.
    """

    def __init__(self,
                 fixed_ss,
                 fixed_mod,
                 basemod,
                 column_names,
                 scan_results,
                 model_map,
                 ltxe):
        # fixed_ss: steady-state value the species was clamped at
        # fixed_mod: model copy with the species fixed; basemod: original
        super(RateCharData, self).__init__()
        self.mod = fixed_mod
        self.scan_results = DotDict()
        self.mca_results = DotDict()
        # tangent lines nominally span fixed_ss/3 .. fixed_ss*3
        self._slope_range_factor = 3.0
        self.scan_results['fixed'] = column_names[0]
        self.scan_results['fixed_ss'] = fixed_ss
        self.scan_results['scan_range'] = scan_results[:, 0]
        self.scan_results['flux_names'] = column_names[1:]
        self.scan_results['flux_data'] = scan_results[:, 1:]
        self.scan_results['scan_points'] = len(self.scan_results.scan_range)
        # the remaining entries are filled in by _data_setup()
        self.scan_results['flux_max'] = None
        self.scan_results['flux_min'] = None
        self.scan_results['scan_max'] = None
        self.scan_results['scan_min'] = None
        self.scan_results['ec_names'] = None
        self.scan_results['ec_data'] = None
        self.scan_results['rc_names'] = None
        self.scan_results['rc_data'] = None
        self.scan_results['prc_names'] = None
        self.scan_results['prc_data'] = None
        self._column_names = column_names
        self._scan_results = scan_results
        self._model_map = model_map
        self._analysis_method = 'ratechar'
        self._basemod = basemod
        self._working_dir = modeltools.make_path(self._basemod,
                                                 self._analysis_method,
                                                 [self.scan_results.fixed])
        self._ltxe = ltxe
        self._color_dict_ = None
        self._data_setup()
        self.mca_results._ltxe = ltxe
        self.mca_results._make_repr(
            '"$" + self._ltxe.expression_to_latex(k) + "$"', 'v',
            formatter_factory())
        # del self.scan_results
        # del self.mca_results
    def _data_setup(self):
        """Populate all derived scan data.

        The call order matters: later steps read values produced by
        earlier ones (e.g. coefficient lines need min/max bounds).
        """
        # reset value to do mcarc
        setattr(self.mod, self.scan_results.fixed, self.scan_results.fixed_ss)
        self.mod.doMcaRC()
        self._make_attach_total_fluxes()
        self._min_max_setup()
        self._attach_fluxes_to_self()
        self._make_all_coefficient_lines()
        self._attach_all_coefficients_to_self()
        self._make_all_summary()
        self._make_all_line_data()
def _change_colour_order(self, order=None):
if not order:
order = list(self._color_dict_.keys())
shuffle(order)
self._color_dict_ = dict(list(zip(order, list(self._color_dict_.values()))))
self._make_all_line_data()
    def _make_all_line_data(self):
        """Build every LineData dict and merge them into one ordered
        mapping; the update order fixes plot/legend ordering."""
        self._make_flux_ld()
        self._make_ec_ld()
        self._make_rc_ld()
        self._make_prc_ld()
        self._make_total_flux_ld()
        self._line_data_dict = OrderedDict()
        self._line_data_dict.update(self._prc_ld_dict)
        self._line_data_dict.update(self._flux_ld_dict)
        self._line_data_dict.update(self._total_flux_ld_dict)
        self._line_data_dict.update(self._ec_ld_dict)
        self._line_data_dict.update(self._rc_ld_dict)
        # the per-type dicts are only needed transiently
        del self._flux_ld_dict
        del self._ec_ld_dict
        del self._rc_ld_dict
        del self._prc_ld_dict
        del self._total_flux_ld_dict
def _make_all_summary(self):
self._make_ec_summary()
self._make_cc_summary()
self._make_rc_summary()
self._make_prc_summary()
self.mca_results.update(self._ec_summary)
self.mca_results.update(self._cc_summary)
self.mca_results.update(self._rc_summary)
self.mca_results.update(self._prc_summary)
del self._ec_summary
del self._cc_summary
del self._rc_summary
del self._prc_summary
    def _make_ec_summary(self):
        """Steady-state elasticities of every reaction the fixed species
        is a reagent or modifier of, keyed 'ec<reaction>_<species>'."""
        ecs = {}
        reagent_of = [each[2:] for each in self.scan_results.flux_names]
        modifier_of = getattr(
            self._model_map, self.scan_results.fixed).isModifierOf()
        all_reactions = reagent_of + modifier_of
        for reaction in all_reactions:
            name = 'ec%s_%s' % (reaction, self.scan_results.fixed)
            val = getattr(self.mod, name)
            ecs[name] = val
        self._ec_summary = ecs

    def _make_rc_summary(self):
        """Steady-state flux response coefficients, keyed
        'rcJ<reaction>_<species>'."""
        rcs = {}
        for flux in self.scan_results.flux_names:
            reaction = flux[2:]
            name = '%s_%s' % (reaction, self.scan_results.fixed)
            val = getattr(self.mod.rc, name)
            name = 'rcJ' + name
            rcs[name] = val
        self._rc_summary = rcs

    def _make_cc_summary(self):
        """Steady-state flux control coefficients of each flux block
        over every relevant reaction, keyed 'ccJ<flux>_<reaction>'."""
        ccs = {}
        reagent_of = [each[2:] for each in self.scan_results.flux_names]
        modifier_of = getattr(
            self._model_map, self.scan_results.fixed).isModifierOf()
        all_reactions = reagent_of + modifier_of
        for flux_reaction in reagent_of:
            for reaction in all_reactions:
                name = 'ccJ%s_%s' % (flux_reaction, reaction)
                val = getattr(self.mod, name)
                ccs[name] = val
        self._cc_summary = ccs

    def _make_prc_summary(self):
        """Steady-state partial response coefficients (ec * cc), keyed
        'prcJ<flux>_<species>_<route>'."""
        prcs = {}
        reagent_of = [each[2:] for each in self.scan_results.flux_names]
        modifier_of = getattr(
            self._model_map, self.scan_results.fixed).isModifierOf()
        all_reactions = reagent_of + modifier_of
        for flux_reaction in reagent_of:
            for route_reaction in all_reactions:
                ec = getattr(self.mod,
                             'ec%s_%s' % (
                                 route_reaction, self.scan_results.fixed))
                cc = getattr(self.mod,
                             'ccJ%s_%s' % (flux_reaction, route_reaction))
                val = ec * cc
                name = 'prcJ%s_%s_%s' % (flux_reaction,
                                         self.scan_results.fixed,
                                         route_reaction)
                prcs[name] = val
        self._prc_summary = prcs
    def save_summary(self, file_name=None, separator=',',fmt='%f'):
        """Write the steady-state MCA coefficient summary as csv, sorted
        by coefficient name, with a single 'Value' column. An IOError is
        reported to stdout rather than raised."""
        file_name = modeltools.get_file_path(working_dir=self._working_dir,
                                             internal_filename='mca_summary',
                                             fmt='csv',
                                             file_name=file_name, )
        keys = list(self.mca_results.keys())
        keys.sort()
        values = numpy.array([self.mca_results[k]
                              for k in keys]).reshape(len(keys), 1)
        try:
            exportLAWH(values,
                       names=keys,
                       header=['Value'],
                       fname=file_name,
                       sep=separator,
                       format=fmt)
        except IOError as e:
            print(e.strerror)

    def save_flux_results(self, file_name=None, separator=',',fmt='%f'):
        """Write the raw flux scan plus the two aggregate total-flux
        columns as csv. An IOError is reported to stdout, not raised."""
        file_name = modeltools.get_file_path(working_dir=self._working_dir,
                                             internal_filename='flux_results',
                                             fmt='csv',
                                             file_name=file_name, )
        scan_points = self.scan_results.scan_points
        all_cols = numpy.hstack([
            self._scan_results,
            self.scan_results.total_supply.reshape(scan_points, 1),
            self.scan_results.total_demand.reshape(scan_points, 1)])
        column_names = self._column_names + ['Total Supply', 'Total Demand']
        try:
            exportLAWH(all_cols,
                       names=None,
                       header=column_names,
                       fname=file_name,
                       sep=separator,
                       format=fmt)
        except IOError as e:
            print(e.strerror)
    def save_coefficient_results(self,
                                 coefficient,
                                 file_name=None,
                                 separator=',',
                                 folder=None,
                                 fmt='%f'):
        """Write tangent-line data for one coefficient type ('ec', 'rc'
        or 'prc') as csv; header columns alternate 'x_vals' and the
        coefficient name, matching the (x, y) pair layout of the data.

        NOTE(review): ``folder`` is currently unused here (see the TODO
        below this method).
        """
        assert_message = 'coefficient must be one of "ec", "rc" or "prc"'
        assert coefficient in ['rc', 'ec', 'prc'], assert_message
        base_name = coefficient + '_results'
        file_name = modeltools.get_file_path(working_dir=self._working_dir,
                                             internal_filename=base_name,
                                             fmt='csv',
                                             file_name=file_name, )
        results = getattr(self.scan_results, coefficient + '_data')
        names = getattr(self.scan_results, coefficient + '_names')
        # each coefficient contributes an (x, y) column pair
        new_names = []
        for each in names:
            new_names.append('x_vals')
            new_names.append(each)
        try:
            exportLAWH(results,
                       names=None,
                       header=new_names,
                       fname=file_name,
                       sep=separator,
                       format=fmt)
        except IOError as e:
            print(e.strerror)

    # TODO fix this method so that folder is a parameter only her
    def save_all_results(self, folder=None, separator=',',fmt='%f'):
        """Write the flux results, the MCA summary and all coefficient
        results into ``folder`` (defaults to the working directory)."""
        if not folder:
            folder = self._working_dir
        file_name = modeltools.get_file_path(working_dir=folder,
                                             internal_filename='flux_results',
                                             fmt='csv')
        self.save_flux_results(separator=separator, file_name=file_name,fmt=fmt)
        file_name = modeltools.get_file_path(working_dir=folder,
                                             internal_filename='mca_summary',
                                             fmt='csv')
        self.save_summary(separator=separator, file_name=file_name, fmt=fmt)
        for each in ['ec', 'rc', 'prc']:
            base_name = each + '_results'
            file_name = modeltools.get_file_path(working_dir=folder,
                                                 internal_filename=base_name,
                                                 fmt='csv')
            self.save_coefficient_results(coefficient=each,
                                          separator=separator,
                                          file_name=file_name,
                                          fmt=fmt)
    def _min_max_setup(self):
        """Compute log-safe plot bounds for the flux and scan axes and
        store them in scan_results (flux_max/min, scan_max/min)."""
        # Negative minimum linear values mean nothing
        # because they don't translate to a log space
        # therefore we want the minimum non-negative/non-zero values.
        # lets make sure there are no zeros
        n_z_f = self.scan_results.flux_data[
            numpy.nonzero(self.scan_results.flux_data)]
        n_z_s = self.scan_results.scan_range[
            numpy.nonzero(self.scan_results.scan_range)]
        totals = numpy.vstack([self.scan_results.total_demand,
                               self.scan_results.total_supply])
        n_z_t = totals[numpy.nonzero(totals)]
        # and that the array is not now somehow empty
        # although if this happens-you have bigger problems
        if len(n_z_f) == 0:
            n_z_f = numpy.array([0.01, 1])
        if len(n_z_s) == 0:
            n_z_s = numpy.array([0.01, 1])
        # lets also (clumsily) find the non-negative mins and maxes
        # by converting to logspace (to get NaNs) and back
        # and then getting the min/max non-NaN
        # PS flux max is the max of the totals
        with numpy.errstate(all='ignore'):
            self.scan_results.flux_max = numpy.nanmax(10 ** numpy.log10(n_z_t))
            self.scan_results.flux_min = numpy.nanmin(10 ** numpy.log10(n_z_f))
            self.scan_results.scan_max = numpy.nanmax(10 ** numpy.log10(n_z_s))
            self.scan_results.scan_min = numpy.nanmin(10 ** numpy.log10(n_z_s))
def _attach_fluxes_to_self(self):
for i, each in enumerate(self.scan_results.flux_names):
# setattr(self, each, self.scan_results.flux_data[:, i])
self.scan_results[each] = self.scan_results.flux_data[:, i]
def _attach_all_coefficients_to_self(self):
setup_for = ['ec', 'rc', 'prc']
for each in setup_for:
eval('self._attach_coefficients_to_self(self.scan_results.' + each + '_names,\
self.scan_results.' + each + '_data)')
def _make_all_coefficient_lines(self):
setup_for = ['ec', 'rc', 'prc']
for each in setup_for:
eval('self._make_' + each + '_lines()')
    def _make_attach_total_fluxes(self):
        """Sum the demand-block and supply-block flux columns into
        ``total_demand`` and ``total_supply``."""
        demand_blocks = getattr(
            self._model_map, self.scan_results.fixed).isSubstrateOf()
        supply_blocks = getattr(
            self._model_map, self.scan_results.fixed).isProductOf()
        # column indices of each block within flux_data
        dem_pos = [self.scan_results.flux_names.index('J_' + flux)
                   for flux in demand_blocks]
        sup_pos = [self.scan_results.flux_names.index('J_' + flux)
                   for flux in supply_blocks]
        self.scan_results['total_demand'] = numpy.sum(
            [self.scan_results.flux_data[:, i]
             for i in dem_pos],
            axis=0)
        self.scan_results['total_supply'] = numpy.sum(
            [self.scan_results.flux_data[:, i]
             for i in sup_pos],
            axis=0)
    def _make_rc_lines(self):
        """Tangent lines for response coefficients, one per flux."""
        names = []
        resps = []
        for each in self.scan_results.flux_names:
            reaction = each[2:]
            name = reaction + '_' + self.scan_results.fixed
            J_ss = getattr(self.mod, each)
            slope = getattr(self.mod.rc, name)
            resp = self._tangent_line(J_ss, slope)
            name = 'rcJ' + name
            names.append(name)
            resps.append(resp)
        # hstack: each tangent line contributes an (x, y) column pair
        resps = numpy.hstack(resps)
        self.scan_results.rc_names = names
        self.scan_results.rc_data = resps

    def _make_prc_lines(self):
        """Tangent lines for partial response coefficients (ec * cc),
        one per flux / route-reaction combination."""
        names = []
        prcs = []
        reagent_of = [each[2:] for each in self.scan_results.flux_names]
        all_reactions = reagent_of + \
            getattr(self._model_map,
                    self.scan_results.fixed).isModifierOf()
        for flux_reaction in self.scan_results.flux_names:
            J_ss = getattr(self.mod, flux_reaction)
            reaction = flux_reaction[2:]
            for route_reaction in all_reactions:
                ec = getattr(
                    self.mod,
                    'ec' + route_reaction + '_' + self.scan_results.fixed)
                cc = getattr(self.mod, 'ccJ' + reaction + '_' + route_reaction)
                slope = ec * cc
                prc = self._tangent_line(J_ss, slope)
                name = 'prcJ%s_%s_%s' % (reaction,
                                         self.scan_results.fixed,
                                         route_reaction)
                names.append(name)
                prcs.append(prc)
        prcs = numpy.hstack(prcs)
        self.scan_results.prc_names = names
        self.scan_results.prc_data = prcs

    def _make_ec_lines(self):
        """Tangent lines for elasticity coefficients, one per flux."""
        names = []
        elasts = []
        for each in self.scan_results.flux_names:
            reaction = each[2:]
            name = 'ec' + reaction + '_' + self.scan_results.fixed
            J_ss = getattr(self.mod, each)
            slope = getattr(self.mod, name)
            elast = self._tangent_line(J_ss, slope)
            names.append(name)
            elasts.append(elast)
        elasts = numpy.hstack(elasts)
        self.scan_results.ec_names = names
        self.scan_results.ec_data = elasts
def _attach_coefficients_to_self(self, names, tangent_lines):
sp = 0
ep = 2
for name in names:
# setattr(self, name, tangent_lines[:, sp:ep])
self.scan_results[name] = tangent_lines[:, sp:ep]
sp = ep
ep += 2
    def _tangent_line(self, J_ss, slope):
        """Return a 2-point tangent line through (fixed_ss, J_ss) with
        log-log slope ``slope`` as a (2, 2) array of (x, y) rows.

        The x-extent is scaled (golden-ratio based aspect correction) so
        lines of different slope appear equally long on the log-log plot.
        """
        fix_ss = self.scan_results.fixed_ss
        # rate = constant * s**slope must pass through (fix_ss, J_ss)
        constant = J_ss / (fix_ss ** slope)
        ydist = numpy.log10(self.scan_results.flux_max / self.scan_results.flux_min)
        xdist = numpy.log10(self.scan_results.scan_max / self.scan_results.scan_min)
        golden_ratio = (1 + numpy.sqrt(5)) / 2
        xyscale = xdist / (ydist * golden_ratio * 1.5)
        # shorten steep lines: projected length ~ cos(angle on screen)
        scale_factor = numpy.cos(numpy.arctan(slope * xyscale))
        distance = numpy.log10(self._slope_range_factor) * scale_factor
        range_min = fix_ss / (10 ** distance)
        range_max = fix_ss * (10 ** distance)
        scan_range = numpy.linspace(range_min, range_max, num=2)
        rate = constant * scan_range ** (slope)
        return numpy.vstack((scan_range, rate)).transpose()
    @property
    def _color_dict(self):
        """Lazily build (and cache in ``_color_dict_``) the mapping from
        flux/total names to HSV colours used by all line data."""
        if not self._color_dict_:
            fix_map = getattr(self._model_map, self.scan_results.fixed)
            relavent_reactions = fix_map.isProductOf() + \
                fix_map.isSubstrateOf() + \
                fix_map.isModifierOf()
            # +3: room for the 'Total Supply'/'Total Demand' entries
            num_of_cols = len(relavent_reactions) + 3
            cmap = get_cmap('Set2')(
                numpy.linspace(0, 1.0, num_of_cols))[:, :3]
            color_list = [rgb_to_hsv(*cmap[i, :]) for i in range(num_of_cols)]
            relavent_reactions.sort()
            color_dict = dict(
                list(zip(['Total Supply'] +
                         ['J_' + reaction for reaction in relavent_reactions] +
                         ['Total Demand'],
                         color_list)))
            # just to darken the colors a bit (saturation forced to 1)
            for k, v in color_dict.items():
                color_dict[k] = [v[0], 1, v[2]]
            self._color_dict_ = color_dict
        return self._color_dict_
def _make_flux_ld(self):
color_dict = self._color_dict
flux_ld_dict = {}
demand_blocks = ['J_' + dem_reac for dem_reac in getattr(
self._model_map, self.scan_results.fixed).isSubstrateOf()]
supply_blocks = ['J_' + sup_reac for sup_reac in getattr(
self._model_map, self.scan_results.fixed).isProductOf()]
for flux in self.scan_results.flux_names:
flux_col = self.scan_results.flux_names.index(flux)
x_data = self.scan_results.scan_range
y_data = self.scan_results.flux_data[:, flux_col]
latex_expr = self._ltxe.expression_to_latex(flux)
flux_color = self._color_dict[flux]
color = hsv_to_rgb(flux_color[0],
flux_color[1],
flux_color[2] * 0.9)
for dem in demand_blocks:
if dem == flux:
flux_ld_dict[flux] = \
LineData(name=flux,
x_data=x_data,
y_data=y_data,
categories=['Fluxes',
'Demand',
flux],
properties={'label': '$%s$' % latex_expr,
'color': color})
break
for sup in supply_blocks:
if sup == flux:
flux_ld_dict[flux] = \
LineData(name=flux,
x_data=x_data,
y_data=y_data,
categories=['Fluxes',
'Supply',
flux],
properties={'label': '$%s$' % latex_expr,
'color': color})
break
self._flux_ld_dict = flux_ld_dict
    def _make_ec_ld(self):
        """LineData for elasticity tangent lines, coloured like their
        parent flux but desaturated."""
        ec_ld_dict = {}
        for ec_name in self.scan_results.ec_names:
            ec_name = str(ec_name)  # Py2 fix with unicode_literals
            for flux, flux_ld in self._flux_ld_dict.items():
                ec_reaction = flux[2:]
                if 'ec' + ec_reaction + '_' + self.scan_results.fixed in ec_name:
                    flux_color = self._color_dict[flux]
                    color = hsv_to_rgb(flux_color[0],
                                       flux_color[1] * 0.5,
                                       flux_color[2])
                    ec_data = self.scan_results[ec_name]
                    # inherit Demand/Supply + flux categories from parent
                    categories = ['Elasticity Coefficients'] + \
                        flux_ld.categories[1:]
                    latex_expr = self._ltxe.expression_to_latex(ec_name)
                    ec_ld_dict[ec_name] = \
                        LineData(name=ec_name,
                                 x_data=ec_data[:, 0],
                                 y_data=ec_data[:, 1],
                                 categories=categories,
                                 properties={'label': '$%s$' % latex_expr,
                                             'color': color})
        self._ec_ld_dict = ec_ld_dict

    def _make_rc_ld(self):
        """LineData for response-coefficient tangent lines (dashed,
        darkened flux colour)."""
        rc_ld_dict = {}
        for rc_name in self.scan_results.rc_names:
            rc_name = str(rc_name)  # Py2 fix with unicode_literals
            for flux, flux_ld in self._flux_ld_dict.items():
                rc_flux = 'J' + flux[2:]
                if 'rc' + rc_flux + '_' in rc_name:
                    flux_color = self._color_dict[flux]
                    color = hsv_to_rgb(flux_color[0],
                                       flux_color[1],
                                       flux_color[2] * 0.7)
                    rc_data = self.scan_results[rc_name]
                    categories = ['Response Coefficients'] + \
                        flux_ld.categories[1:]
                    latex_expr = self._ltxe.expression_to_latex(rc_name)
                    rc_ld_dict[rc_name] = \
                        LineData(name=rc_name,
                                 x_data=rc_data[:, 0],
                                 y_data=rc_data[:, 1],
                                 categories=categories,
                                 properties={'label': '$%s$' % latex_expr,
                                             'color': color,
                                             'ls': '--'})
        self._rc_ld_dict = rc_ld_dict

    def _make_prc_ld(self):
        """LineData for partial-response tangent lines, coloured by the
        route reaction's flux colour (desaturated)."""
        def get_prc_route(prc, flux, fixed):
            # recover the route-reaction suffix from a name of the form
            # 'prc<flux>_<fixed>_<route>'
            without_prefix = prc.split('prc')[1]
            without_flux = without_prefix.split(flux)[1][1:]
            route = without_flux.split(fixed)[1][1:]
            return route
        prc_ld_dict = {}
        for prc_name in self.scan_results.prc_names:
            prc_name = str(prc_name)  # Py2 fix with unicode_literals
            for flux, flux_ld in self._flux_ld_dict.items():
                prc_flux = 'J' + flux[2:]
                if 'prc' + prc_flux + '_' + self.scan_results.fixed in prc_name:
                    route_reaction = get_prc_route(prc_name,
                                                   prc_flux,
                                                   self.scan_results.fixed)
                    flux_color = self._color_dict['J_' + route_reaction]
                    color = hsv_to_rgb(flux_color[0],
                                       flux_color[1] * 0.5,
                                       flux_color[2])
                    prc_data = self.scan_results[prc_name]
                    categories = ['Partial Response Coefficients'] + \
                        flux_ld.categories[1:]
                    latex_expr = self._ltxe.expression_to_latex(prc_name)
                    prc_ld_dict[prc_name] = \
                        LineData(name=prc_name,
                                 x_data=prc_data[:, 0],
                                 y_data=prc_data[:, 1],
                                 categories=categories,
                                 properties={'label': '$%s$' % latex_expr,
                                             'color': color})
        self._prc_ld_dict = prc_ld_dict
def _make_total_flux_ld(self):
total_flux_ld_dict = {}
col = self._color_dict['Total Supply']
total_flux_ld_dict['Total Supply'] = \
LineData(name='Total Supply',
x_data=self.scan_results.scan_range,
y_data=self.scan_results.total_supply,
categories=['Fluxes',
'Supply',
'Total Supply'],
properties={'label': '$%s$' % 'Total\,Supply',
'color': hsv_to_rgb(col[0], col[1],
col[2] * 0.9),
'ls': '--'})
col = self._color_dict['Total Demand']
total_flux_ld_dict['Total Demand'] = \
LineData(name='Total Demand',
x_data=self.scan_results.scan_range,
y_data=self.scan_results.total_demand,
categories=['Fluxes',
'Demand',
'Total Demand'],
properties={'label': '$%s$' % 'Total\,Demand',
'color': hsv_to_rgb(col[0], col[1],
col[2] * 0.9),
'ls': '--'})
self._total_flux_ld_dict = total_flux_ld_dict
    def plot(self):
        """Return a ScanFig of the rate characteristic: fluxes, totals
        and coefficient tangent lines on log-log axes, with a vertical
        marker at the fixed species' steady state."""
        category_classes = OrderedDict([
            ('Supply/Demand', [
                'Supply',
                'Demand']),
            ('Reaction Blocks',
             self.scan_results.flux_names +
             ['Total Supply', 'Total Demand']),
            ('Lines', [
                'Fluxes',
                'Elasticity Coefficients',
                'Response Coefficients',
                'Partial Response Coefficients'])])
        line_data_list = [v for v in self._line_data_dict.values()]
        scan_fig = ScanFig(line_data_list,
                           ax_properties={'xlabel': '[%s]' %
                                                    self.scan_results.fixed.replace(
                                                        '_', ' '),
                                          'ylabel': 'Rate',
                                          'xscale': 'log',
                                          'yscale': 'log',
                                          'xlim': [self.scan_results.scan_min,
                                                   self.scan_results.scan_max],
                                          'ylim': [self.scan_results.flux_min,
                                                   self.scan_results.flux_max * 2
                                                   ]},
                           category_classes=category_classes,
                           base_name=self._analysis_method,
                           working_dir=self._working_dir)
        # fluxes are visible by default; coefficient lines are toggled
        # on by the user via the figure's category buttons
        scan_fig.toggle_category('Supply', True)
        scan_fig.toggle_category('Demand', True)
        scan_fig.toggle_category('Fluxes', True)
        scan_fig.ax.axvline(self.scan_results.fixed_ss, ls=':', color='gray')
        return scan_fig
    def plot_decompose(self):
        """Deprecated alias for :meth:`do_mca_scan`; emits a
        DeprecationWarning and delegates."""
        from warnings import warn, simplefilter
        simplefilter('always', DeprecationWarning)
        warn('plot_decompose has been renamed to `do_mca_scan, use that '
             'method in the future`', DeprecationWarning, stacklevel=1)
        simplefilter('default', DeprecationWarning)
        return self.do_mca_scan()
@silence_print
def do_mca_scan(self):
ecs = []
ccs = []
prc_names = []
rc_names = []
rc_pos = []
reagent_of = [each[2:] for each in self.scan_results.flux_names]
all_reactions = reagent_of + \
getattr(self._model_map,
self.scan_results.fixed).isModifierOf()
arl = len(all_reactions)
strt = 0
stp = arl
for flux_reaction in self.scan_results.flux_names:
reaction = flux_reaction[2:]
rc_names.append('rcJ%s_%s' % (reaction, self.scan_results.fixed))
rc_pos.append(list(range(strt, stp)))
strt += arl
stp += arl
for route_reaction in all_reactions:
ec = 'ec' + route_reaction + '_' + self.scan_results.fixed
cc = 'ccJ' + reaction + '_' + route_reaction
name = 'prcJ%s_%s_%s' % (reaction,
self.scan_results.fixed,
route_reaction)
# ecs.append(ec)
if ec not in ecs:
ecs.append(ec)
ccs.append(cc)
prc_names.append(name)
ec_len = len(ecs)
user_output = [self.scan_results.fixed] + ecs + ccs
scanner = pysces.Scanner(self.mod)
scanner.quietRun = True
scanner.addScanParameter(self.scan_results.fixed,
self.scan_results.scan_min,
self.scan_results.scan_max,
self.scan_results.scan_points,
log=True)
scanner.addUserOutput(*user_output)
scanner.Run()
ax_properties = {'ylabel': 'Coefficient Value',
'xlabel': '[%s]' %
self.scan_results.fixed.replace('_', ' '),
'xscale': 'log',
'yscale': 'linear',
'xlim': [self.scan_results.scan_min,
self.scan_results.scan_max]}
cc_ec_data_obj = Data2D(mod=self.mod,
column_names=user_output,
data_array=scanner.UserOutputResults,
ltxe=self._ltxe,
analysis_method=self._analysis_method,
ax_properties=ax_properties,
file_name='cc_ec_scan',
num_of_groups=ec_len,
working_dir=path.split(self._working_dir)[0])
rc_data = []
all_outs = scanner.UserOutputResults[:, 1:]
ec_outs = all_outs[:, :ec_len]
cc_outs = all_outs[:, ec_len:]
ec_positions = list(range(ec_len)) * (len(prc_names)/ec_len)
for i, prc_name in enumerate(prc_names):
ec_col_data = ec_outs[:, ec_positions[i]]
cc_col_data = cc_outs[:, i]
# ec_col_data = outs[:, i]
# cc_col_data = outs[:, i + cc_s_pos]
col = ec_col_data * cc_col_data
rc_data.append(col)
temp = numpy.vstack(rc_data).transpose()
rc_data += [numpy.sum(temp[:, rc_pos[i]], axis=1) for i in
range(len(rc_names))]
rc_out_arr = [scanner.UserOutputResults[:, 0]] + rc_data
rc_out_arr = numpy.vstack(rc_out_arr).transpose()
rc_data_obj = Data2D(mod=self.mod,
column_names=[self.scan_results.fixed] + prc_names + rc_names,
data_array=rc_out_arr,
ltxe=self._ltxe,
analysis_method=self._analysis_method,
ax_properties=ax_properties,
file_name='prc_scan',
num_of_groups=ec_len,
working_dir=path.split(self._working_dir)[0])
#rc_data_obj._working_dir = path.split(self._working_dir)[0]
#cc_ec_data_obj._working_dir = path.split(self._working_dir)[0]
return rc_data_obj, cc_ec_data_obj
|
|
"""This module contains data related to countries and is used for geo mapping"""
countries = [
{
"name": "Angola",
"area": 1246700,
"cioc": "ANG",
"cca2": "AO",
"capital": "Luanda",
"lat": -12.5,
"lng": 18.5,
"cca3": "AGO"
},
{
"name": "Algeria",
"area": 2381741,
"cioc": "ALG",
"cca2": "DZ",
"capital": "Algiers",
"lat": 28,
"lng": 3,
"cca3": "DZA"
},
{
"name": "Egypt",
"area": 1002450,
"cioc": "EGY",
"cca2": "EG",
"capital": "Cairo",
"lat": 27,
"lng": 30,
"cca3": "EGY"
},
{
"name": "Bangladesh",
"area": 147570,
"cioc": "BAN",
"cca2": "BD",
"capital": "Dhaka",
"lat": 24,
"lng": 90,
"cca3": "BGD"
},
{
"name": "Niger",
"area": 1267000,
"cioc": "NIG",
"cca2": "NE",
"capital": "Niamey",
"lat": 16,
"lng": 8,
"cca3": "NER"
},
{
"name": "Liechtenstein",
"area": 160,
"cioc": "LIE",
"cca2": "LI",
"capital": "Vaduz",
"lat": 47.26666666,
"lng": 9.53333333,
"cca3": "LIE"
},
{
"name": "Namibia",
"area": 825615,
"cioc": "NAM",
"cca2": "NA",
"capital": "Windhoek",
"lat": -22,
"lng": 17,
"cca3": "NAM"
},
{
"name": "Bulgaria",
"area": 110879,
"cioc": "BUL",
"cca2": "BG",
"capital": "Sofia",
"lat": 43,
"lng": 25,
"cca3": "BGR"
},
{
"name": "Bolivia",
"area": 1098581,
"cioc": "BOL",
"cca2": "BO",
"capital": "Sucre",
"lat": -17,
"lng": -65,
"cca3": "BOL"
},
{
"name": "Ghana",
"area": 238533,
"cioc": "GHA",
"cca2": "GH",
"capital": "Accra",
"lat": 8,
"lng": -2,
"cca3": "GHA"
},
{
"name": "Cocos (Keeling) Islands",
"area": 14,
"cioc": "",
"cca2": "CC",
"capital": "West Island",
"lat": -12.5,
"lng": 96.83333333,
"cca3": "CCK"
},
{
"name": "Pakistan",
"area": 881912,
"cioc": "PAK",
"cca2": "PK",
"capital": "Islamabad",
"lat": 30,
"lng": 70,
"cca3": "PAK"
},
{
"name": "Cape Verde",
"area": 4033,
"cioc": "CPV",
"cca2": "CV",
"capital": "Praia",
"lat": 16,
"lng": -24,
"cca3": "CPV"
},
{
"name": "Jordan",
"area": 89342,
"cioc": "JOR",
"cca2": "JO",
"capital": "Amman",
"lat": 31,
"lng": 36,
"cca3": "JOR"
},
{
"name": "Liberia",
"area": 111369,
"cioc": "LBR",
"cca2": "LR",
"capital": "Monrovia",
"lat": 6.5,
"lng": -9.5,
"cca3": "LBR"
},
{
"name": "Libya",
"area": 1759540,
"cioc": "LBA",
"cca2": "LY",
"capital": "Tripoli",
"lat": 25,
"lng": 17,
"cca3": "LBY"
},
{
"name": "Malaysia",
"area": 330803,
"cioc": "MAS",
"cca2": "MY",
"capital": "Kuala Lumpur",
"lat": 2.5,
"lng": 112.5,
"cca3": "MYS"
},
{
"name": "Dominican Republic",
"area": 48671,
"cioc": "DOM",
"cca2": "DO",
"capital": "Santo Domingo",
"lat": 19,
"lng": -70.66666666,
"cca3": "DOM"
},
{
"name": "Puerto Rico",
"area": 8870,
"cioc": "PUR",
"cca2": "PR",
"capital": "San Juan",
"lat": 18.25,
"lng": -66.5,
"cca3": "PRI"
},
{
"name": "Mayotte",
"area": 374,
"cioc": "",
"cca2": "YT",
"capital": "Mamoudzou",
"lat": -12.83333333,
"lng": 45.16666666,
"cca3": "MYT"
},
{
"name": "North Korea",
"area": 120538,
"cioc": "PRK",
"cca2": "KP",
"capital": "Pyongyang",
"lat": 40,
"lng": 127,
"cca3": "PRK"
},
{
"name": "Palestine",
"area": 6220,
"cioc": "PLE",
"cca2": "PS",
"capital": "Ramallah",
"lat": 31.9,
"lng": 35.2,
"cca3": "PSE"
},
{
"name": "Tanzania",
"area": 945087,
"cioc": "TAN",
"cca2": "TZ",
"capital": "Dodoma",
"lat": -6,
"lng": 35,
"cca3": "TZA"
},
{
"name": "Botswana",
"area": 582000,
"cioc": "BOT",
"cca2": "BW",
"capital": "Gaborone",
"lat": -22,
"lng": 24,
"cca3": "BWA"
},
{
"name": "Cambodia",
"area": 181035,
"cioc": "CAM",
"cca2": "KH",
"capital": "Phnom Penh",
"lat": 13,
"lng": 105,
"cca3": "KHM"
},
{
"name": "Nicaragua",
"area": 130373,
"cioc": "NCA",
"cca2": "NI",
"capital": "Managua",
"lat": 13,
"lng": -85,
"cca3": "NIC"
},
{
"name": "Trinidad and Tobago",
"area": 5130,
"cioc": "TTO",
"cca2": "TT",
"capital": "Port of Spain",
"lat": 11,
"lng": -61,
"cca3": "TTO"
},
{
"name": "Ethiopia",
"area": 1104300,
"cioc": "ETH",
"cca2": "ET",
"capital": "Addis Ababa",
"lat": 8,
"lng": 38,
"cca3": "ETH"
},
{
"name": "Paraguay",
"area": 406752,
"cioc": "PAR",
"cca2": "PY",
"capital": "Asuncion",
"lat": -23,
"lng": -58,
"cca3": "PRY"
},
{
"name": "Hong Kong",
"area": 1104,
"cioc": "HKG",
"cca2": "HK",
"capital": "City of Victoria",
"lat": 22.267,
"lng": 114.188,
"cca3": "HKG"
},
{
"name": "Saudi Arabia",
"area": 2149690,
"cioc": "KSA",
"cca2": "SA",
"capital": "Riyadh",
"lat": 25,
"lng": 45,
"cca3": "SAU"
},
{
"name": "Lebanon",
"area": 10452,
"cioc": "LIB",
"cca2": "LB",
"capital": "Beirut",
"lat": 33.83333333,
"lng": 35.83333333,
"cca3": "LBN"
},
{
"name": "Slovenia",
"area": 20273,
"cioc": "SLO",
"cca2": "SI",
"capital": "Ljubljana",
"lat": 46.11666666,
"lng": 14.81666666,
"cca3": "SVN"
},
{
"name": "Burkina Faso",
"area": 272967,
"cioc": "BUR",
"cca2": "BF",
"capital": "Ouagadougou",
"lat": 13,
"lng": -2,
"cca3": "BFA"
},
{
"name": "Switzerland",
"area": 41284,
"cioc": "SUI",
"cca2": "CH",
"capital": "Bern",
"lat": 47,
"lng": 8,
"cca3": "CHE"
},
{
"name": "Mauritania",
"area": 1030700,
"cioc": "MTN",
"cca2": "MR",
"capital": "Nouakchott",
"lat": 20,
"lng": -12,
"cca3": "MRT"
},
{
"name": "Croatia",
"area": 56594,
"cioc": "CRO",
"cca2": "HR",
"capital": "Zagreb",
"lat": 45.16666666,
"lng": 15.5,
"cca3": "HRV"
},
{
"name": "Chile",
"area": 756102,
"cioc": "CHI",
"cca2": "CL",
"capital": "Santiago",
"lat": -30,
"lng": -71,
"cca3": "CHL"
},
{
"name": "China",
"area": 9706961,
"cioc": "CHN",
"cca2": "CN",
"capital": "Beijing",
"lat": 35,
"lng": 105,
"cca3": "CHN"
},
{
"name": "Saint Kitts and Nevis",
"area": 261,
"cioc": "SKN",
"cca2": "KN",
"capital": "Basseterre",
"lat": 17.33333333,
"lng": -62.75,
"cca3": "KNA"
},
{
"name": "Sierra Leone",
"area": 71740,
"cioc": "SLE",
"cca2": "SL",
"capital": "Freetown",
"lat": 8.5,
"lng": -11.5,
"cca3": "SLE"
},
{
"name": "Jamaica",
"area": 10991,
"cioc": "JAM",
"cca2": "JM",
"capital": "Kingston",
"lat": 18.25,
"lng": -77.5,
"cca3": "JAM"
},
{
"name": "San Marino",
"area": 61,
"cioc": "SMR",
"cca2": "SM",
"capital": "City of San Marino",
"lat": 43.76666666,
"lng": 12.41666666,
"cca3": "SMR"
},
{
"name": "Gibraltar",
"area": 6,
"cioc": "",
"cca2": "GI",
"capital": "Gibraltar",
"lat": 36.13333333,
"lng": -5.35,
"cca3": "GIB"
},
{
"name": "Djibouti",
"area": 23200,
"cioc": "DJI",
"cca2": "DJ",
"capital": "Djibouti",
"lat": 11.5,
"lng": 43,
"cca3": "DJI"
},
{
"name": "Guinea",
"area": 245857,
"cioc": "GUI",
"cca2": "GN",
"capital": "Conakry",
"lat": 11,
"lng": -10,
"cca3": "GIN"
},
{
"name": "Finland",
"area": 338424,
"cioc": "FIN",
"cca2": "FI",
"capital": "Helsinki",
"lat": 64,
"lng": 26,
"cca3": "FIN"
},
{
"name": "Uruguay",
"area": 181034,
"cioc": "URU",
"cca2": "UY",
"capital": "Montevideo",
"lat": -33,
"lng": -56,
"cca3": "URY"
},
{
"name": "Thailand",
"area": 513120,
"cioc": "THA",
"cca2": "TH",
"capital": "Bangkok",
"lat": 15,
"lng": 100,
"cca3": "THA"
},
{
"name": "Sao Tome and Principe",
"area": 964,
"cioc": "STP",
"cca2": "ST",
"capital": "Sao Tome",
"lat": 1,
"lng": 7,
"cca3": "STP"
},
{
"name": "Seychelles",
"area": 452,
"cioc": "SEY",
"cca2": "SC",
"capital": "Victoria",
"lat": -4.58333333,
"lng": 55.66666666,
"cca3": "SYC"
},
{
"name": "Nepal",
"area": 147181,
"cioc": "NEP",
"cca2": "NP",
"capital": "Kathmandu",
"lat": 28,
"lng": 84,
"cca3": "NPL"
},
{
"name": "Christmas Island",
"area": 135,
"cioc": "",
"cca2": "CX",
"capital": "Flying Fish Cove",
"lat": -10.5,
"lng": 105.66666666,
"cca3": "CXR"
},
{
"name": "Laos",
"area": 236800,
"cioc": "LAO",
"cca2": "LA",
"capital": "Vientiane",
"lat": 18,
"lng": 105,
"cca3": "LAO"
},
{
"name": "Yemen",
"area": 527968,
"cioc": "YEM",
"cca2": "YE",
"capital": "Sana'a",
"lat": 15,
"lng": 48,
"cca3": "YEM"
},
{
"name": "Bouvet Island",
"area": 49,
"cioc": "",
"cca2": "BV",
"capital": "",
"lat": -54.43333333,
"lng": 3.4,
"cca3": "BVT"
},
{
"name": "South Africa",
"area": 1221037,
"cioc": "RSA",
"cca2": "ZA",
"capital": "Pretoria",
"lat": -29,
"lng": 24,
"cca3": "ZAF"
},
{
"name": "Kiribati",
"area": 811,
"cioc": "KIR",
"cca2": "KI",
"capital": "South Tarawa",
"lat": 1.41666666,
"lng": 173,
"cca3": "KIR"
},
{
"name": "Philippines",
"area": 342353,
"cioc": "PHI",
"cca2": "PH",
"capital": "Manila",
"lat": 13,
"lng": 122,
"cca3": "PHL"
},
{
"name": "Sint Maarten",
"area": 34,
"cioc": "",
"cca2": "SX",
"capital": "Philipsburg",
"lat": 18.033333,
"lng": -63.05,
"cca3": "SXM"
},
{
"name": "Romania",
"area": 238391,
"cioc": "ROU",
"cca2": "RO",
"capital": "Bucharest",
"lat": 46,
"lng": 25,
"cca3": "ROU"
},
{
"name": "United States Virgin Islands",
"area": 347,
"cioc": "ISV",
"cca2": "VI",
"capital": "Charlotte Amalie",
"lat": 18.35,
"lng": -64.933333,
"cca3": "VIR"
},
{
"name": "Syria",
"area": 185180,
"cioc": "SYR",
"cca2": "SY",
"capital": "Damascus",
"lat": 35,
"lng": 38,
"cca3": "SYR"
},
{
"name": "Macau",
"area": 30,
"cioc": "",
"cca2": "MO",
"capital": "",
"lat": 22.16666666,
"lng": 113.55,
"cca3": "MAC"
},
{
"name": "Saint Martin",
"area": 53,
"cioc": "",
"cca2": "MF",
"capital": "Marigot",
"lat": 18.08333333,
"lng": -63.95,
"cca3": "MAF"
},
{
"name": "Malta",
"area": 316,
"cioc": "MLT",
"cca2": "MT",
"capital": "Valletta",
"lat": 35.83333333,
"lng": 14.58333333,
"cca3": "MLT"
},
{
"name": "Kazakhstan",
"area": 2724900,
"cioc": "KAZ",
"cca2": "KZ",
"capital": "Astana",
"lat": 48,
"lng": 68,
"cca3": "KAZ"
},
{
"name": "Turks and Caicos Islands",
"area": 948,
"cioc": "",
"cca2": "TC",
"capital": "Cockburn Town",
"lat": 21.75,
"lng": -71.58333333,
"cca3": "TCA"
},
{
"name": "French Polynesia",
"area": 4167,
"cioc": "",
"cca2": "PF",
"capital": "Papeete",
"lat": -15,
"lng": -140,
"cca3": "PYF"
},
{
"name": "Niue",
"area": 260,
"cioc": "",
"cca2": "NU",
"capital": "Alofi",
"lat": -19.03333333,
"lng": -169.86666666,
"cca3": "NIU"
},
{
"name": "Dominica",
"area": 751,
"cioc": "DMA",
"cca2": "DM",
"capital": "Roseau",
"lat": 15.41666666,
"lng": -61.33333333,
"cca3": "DMA"
},
{
"name": "Benin",
"area": 112622,
"cioc": "BEN",
"cca2": "BJ",
"capital": "Porto-Novo",
"lat": 9.5,
"lng": 2.25,
"cca3": "BEN"
},
{
"name": "French Guiana",
"area": 83534,
"cioc": "",
"cca2": "GF",
"capital": "Cayenne",
"lat": 4,
"lng": -53,
"cca3": "GUF"
},
{
"name": "Belgium",
"area": 30528,
"cioc": "BEL",
"cca2": "BE",
"capital": "Brussels",
"lat": 50.83333333,
"lng": 4,
"cca3": "BEL"
},
{
"name": "Montserrat",
"area": 102,
"cioc": "",
"cca2": "MS",
"capital": "Plymouth",
"lat": 16.75,
"lng": -62.2,
"cca3": "MSR"
},
{
"name": "Togo",
"area": 56785,
"cioc": "TOG",
"cca2": "TG",
"capital": "Lome",
"lat": 8,
"lng": 1.16666666,
"cca3": "TGO"
},
{
"name": "Germany",
"area": 357114,
"cioc": "GER",
"cca2": "DE",
"capital": "Berlin",
"lat": 51,
"lng": 9,
"cca3": "DEU"
},
{
"name": "Guam",
"area": 549,
"cioc": "GUM",
"cca2": "GU",
"capital": "Hagatna",
"lat": 13.46666666,
"lng": 144.78333333,
"cca3": "GUM"
},
{
"name": "Sri Lanka",
"area": 65610,
"cioc": "SRI",
"cca2": "LK",
"capital": "Colombo",
"lat": 7,
"lng": 81,
"cca3": "LKA"
},
{
"name": "South Sudan",
"area": 619745,
"cioc": "",
"cca2": "SS",
"capital": "Juba",
"lat": 7,
"lng": 30,
"cca3": "SSD"
},
{
"name": "Falkland Islands",
"area": 12173,
"cioc": "",
"cca2": "FK",
"capital": "Stanley",
"lat": -51.75,
"lng": -59,
"cca3": "FLK"
},
{
"name": "United Kingdom",
"area": 242900,
"cioc": "GBR",
"cca2": "GB",
"capital": "London",
"lat": 54,
"lng": -2,
"cca3": "GBR"
},
{
"name": "Guyana",
"area": 214969,
"cioc": "GUY",
"cca2": "GY",
"capital": "Georgetown",
"lat": 5,
"lng": -59,
"cca3": "GUY"
},
{
"name": "Costa Rica",
"area": 51100,
"cioc": "CRC",
"cca2": "CR",
"capital": "San Jose",
"lat": 10,
"lng": -84,
"cca3": "CRI"
},
{
"name": "Cameroon",
"area": 475442,
"cioc": "CMR",
"cca2": "CM",
"capital": "Yaounde",
"lat": 6,
"lng": 12,
"cca3": "CMR"
},
{
"name": "Morocco",
"area": 446550,
"cioc": "MAR",
"cca2": "MA",
"capital": "Rabat",
"lat": 32,
"lng": -5,
"cca3": "MAR"
},
{
"name": "Northern Mariana Islands",
"area": 464,
"cioc": "",
"cca2": "MP",
"capital": "Saipan",
"lat": 15.2,
"lng": 145.75,
"cca3": "MNP"
},
{
"name": "Lesotho",
"area": 30355,
"cioc": "LES",
"cca2": "LS",
"capital": "Maseru",
"lat": -29.5,
"lng": 28.5,
"cca3": "LSO"
},
{
"name": "Hungary",
"area": 93028,
"cioc": "HUN",
"cca2": "HU",
"capital": "Budapest",
"lat": 47,
"lng": 20,
"cca3": "HUN"
},
{
"name": "Turkmenistan",
"area": 488100,
"cioc": "TKM",
"cca2": "TM",
"capital": "Ashgabat",
"lat": 40,
"lng": 60,
"cca3": "TKM"
},
{
"name": "Suriname",
"area": 163820,
"cioc": "SUR",
"cca2": "SR",
"capital": "Paramaribo",
"lat": 4,
"lng": -56,
"cca3": "SUR"
},
{
"name": "Netherlands",
"area": 41850,
"cioc": "NED",
"cca2": "NL",
"capital": "Amsterdam",
"lat": 52.5,
"lng": 5.75,
"cca3": "NLD"
},
{
"name": "Bermuda",
"area": 54,
"cioc": "BER",
"cca2": "BM",
"capital": "Hamilton",
"lat": 32.33333333,
"lng": -64.75,
"cca3": "BMU"
},
{
"name": "Heard Island and McDonald Islands",
"area": 412,
"cioc": "",
"cca2": "HM",
"capital": "",
"lat": -53.1,
"lng": 72.51666666,
"cca3": "HMD"
},
{
"name": "Chad",
"area": 1284000,
"cioc": "CHA",
"cca2": "TD",
"capital": "N'Djamena",
"lat": 15,
"lng": 19,
"cca3": "TCD"
},
{
"name": "Georgia",
"area": 69700,
"cioc": "GEO",
"cca2": "GE",
"capital": "Tbilisi",
"lat": 42,
"lng": 43.5,
"cca3": "GEO"
},
{
"name": "Montenegro",
"area": 13812,
"cioc": "MNE",
"cca2": "ME",
"capital": "Podgorica",
"lat": 42.5,
"lng": 19.3,
"cca3": "MNE"
},
{
"name": "Mongolia",
"area": 1564110,
"cioc": "MGL",
"cca2": "MN",
"capital": "Ulan Bator",
"lat": 46,
"lng": 105,
"cca3": "MNG"
},
{
"name": "Marshall Islands",
"area": 181,
"cioc": "MHL",
"cca2": "MH",
"capital": "Majuro",
"lat": 9,
"lng": 168,
"cca3": "MHL"
},
{
"name": "Martinique",
"area": 1128,
"cioc": "",
"cca2": "MQ",
"capital": "Fort-de-France",
"lat": 14.666667,
"lng": -61,
"cca3": "MTQ"
},
{
"name": "Belize",
"area": 22966,
"cioc": "BIZ",
"cca2": "BZ",
"capital": "Belmopan",
"lat": 17.25,
"lng": -88.75,
"cca3": "BLZ"
},
{
"name": "Norfolk Island",
"area": 36,
"cioc": "",
"cca2": "NF",
"capital": "Kingston",
"lat": -29.03333333,
"lng": 167.95,
"cca3": "NFK"
},
{
"name": "Myanmar",
"area": 676578,
"cioc": "MYA",
"cca2": "MM",
"capital": "Naypyidaw",
"lat": 22,
"lng": 98,
"cca3": "MMR"
},
{
"name": "Afghanistan",
"area": 652230,
"cioc": "AFG",
"cca2": "AF",
"capital": "Kabul",
"lat": 33,
"lng": 65,
"cca3": "AFG"
},
{
"name": "Burundi",
"area": 27834,
"cioc": "BDI",
"cca2": "BI",
"capital": "Bujumbura",
"lat": -3.5,
"lng": 30,
"cca3": "BDI"
},
{
"name": "British Virgin Islands",
"area": 151,
"cioc": "IVB",
"cca2": "VG",
"capital": "Road Town",
"lat": 18.431383,
"lng": -64.62305,
"cca3": "VGB"
},
{
"name": "Belarus",
"area": 207600,
"cioc": "BLR",
"cca2": "BY",
"capital": "Minsk",
"lat": 53,
"lng": 28,
"cca3": "BLR"
},
{
"name": "Saint Barthelemy",
"area": 21,
"cioc": "",
"cca2": "BL",
"capital": "Gustavia",
"lat": 18.5,
"lng": -63.41666666,
"cca3": "BLM"
},
{
"name": "Grenada",
"area": 344,
"cioc": "GRN",
"cca2": "GD",
"capital": "St. George's",
"lat": 12.11666666,
"lng": -61.66666666,
"cca3": "GRD"
},
{
"name": "Tokelau",
"area": 12,
"cioc": "",
"cca2": "TK",
"capital": "Fakaofo",
"lat": -9,
"lng": -172,
"cca3": "TKL"
},
{
"name": "Greece",
"area": 131990,
"cioc": "GRE",
"cca2": "GR",
"capital": "Athens",
"lat": 39,
"lng": 22,
"cca3": "GRC"
},
{
"name": "Russia",
"area": 17098242,
"cioc": "RUS",
"cca2": "RU",
"capital": "Moscow",
"lat": 60,
"lng": 100,
"cca3": "RUS"
},
{
"name": "Greenland",
"area": 2166086,
"cioc": "",
"cca2": "GL",
"capital": "Nuuk",
"lat": 72,
"lng": -40,
"cca3": "GRL"
},
{
"name": "Andorra",
"area": 468,
"cioc": "AND",
"cca2": "AD",
"capital": "Andorra la Vella",
"lat": 42.5,
"lng": 1.5,
"cca3": "AND"
},
{
"name": "Mozambique",
"area": 801590,
"cioc": "MOZ",
"cca2": "MZ",
"capital": "Maputo",
"lat": -18.25,
"lng": 35,
"cca3": "MOZ"
},
{
"name": "Tajikistan",
"area": 143100,
"cioc": "TJK",
"cca2": "TJ",
"capital": "Dushanbe",
"lat": 39,
"lng": 71,
"cca3": "TJK"
},
{
"name": "Haiti",
"area": 27750,
"cioc": "HAI",
"cca2": "HT",
"capital": "Port-au-Prince",
"lat": 19,
"lng": -72.41666666,
"cca3": "HTI"
},
{
"name": "Mexico",
"area": 1964375,
"cioc": "MEX",
"cca2": "MX",
"capital": "Mexico City",
"lat": 23,
"lng": -102,
"cca3": "MEX"
},
{
"name": "Zimbabwe",
"area": 390757,
"cioc": "ZIM",
"cca2": "ZW",
"capital": "Harare",
"lat": -20,
"lng": 30,
"cca3": "ZWE"
},
{
"name": "Saint Lucia",
"area": 616,
"cioc": "LCA",
"cca2": "LC",
"capital": "Castries",
"lat": 13.88333333,
"lng": -60.96666666,
"cca3": "LCA"
},
{
"name": "India",
"area": 3287590,
"cioc": "IND",
"cca2": "IN",
"capital": "New Delhi",
"lat": 20,
"lng": 77,
"cca3": "IND"
},
{
"name": "Latvia",
"area": 64559,
"cioc": "LAT",
"cca2": "LV",
"capital": "Riga",
"lat": 57,
"lng": 25,
"cca3": "LVA"
},
{
"name": "Bhutan",
"area": 38394,
"cioc": "BHU",
"cca2": "BT",
"capital": "Thimphu",
"lat": 27.5,
"lng": 90.5,
"cca3": "BTN"
},
{
"name": "Saint Vincent and the Grenadines",
"area": 389,
"cioc": "VIN",
"cca2": "VC",
"capital": "Kingstown",
"lat": 13.25,
"lng": -61.2,
"cca3": "VCT"
},
{
"name": "Vietnam",
"area": 331212,
"cioc": "VIE",
"cca2": "VN",
"capital": "Hanoi",
"lat": 16.16666666,
"lng": 107.83333333,
"cca3": "VNM"
},
{
"name": "Norway",
"area": 323802,
"cioc": "NOR",
"cca2": "NO",
"capital": "Oslo",
"lat": 62,
"lng": 10,
"cca3": "NOR"
},
{
"name": "Czech Republic",
"area": 78865,
"cioc": "CZE",
"cca2": "CZ",
"capital": "Prague",
"lat": 49.75,
"lng": 15.5,
"cca3": "CZE"
},
{
"name": "French Southern and Antarctic Lands",
"area": 7747,
"cioc": "",
"cca2": "TF",
"capital": "Port-aux-Francais",
"lat": -49.25,
"lng": 69.167,
"cca3": "ATF"
},
{
"name": "Antigua and Barbuda",
"area": 442,
"cioc": "ANT",
"cca2": "AG",
"capital": "Saint John's",
"lat": 17.05,
"lng": -61.8,
"cca3": "ATG"
},
{
"name": "Fiji",
"area": 18272,
"cioc": "FIJ",
"cca2": "FJ",
"capital": "Suva",
"lat": -18,
"lng": 175,
"cca3": "FJI"
},
{
"name": "British Indian Ocean Territory",
"area": 60,
"cioc": "",
"cca2": "IO",
"capital": "Diego Garcia",
"lat": -6,
"lng": 71.5,
"cca3": "IOT"
},
{
"name": "Honduras",
"area": 112492,
"cioc": "HON",
"cca2": "HN",
"capital": "Tegucigalpa",
"lat": 15,
"lng": -86.5,
"cca3": "HND"
},
{
"name": "Mauritius",
"area": 2040,
"cioc": "MRI",
"cca2": "MU",
"capital": "Port Louis",
"lat": -20.28333333,
"lng": 57.55,
"cca3": "MUS"
},
{
"name": "Antarctica",
"area": 14000000,
"cioc": "",
"cca2": "AQ",
"capital": "",
"lat": -90,
"lng": 0,
"cca3": "ATA"
},
{
"name": "Luxembourg",
"area": 2586,
"cioc": "LUX",
"cca2": "LU",
"capital": "Luxembourg",
"lat": 49.75,
"lng": 6.16666666,
"cca3": "LUX"
},
{
"name": "Israel",
"area": 20770,
"cioc": "ISR",
"cca2": "IL",
"capital": "Jerusalem",
"lat": 31.47,
"lng": 35.13,
"cca3": "ISR"
},
{
"name": "Micronesia",
"area": 702,
"cioc": "FSM",
"cca2": "FM",
"capital": "Palikir",
"lat": 6.91666666,
"lng": 158.25,
"cca3": "FSM"
},
{
"name": "Peru",
"area": 1285216,
"cioc": "PER",
"cca2": "PE",
"capital": "Lima",
"lat": -10,
"lng": -76,
"cca3": "PER"
},
{
"name": "Reunion",
"area": 2511,
"cioc": "",
"cca2": "RE",
"capital": "Saint-Denis",
"lat": -21.15,
"lng": 55.5,
"cca3": "REU"
},
{
"name": "Indonesia",
"area": 1904569,
"cioc": "INA",
"cca2": "ID",
"capital": "Jakarta",
"lat": -5,
"lng": 120,
"cca3": "IDN"
},
{
"name": "Vanuatu",
"area": 12189,
"cioc": "VAN",
"cca2": "VU",
"capital": "Port Vila",
"lat": -16,
"lng": 167,
"cca3": "VUT"
},
{
"name": "Macedonia",
"area": 25713,
"cioc": "MKD",
"cca2": "MK",
"capital": "Skopje",
"lat": 41.83333333,
"lng": 22,
"cca3": "MKD"
},
{
"name": "DR Congo",
"area": 2344858,
"cioc": "COD",
"cca2": "CD",
"capital": "Kinshasa",
"lat": 0,
"lng": 25,
"cca3": "COD"
},
{
"name": "Republic of the Congo",
"area": 342000,
"cioc": "CGO",
"cca2": "CG",
"capital": "Brazzaville",
"lat": -1,
"lng": 15,
"cca3": "COG"
},
{
"name": "Iceland",
"area": 103000,
"cioc": "ISL",
"cca2": "IS",
"capital": "Reykjavik",
"lat": 65,
"lng": -18,
"cca3": "ISL"
},
{
"name": "Guadeloupe",
"area": 1628,
"cioc": "",
"cca2": "GP",
"capital": "Basse-Terre",
"lat": 16.25,
"lng": -61.583333,
"cca3": "GLP"
},
{
"name": "Cook Islands",
"area": 236,
"cioc": "COK",
"cca2": "CK",
"capital": "Avarua",
"lat": -21.23333333,
"lng": -159.76666666,
"cca3": "COK"
},
{
"name": "Comoros",
"area": 1862,
"cioc": "COM",
"cca2": "KM",
"capital": "Moroni",
"lat": -12.16666666,
"lng": 44.25,
"cca3": "COM"
},
{
"name": "Colombia",
"area": 1141748,
"cioc": "COL",
"cca2": "CO",
"capital": "Bogota",
"lat": 4,
"lng": -72,
"cca3": "COL"
},
{
"name": "Nigeria",
"area": 923768,
"cioc": "NGR",
"cca2": "NG",
"capital": "Abuja",
"lat": 10,
"lng": 8,
"cca3": "NGA"
},
{
"name": "Timor-Leste",
"area": 14874,
"cioc": "TLS",
"cca2": "TL",
"capital": "Dili",
"lat": -8.83333333,
"lng": 125.91666666,
"cca3": "TLS"
},
{
"name": "Taiwan",
"area": 36193,
"cioc": "TPE",
"cca2": "TW",
"capital": "Taipei",
"lat": 23.5,
"lng": 121,
"cca3": "TWN"
},
{
"name": "Portugal",
"area": 92090,
"cioc": "POR",
"cca2": "PT",
"capital": "Lisbon",
"lat": 39.5,
"lng": -8,
"cca3": "PRT"
},
{
"name": "Moldova",
"area": 33846,
"cioc": "MDA",
"cca2": "MD",
"capital": "Chisinau",
"lat": 47,
"lng": 29,
"cca3": "MDA"
},
{
"name": "Guernsey",
"area": 78,
"cioc": "",
"cca2": "GG",
"capital": "St. Peter Port",
"lat": 49.46666666,
"lng": -2.58333333,
"cca3": "GGY"
},
{
"name": "Madagascar",
"area": 587041,
"cioc": "MAD",
"cca2": "MG",
"capital": "Antananarivo",
"lat": -20,
"lng": 47,
"cca3": "MDG"
},
{
"name": "Ecuador",
"area": 276841,
"cioc": "ECU",
"cca2": "EC",
"capital": "Quito",
"lat": -2,
"lng": -77.5,
"cca3": "ECU"
},
{
"name": "Senegal",
"area": 196722,
"cioc": "SEN",
"cca2": "SN",
"capital": "Dakar",
"lat": 14,
"lng": -14,
"cca3": "SEN"
},
{
"name": "New Zealand",
"area": 270467,
"cioc": "NZL",
"cca2": "NZ",
"capital": "Wellington",
"lat": -41,
"lng": 174,
"cca3": "NZL"
},
{
"name": "Maldives",
"area": 300,
"cioc": "MDV",
"cca2": "MV",
"capital": "Male",
"lat": 3.25,
"lng": 73,
"cca3": "MDV"
},
{
"name": "American Samoa",
"area": 199,
"cioc": "ASA",
"cca2": "AS",
"capital": "Pago Pago",
"lat": -14.33333333,
"lng": -170,
"cca3": "ASM"
},
{
"name": "Saint Pierre and Miquelon",
"area": 242,
"cioc": "",
"cca2": "PM",
"capital": "Saint-Pierre",
"lat": 46.83333333,
"lng": -56.33333333,
"cca3": "SPM"
},
{
"name": "Curacao",
"area": 444,
"cioc": "",
"cca2": "CW",
"capital": "Willemstad",
"lat": 12.116667,
"lng": -68.933333,
"cca3": "CUW"
},
{
"name": "France",
"area": 551695,
"cioc": "FRA",
"cca2": "FR",
"capital": "Paris",
"lat": 46,
"lng": 2,
"cca3": "FRA"
},
{
"name": "Lithuania",
"area": 65300,
"cioc": "LTU",
"cca2": "LT",
"capital": "Vilnius",
"lat": 56,
"lng": 24,
"cca3": "LTU"
},
{
"name": "Rwanda",
"area": 26338,
"cioc": "RWA",
"cca2": "RW",
"capital": "Kigali",
"lat": -2,
"lng": 30,
"cca3": "RWA"
},
{
"name": "Zambia",
"area": 752612,
"cioc": "ZAM",
"cca2": "ZM",
"capital": "Lusaka",
"lat": -15,
"lng": 30,
"cca3": "ZMB"
},
{
"name": "Gambia",
"area": 10689,
"cioc": "GAM",
"cca2": "GM",
"capital": "Banjul",
"lat": 13.46666666,
"lng": -16.56666666,
"cca3": "GMB"
},
{
"name": "Wallis and Futuna",
"area": 142,
"cioc": "",
"cca2": "WF",
"capital": "Mata-Utu",
"lat": -13.3,
"lng": -176.2,
"cca3": "WLF"
},
{
"name": "Jersey",
"area": 116,
"cioc": "",
"cca2": "JE",
"capital": "Saint Helier",
"lat": 49.25,
"lng": -2.16666666,
"cca3": "JEY"
},
{
"name": "Faroe Islands",
"area": 1393,
"cioc": "",
"cca2": "FO",
"capital": "Torshavn",
"lat": 62,
"lng": -7,
"cca3": "FRO"
},
{
"name": "Guatemala",
"area": 108889,
"cioc": "GUA",
"cca2": "GT",
"capital": "Guatemala City",
"lat": 15.5,
"lng": -90.25,
"cca3": "GTM"
},
{
"name": "Denmark",
"area": 43094,
"cioc": "DEN",
"cca2": "DK",
"capital": "Copenhagen",
"lat": 56,
"lng": 10,
"cca3": "DNK"
},
{
"name": "Isle of Man",
"area": 572,
"cioc": "",
"cca2": "IM",
"capital": "Douglas",
"lat": 54.25,
"lng": -4.5,
"cca3": "IMN"
},
{
"name": "Australia",
"area": 7692024,
"cioc": "AUS",
"cca2": "AU",
"capital": "Canberra",
"lat": -27,
"lng": 133,
"cca3": "AUS"
},
{
"name": "Austria",
"area": 83871,
"cioc": "AUT",
"cca2": "AT",
"capital": "Vienna",
"lat": 47.33333333,
"lng": 13.33333333,
"cca3": "AUT"
},
{
"name": "Svalbard and Jan Mayen",
"area": -1,
"cioc": "",
"cca2": "SJ",
"capital": "Longyearbyen",
"lat": 78,
"lng": 20,
"cca3": "SJM"
},
{
"name": "Venezuela",
"area": 916445,
"cioc": "VEN",
"cca2": "VE",
"capital": "Caracas",
"lat": 8,
"lng": -66,
"cca3": "VEN"
},
{
"name": "Kosovo",
"area": 10908,
"cioc": "KOS",
"cca2": "XK",
"capital": "Pristina",
"lat": 42.666667,
"lng": 21.166667,
"cca3": "UNK"
},
{
"name": "Palau",
"area": 459,
"cioc": "PLW",
"cca2": "PW",
"capital": "Ngerulmud",
"lat": 7.5,
"lng": 134.5,
"cca3": "PLW"
},
{
"name": "Kenya",
"area": 580367,
"cioc": "KEN",
"cca2": "KE",
"capital": "Nairobi",
"lat": 1,
"lng": 38,
"cca3": "KEN"
},
{
"name": "Samoa",
"area": 2842,
"cioc": "SAM",
"cca2": "WS",
"capital": "Apia",
"lat": -13.58333333,
"lng": -172.33333333,
"cca3": "WSM"
},
{
"name": "Turkey",
"area": 783562,
"cioc": "TUR",
"cca2": "TR",
"capital": "Ankara",
"lat": 39,
"lng": 35,
"cca3": "TUR"
},
{
"name": "Albania",
"area": 28748,
"cioc": "ALB",
"cca2": "AL",
"capital": "Tirana",
"lat": 41,
"lng": 20,
"cca3": "ALB"
},
{
"name": "Oman",
"area": 309500,
"cioc": "OMA",
"cca2": "OM",
"capital": "Muscat",
"lat": 21,
"lng": 57,
"cca3": "OMN"
},
{
"name": "Tuvalu",
"area": 26,
"cioc": "TUV",
"cca2": "TV",
"capital": "Funafuti",
"lat": -8,
"lng": 178,
"cca3": "TUV"
},
{
"name": "Aland Islands",
"area": 1580,
"cioc": "",
"cca2": "AX",
"capital": "Mariehamn",
"lat": 60.116667,
"lng": 19.9,
"cca3": "ALA"
},
{
"name": "Brunei",
"area": 5765,
"cioc": "BRU",
"cca2": "BN",
"capital": "Bandar Seri Begawan",
"lat": 4.5,
"lng": 114.66666666,
"cca3": "BRN"
},
{
"name": "Tunisia",
"area": 163610,
"cioc": "TUN",
"cca2": "TN",
"capital": "Tunis",
"lat": 34,
"lng": 9,
"cca3": "TUN"
},
{
"name": "Pitcairn Islands",
"area": 47,
"cioc": "",
"cca2": "PN",
"capital": "Adamstown",
"lat": -25.06666666,
"lng": -130.1,
"cca3": "PCN"
},
{
"name": "Barbados",
"area": 430,
"cioc": "BAR",
"cca2": "BB",
"capital": "Bridgetown",
"lat": 13.16666666,
"lng": -59.53333333,
"cca3": "BRB"
},
{
"name": "Brazil",
"area": 8515767,
"cioc": "BRA",
"cca2": "BR",
"capital": "Brasilia",
"lat": -10,
"lng": -55,
"cca3": "BRA"
},
{
"name": "Ivory Coast",
"area": 322463,
"cioc": "CIV",
"cca2": "CI",
"capital": "Yamoussoukro",
"lat": 8,
"lng": -5,
"cca3": "CIV"
},
{
"name": "Serbia",
"area": 88361,
"cioc": "SRB",
"cca2": "RS",
"capital": "Belgrade",
"lat": 44,
"lng": 21,
"cca3": "SRB"
},
{
"name": "Equatorial Guinea",
"area": 28051,
"cioc": "GEQ",
"cca2": "GQ",
"capital": "Malabo",
"lat": 2,
"lng": 10,
"cca3": "GNQ"
},
{
"name": "United States",
"area": 9372610,
"cioc": "USA",
"cca2": "US",
"capital": "Washington D.C.",
"lat": 38,
"lng": -97,
"cca3": "USA"
},
{
"name": "Qatar",
"area": 11586,
"cioc": "QAT",
"cca2": "QA",
"capital": "Doha",
"lat": 25.5,
"lng": 51.25,
"cca3": "QAT"
},
{
"name": "Sweden",
"area": 450295,
"cioc": "SWE",
"cca2": "SE",
"capital": "Stockholm",
"lat": 62,
"lng": 15,
"cca3": "SWE"
},
{
"name": "Azerbaijan",
"area": 86600,
"cioc": "AZE",
"cca2": "AZ",
"capital": "Baku",
"lat": 40.5,
"lng": 47.5,
"cca3": "AZE"
},
{
"name": "Guinea-Bissau",
"area": 36125,
"cioc": "GBS",
"cca2": "GW",
"capital": "Bissau",
"lat": 12,
"lng": -15,
"cca3": "GNB"
},
{
"name": "Swaziland",
"area": 17364,
"cioc": "SWZ",
"cca2": "SZ",
"capital": "Lobamba",
"lat": -26.5,
"lng": 31.5,
"cca3": "SWZ"
},
{
"name": "Tonga",
"area": 747,
"cioc": "TGA",
"cca2": "TO",
"capital": "Nuku'alofa",
"lat": -20,
"lng": -175,
"cca3": "TON"
},
{
"name": "Canada",
"area": 9984670,
"cioc": "CAN",
"cca2": "CA",
"capital": "Ottawa",
"lat": 60,
"lng": -95,
"cca3": "CAN"
},
{
"name": "Ukraine",
"area": 603500,
"cioc": "UKR",
"cca2": "UA",
"capital": "Kiev",
"lat": 49,
"lng": 32,
"cca3": "UKR"
},
{
"name": "South Korea",
"area": 100210,
"cioc": "KOR",
"cca2": "KR",
"capital": "Seoul",
"lat": 37,
"lng": 127.5,
"cca3": "KOR"
},
{
"name": "Anguilla",
"area": 91,
"cioc": "",
"cca2": "AI",
"capital": "The Valley",
"lat": 18.25,
"lng": -63.16666666,
"cca3": "AIA"
},
{
"name": "Central African Republic",
"area": 622984,
"cioc": "CAF",
"cca2": "CF",
"capital": "Bangui",
"lat": 7,
"lng": 21,
"cca3": "CAF"
},
{
"name": "Slovakia",
"area": 49037,
"cioc": "SVK",
"cca2": "SK",
"capital": "Bratislava",
"lat": 48.66666666,
"lng": 19.5,
"cca3": "SVK"
},
{
"name": "Cyprus",
"area": 9251,
"cioc": "CYP",
"cca2": "CY",
"capital": "Nicosia",
"lat": 35,
"lng": 33,
"cca3": "CYP"
},
{
"name": "Bosnia and Herzegovina",
"area": 51209,
"cioc": "BIH",
"cca2": "BA",
"capital": "Sarajevo",
"lat": 44,
"lng": 18,
"cca3": "BIH"
},
{
"name": "Singapore",
"area": 710,
"cioc": "SIN",
"cca2": "SG",
"capital": "Singapore",
"lat": 1.36666666,
"lng": 103.8,
"cca3": "SGP"
},
{
"name": "South Georgia",
"area": 3903,
"cioc": "",
"cca2": "GS",
"capital": "King Edward Point",
"lat": -54.5,
"lng": -37,
"cca3": "SGS"
},
{
"name": "Somalia",
"area": 637657,
"cioc": "SOM",
"cca2": "SO",
"capital": "Mogadishu",
"lat": 10,
"lng": 49,
"cca3": "SOM"
},
{
"name": "Uzbekistan",
"area": 447400,
"cioc": "UZB",
"cca2": "UZ",
"capital": "Tashkent",
"lat": 41,
"lng": 64,
"cca3": "UZB"
},
{
"name": "Eritrea",
"area": 117600,
"cioc": "ERI",
"cca2": "ER",
"capital": "Asmara",
"lat": 15,
"lng": 39,
"cca3": "ERI"
},
{
"name": "Poland",
"area": 312679,
"cioc": "POL",
"cca2": "PL",
"capital": "Warsaw",
"lat": 52,
"lng": 20,
"cca3": "POL"
},
{
"name": "Kuwait",
"area": 17818,
"cioc": "KUW",
"cca2": "KW",
"capital": "Kuwait City",
"lat": 29.5,
"lng": 45.75,
"cca3": "KWT"
},
{
"name": "Gabon",
"area": 267668,
"cioc": "GAB",
"cca2": "GA",
"capital": "Libreville",
"lat": -1,
"lng": 11.75,
"cca3": "GAB"
},
{
"name": "Cayman Islands",
"area": 264,
"cioc": "CAY",
"cca2": "KY",
"capital": "George Town",
"lat": 19.5,
"lng": -80.5,
"cca3": "CYM"
},
{
"name": "Vatican City",
"area": 0.44,
"cioc": "",
"cca2": "VA",
"capital": "Vatican City",
"lat": 41.9,
"lng": 12.45,
"cca3": "VAT"
},
{
"name": "Estonia",
"area": 45227,
"cioc": "EST",
"cca2": "EE",
"capital": "Tallinn",
"lat": 59,
"lng": 26,
"cca3": "EST"
},
{
"name": "Malawi",
"area": 118484,
"cioc": "MAW",
"cca2": "MW",
"capital": "Lilongwe",
"lat": -13.5,
"lng": 34,
"cca3": "MWI"
},
{
"name": "Spain",
"area": 505992,
"cioc": "ESP",
"cca2": "ES",
"capital": "Madrid",
"lat": 40,
"lng": -4,
"cca3": "ESP"
},
{
"name": "Iraq",
"area": 438317,
"cioc": "IRQ",
"cca2": "IQ",
"capital": "Baghdad",
"lat": 33,
"lng": 44,
"cca3": "IRQ"
},
{
"name": "El Salvador",
"area": 21041,
"cioc": "ESA",
"cca2": "SV",
"capital": "San Salvador",
"lat": 13.83333333,
"lng": -88.91666666,
"cca3": "SLV"
},
{
"name": "Mali",
"area": 1240192,
"cioc": "MLI",
"cca2": "ML",
"capital": "Bamako",
"lat": 17,
"lng": -4,
"cca3": "MLI"
},
{
"name": "Ireland",
"area": 70273,
"cioc": "IRL",
"cca2": "IE",
"capital": "Dublin",
"lat": 53,
"lng": -8,
"cca3": "IRL"
},
{
"name": "Iran",
"area": 1648195,
"cioc": "IRI",
"cca2": "IR",
"capital": "Tehran",
"lat": 32,
"lng": 53,
"cca3": "IRN"
},
{
"name": "Aruba",
"area": 180,
"cioc": "ARU",
"cca2": "AW",
"capital": "Oranjestad",
"lat": 12.5,
"lng": -69.96666666,
"cca3": "ABW"
},
{
"name": "Papua New Guinea",
"area": 462840,
"cioc": "PNG",
"cca2": "PG",
"capital": "Port Moresby",
"lat": -6,
"lng": 147,
"cca3": "PNG"
},
{
"name": "Panama",
"area": 75417,
"cioc": "PAN",
"cca2": "PA",
"capital": "Panama City",
"lat": 9,
"lng": -80,
"cca3": "PAN"
},
{
"name": "Sudan",
"area": 1886068,
"cioc": "SUD",
"cca2": "SD",
"capital": "Khartoum",
"lat": 15,
"lng": 30,
"cca3": "SDN"
},
{
"name": "Solomon Islands",
"area": 28896,
"cioc": "SOL",
"cca2": "SB",
"capital": "Honiara",
"lat": -8,
"lng": 159,
"cca3": "SLB"
},
{
"name": "Western Sahara",
"area": 266000,
"cioc": "",
"cca2": "EH",
"capital": "El Aaiun",
"lat": 24.5,
"lng": -13,
"cca3": "ESH"
},
{
"name": "Monaco",
"area": 2.02,
"cioc": "MON",
"cca2": "MC",
"capital": "Monaco",
"lat": 43.73333333,
"lng": 7.4,
"cca3": "MCO"
},
{
"name": "Italy",
"area": 301336,
"cioc": "ITA",
"cca2": "IT",
"capital": "Rome",
"lat": 42.83333333,
"lng": 12.83333333,
"cca3": "ITA"
},
{
"name": "Japan",
"area": 377930,
"cioc": "JPN",
"cca2": "JP",
"capital": "Tokyo",
"lat": 36,
"lng": 138,
"cca3": "JPN"
},
{
"name": "Kyrgyzstan",
"area": 199951,
"cioc": "KGZ",
"cca2": "KG",
"capital": "Bishkek",
"lat": 41,
"lng": 75,
"cca3": "KGZ"
},
{
"name": "Uganda",
"area": 241550,
"cioc": "UGA",
"cca2": "UG",
"capital": "Kampala",
"lat": 1,
"lng": 32,
"cca3": "UGA"
},
{
"name": "New Caledonia",
"area": 18575,
"cioc": "",
"cca2": "NC",
"capital": "Noumea",
"lat": -21.5,
"lng": 165.5,
"cca3": "NCL"
},
{
"name": "United Arab Emirates",
"area": 83600,
"cioc": "UAE",
"cca2": "AE",
"capital": "Abu Dhabi",
"lat": 24,
"lng": 54,
"cca3": "ARE"
},
{
"name": "Argentina",
"area": 2780400,
"cioc": "ARG",
"cca2": "AR",
"capital": "Buenos Aires",
"lat": -34,
"lng": -64,
"cca3": "ARG"
},
{
"name": "Bahamas",
"area": 13943,
"cioc": "BAH",
"cca2": "BS",
"capital": "Nassau",
"lat": 24.25,
"lng": -76,
"cca3": "BHS"
},
{
"name": "Bahrain",
"area": 765,
"cioc": "BRN",
"cca2": "BH",
"capital": "Manama",
"lat": 26,
"lng": 50.55,
"cca3": "BHR"
},
{
"name": "Armenia",
"area": 29743,
"cioc": "ARM",
"cca2": "AM",
"capital": "Yerevan",
"lat": 40,
"lng": 45,
"cca3": "ARM"
},
{
"name": "Nauru",
"area": 21,
"cioc": "NRU",
"cca2": "NR",
"capital": "Yaren",
"lat": -0.53333333,
"lng": 166.91666666,
"cca3": "NRU"
},
{
"name": "Cuba",
"area": 109884,
"cioc": "CUB",
"cca2": "CU",
"capital": "Havana",
"lat": 21.5,
"lng": -80,
"cca3": "CUB"
}
]
# Codes we can resolve a country by: IOC code, ISO alpha-2, ISO alpha-3, name.
lookups = ['cioc', 'cca2', 'cca3', 'name']

# One lowercase-keyed index per code type, each mapping code -> country record.
# (Entries with duplicate keys, e.g. the empty "cioc" of territories, resolve
# to the last record seen, exactly as the original insertion loop did.)
all_lookups = {
    key: {entry[key].lower(): entry for entry in countries}
    for key in lookups
}
def get(field, symbol):
    """
    Get country data based on a standard code and a symbol.

    ``field`` is one of the indexed code types ('cioc', 'cca2', 'cca3',
    'name'); ``symbol`` is matched case-insensitively.  Returns the matching
    country record (a dict) or ``None`` when the symbol is unknown.

    >>> get('cioc', 'CUB')['name']
    'Cuba'
    >>> get('cca2', 'CA')['name']
    'Canada'
    """
    # Lookup keys are stored lowercased, so normalise the symbol first.
    return all_lookups[field].get(symbol.lower())
# NOTE(review): two stray "|" lines removed here — they were file-boundary
# artifacts from concatenation and are not valid Python.
from PyQt5.QtCore import *
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QMessageBox
from Business.ParameterActions import set_parameter, set_parameter_name, delete_parameters
from Data.Parameters import *
from GUI.init import formula_from_locale, formula_to_locale, gui_scale
__author__ = 'mamj'
# Column headers of the parameters table, in display order; the model's
# column indices (0..3) map one-to-one onto these labels.
col_header = ["Name", "Expression", "Value", 'Hide']
class ParametersModel(QAbstractTableModel):
def __init__(self, parameters: Parameters):
    """
    Table model exposing a Parameters collection to a Qt view.

    :param parameters: the Parameters collection backing this model; the
        model registers itself for change notifications on it.
    """
    # Initialise the actual base class via super() instead of calling
    # QAbstractItemModel.__init__ directly (the original skipped
    # QAbstractTableModel in the chain).
    super().__init__()
    self._parameters = parameters
    parameters.add_change_handler(self.on_parameters_changed)
    self.old_row_count = 0
    self._gui_scale = gui_scale()
    # Default pixel widths for the Name/Expression/Value/Hide columns.
    self._columns_widths = [120, 150, 80, 40]
    # uid of the ParametersInstance being shown, or None for the base set.
    self._instance = None
    # Callbacks fired whenever the user edits a cell.
    self._user_input_handlers = []
@property
def instance(self):
    """uid of the ParametersInstance currently displayed, or None."""
    return self._instance

@instance.setter
def instance(self, value):
    """Select which ParametersInstance (by uid) the model displays."""
    self._instance = value
def add_user_input_handler(self, user_input_handler):
    """Register a callback to be invoked on every user edit (see on_user_input)."""
    handlers = self._user_input_handlers
    handlers.append(user_input_handler)
def on_user_input(self):
    """Notify every registered user-input handler that an edit occurred."""
    # Handlers take a single argument; None signals a generic edit event.
    for handler in self._user_input_handlers:
        handler(None)
def set_parameters(self, params):
    """
    Swap the backing Parameters collection and reset the model.

    Unsubscribes from the old collection, subscribes to the new one, and
    tracks the instance uid when ``params`` is a ParametersInstance.

    :param params: the new Parameters (or ParametersInstance) to display.
    """
    self._parameters.remove_change_handler(self.on_parameters_changed)
    self._parameters = params
    # isinstance() is the idiomatic, equivalent form of
    # issubclass(type(params), ParametersInstance).
    if isinstance(params, ParametersInstance):
        self._instance = params.uid
    else:
        self._instance = None
    self._parameters.add_change_handler(self.on_parameters_changed)
    # Tell attached views to discard cached layout/data.
    self.modelReset.emit()
def rowCount(self, model_index=None, *args, **kwargs):
    """Qt override: one table row per parameter item in the collection."""
    count = self._parameters.length_all
    return count
def columnCount(self, model_index=None, *args, **kwargs):
    """Qt override: fixed four columns (Name, Expression, Value, Hide)."""
    return 4
def data(self, model_index: QModelIndex, int_role=None):
col = model_index.column()
row = model_index.row()
data = None
if int_role == Qt.DisplayRole:
param_item = self._parameters.get_parameter_item(row)
if col == 0:
data = param_item.full_name
elif col == 1:
if param_item is Parameters:
param = param_item.get_parameter(col - 2)
else:
param = param_item
data = formula_to_locale(param.get_instance_formula(self._instance))
elif col == 2:
if param_item is Parameters:
param = param_item.get_parameter(col - 2)
else:
param = param_item
data = param.get_instance_value(self._instance)
elif col == 3:
data = None # param_item.hidden
elif int_role == Qt.TextAlignmentRole:
if col == 0:
return None
else:
param_item = self._parameters.get_parameter_item(row)
if param_item is not None:
if col == 1:
if type(param_item.formula) is str:
return Qt.AlignLeft | Qt.AlignVCenter
else:
return Qt.AlignRight | Qt.AlignVCenter
else:
if type(param_item.value) is str:
return Qt.AlignLeft | Qt.AlignVCenter
else:
return Qt.AlignRight | Qt.AlignVCenter
elif int_role == Qt.CheckStateRole:
param_item = self._parameters.get_parameter_item(row)
if col == 3:
if param_item.hidden:
return Qt.Checked
else:
return Qt.Unchecked
elif int_role == Qt.EditRole:
param_item = self._parameters.get_parameter_item(row)
if col == 0:
data = param_item.name
elif col == 1:
if param_item is Parameters:
param = param_item.get_parameter(col - 1)
else:
param = param_item
if param.get_instance_formula(self._instance) != "":
data = data = formula_to_locale(param.get_instance_formula(self._instance))
else:
data = QLocale().toString(param.get_instance_value(self._instance))
elif col == 2:
if param_item is Parameters:
param = param_item.get_parameter(col - 2)
else:
param = param_item
if param.formula != "":
data = formula_to_locale(param.get_instance_formula(self._instance))
else:
data = QLocale().toString(param.get_instance_value(self._instance))
elif int_role == Qt.BackgroundColorRole:
param_item = self._parameters.get_parameter_item(row)
if self._parameters.param_in_current_type(param_item):
return QColor(80,120,200,50)
else:
return None
return data
def setData(self, model_index: QModelIndex, value: QVariant, int_role=None):
col = model_index.column()
row = model_index.row()
param_item = self._parameters.get_parameter_item(row)
if col == 0:
set_parameter_name(param_item, value)
# param_item.name = value
self.on_user_input()
return True
elif col == 1 or col == 2:
if param_item is Parameters:
param = param_item.get_parameter(col - 1)
else:
param = param_item
if isinstance(value, float):
set_parameter(param, value, self._instance)
# param.value = value
self.on_user_input()
return True
parsed = QLocale().toDouble(value)
if parsed[1]:
# param.value = parsed[0]
try:
set_parameter(param, parsed[0], self._instance)
except Exception as e:
self._parameters.document.set_status(str(e))
else:
try:
if value == "":
set_parameter(param, 0.0, self._instance)
# param.value = 0.0
else:
set_parameter(param, formula_from_locale(value), self._instance)
# param.value = formula_from_locale(value)
except Exception as ex:
self._parameters.document.set_status(str(ex))
self.on_user_input()
return True
elif col == 3:
if int_role == Qt.CheckStateRole:
hide = value == Qt.Checked
param_item.hidden = hide
return False
def on_parameters_changed(self, event):
if issubclass(type(event.sender), ParametersBase):
if event.type == ChangeEvent.ObjectChanged and event.object == event.sender:
self.modelAboutToBeReset.emit()
self.modelReset.emit()
if type(event.sender) is Parameter or type(event.object) is Parameter:
if event.type == event.BeforeObjectAdded:
parent = QModelIndex()
row = self.rowCount()
self.beginInsertRows(parent, row, row)
if event.type == event.ObjectAdded:
self.endInsertRows()
if event.type == event.BeforeObjectRemoved:
row = self._parameters.get_index_of(event.object)
self.beginRemoveRows(QModelIndex(), row, row)
if event.type == event.ObjectRemoved:
self.endRemoveRows()
if event.type == event.ValueChanged:
param = event.object
row = self._parameters.get_index_of(param)
left = self.createIndex(row, 0)
right = self.createIndex(row, 3)
self.dataChanged.emit(left, right)
if event.type == event.HiddenChanged:
param = event.object
if type(param) is Parameter:
row = self._parameters.get_index_of(param)
left = self.createIndex(row, 3)
right = self.createIndex(row, 3)
self.dataChanged.emit(left, right)
def flags(self, model_index: QModelIndex):
default_flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
if model_index.column() == 3:
default_flags = Qt.ItemIsUserCheckable | Qt.ItemIsEnabled
return default_flags
def headerData(self, p_int, orientation, int_role=None):
if int_role == Qt.DisplayRole:
if orientation == Qt.Vertical:
return p_int
else:
return col_header[p_int]
elif int_role == Qt.SizeHintRole:
if orientation == Qt.Horizontal:
return QSize(self._columns_widths[p_int] * self._gui_scale, 22 * self._gui_scale);
else:
return None
def get_parameters_object(self):
return self._parameters
def remove_rows(self, rows):
params = []
for row in rows:
params.append(self._parameters.get_parameter_item(row))
delete_parameters(self._parameters, params)
def row_hidden(self, row):
return self._parameters.get_parameter_item(row).hidden
def get_parameter_from_row(self, row):
return self._parameters.get_parameter_item(row)
def get_row_from_parameter(self, parameter):
return self._parameters.get_index_of(parameter)
|
|
import sys
import re
import textwrap
import pytest
from doctest import OutputChecker, ELLIPSIS
from tests.lib.local_repos import local_checkout, local_repo
distribute_re = re.compile('^distribute==[0-9.]+\n', re.MULTILINE)
def _check_output(result, expected):
checker = OutputChecker()
actual = str(result)
# FIXME! The following is a TOTAL hack. For some reason the
# __str__ result for pkg_resources.Requirement gets downcased on
# Windows. Since INITools is the only package we're installing
# in this file with funky case requirements, I'm forcibly
# upcasing it. You can also normalize everything to lowercase,
# but then you have to remember to upcase <BLANKLINE>. The right
# thing to do in the end is probably to find out how to report
# the proper fully-cased package name in our error message.
if sys.platform == 'win32':
actual = actual.replace('initools', 'INITools')
# This allows our existing tests to work when run in a context
# with distribute installed.
actual = distribute_re.sub('', actual)
def banner(msg):
return '\n========== %s ==========\n' % msg
assert checker.check_output(expected, actual, ELLIPSIS), (
banner('EXPECTED') + expected + banner('ACTUAL') + actual +
banner(6 * '=')
)
def test_freeze_basic(script):
    """
    Some tests of freeze, first we have to install some stuff. Note that
    the test is a little crude at the end because Python 2.5+ adds egg
    info to the standard library, so stuff like wsgiref will show up in
    the freezing. (Probably that should be accounted for in pip, but
    currently it is not).
    """
    # Install two local packages from a requirements file, then check that
    # `pip freeze` reports both pinned versions.
    script.scratch_path.join("initools-req.txt").write(textwrap.dedent("""\
        simple==2.0
        # and something else to test out:
        simple2<=3.0
        """))
    script.pip_install_local(
        '-r', script.scratch_path / 'initools-req.txt',
    )
    result = script.pip('freeze', expect_stderr=True)
    # ELLIPSIS-matched against the actual output by _check_output.
    expected = textwrap.dedent("""\
        Script result: pip freeze
        -- stdout: --------------------
        ...simple==2.0
        simple2==3.0...
        <BLANKLINE>""")
    _check_output(result, expected)
@pytest.mark.network
def test_freeze_svn(script, tmpdir):
    """Test freezing a svn checkout"""
    checkout_path = local_checkout(
        'svn+http://svn.colorstudy.com/INITools/trunk',
        tmpdir.join("cache"),
    )
    # svn internally stores windows drives as uppercase; we'll match that.
    checkout_path = checkout_path.replace('c:', 'C:')
    # Checkout
    script.run(
        'svn', 'co', '-r10',
        local_repo(
            'svn+http://svn.colorstudy.com/INITools/trunk',
            tmpdir.join("cache"),
        ),
        'initools-trunk',
    )
    # Install with develop
    script.run(
        'python', 'setup.py', 'develop',
        cwd=script.scratch_path / 'initools-trunk',
        expect_stderr=True,
    )
    result = script.pip('freeze', expect_stderr=True)
    # The editable install should freeze to "-e <svn url>@10#egg=...".
    expected = textwrap.dedent("""\
        Script result: pip freeze
        -- stdout: --------------------
        ...-e %s@10#egg=INITools-0.3.1dev...-dev_r10
        ...""" % checkout_path)
    _check_output(result, expected)
@pytest.mark.network
def test_freeze_git_clone(script, tmpdir):
    """
    Test freezing a Git clone.
    """
    # Clone the test package and pin it to a known commit.
    result = script.run(
        'git',
        'clone',
        local_repo(
            'git+http://github.com/pypa/pip-test-package.git',
            tmpdir.join("cache"),
        ),
        'pip-test-package',
        expect_stderr=True,
    )
    repo_dir = script.scratch_path / 'pip-test-package'
    result = script.run(
        'git',
        'checkout',
        '7d654e66c8fa7149c165ddeffa5b56bc06619458',
        cwd=repo_dir,
        expect_stderr=True,
    )
    result = script.run(
        'python', 'setup.py', 'develop',
        cwd=repo_dir
    )
    result = script.pip('freeze', expect_stderr=True)
    # Editable git install freezes to "-e <url>@<commit>#egg=...".
    expected = textwrap.dedent(
        """
        Script result: ...pip freeze
        -- stdout: --------------------
        ...-e %s@...#egg=pip_test_package-...
        ...
        """ %
        local_checkout(
            'git+http://github.com/pypa/pip-test-package.git',
            tmpdir.join("cache"),
        )
    ).strip()
    _check_output(result, expected)
    # With -f, freeze echoes the find-links line before the requirement.
    result = script.pip(
        'freeze', '-f',
        '%s#egg=pip_test_package' %
        local_checkout(
            'git+http://github.com/pypa/pip-test-package.git',
            tmpdir.join("cache"),
        ),
        expect_stderr=True,
    )
    expected = textwrap.dedent(
        """
        Script result: pip freeze -f %(repo)s#egg=pip_test_package
        -- stdout: --------------------
        -f %(repo)s#egg=pip_test_package...
        -e %(repo)s@...#egg=pip_test_package-0.1.1
        ...
        """ %
        {
            'repo': local_checkout(
                'git+http://github.com/pypa/pip-test-package.git',
                tmpdir.join("cache"),
            ),
        },
    ).strip()
    _check_output(result, expected)
    # Check that slashes in branch or tag names are translated.
    # See also issue #1083: https://github.com/pypa/pip/issues/1083
    script.run(
        'git', 'checkout', '-b', 'branch/name/with/slash',
        cwd=repo_dir,
        expect_stderr=True,
    )
    # Create a new commit to ensure that the commit has only one branch
    # or tag name associated to it (to avoid the non-determinism reported
    # in issue #1867).
    script.run(
        'git', 'revert', '--no-edit', 'HEAD',
        cwd=repo_dir,
    )
    result = script.pip('freeze', expect_stderr=True)
    # Slashes in the branch name must appear as underscores in the egg name.
    expected = textwrap.dedent(
        """
        Script result: ...pip freeze
        -- stdout: --------------------
        ...-e ...@...#egg=pip_test_package-branch_name_with_slash...
        ...
        """
    ).strip()
    _check_output(result, expected)
@pytest.mark.network
def test_freeze_mercurial_clone(script, tmpdir):
    """
    Test freezing a Mercurial clone.
    """
    # Clone at a fixed revision and install in develop mode.
    result = script.run(
        'hg', 'clone',
        '-r', 'c9963c111e7c',
        local_repo(
            'hg+http://bitbucket.org/pypa/pip-test-package',
            tmpdir.join("cache"),
        ),
        'pip-test-package',
    )
    result = script.run(
        'python', 'setup.py', 'develop',
        cwd=script.scratch_path / 'pip-test-package',
        expect_stderr=True,
    )
    result = script.pip('freeze', expect_stderr=True)
    # Editable hg install freezes to "-e <url>@<rev>#egg=...".
    expected = textwrap.dedent(
        """
        Script result: ...pip freeze
        -- stdout: --------------------
        ...-e %s@...#egg=pip_test_package-...
        ...
        """ %
        local_checkout(
            'hg+http://bitbucket.org/pypa/pip-test-package',
            tmpdir.join("cache"),
        ),
    ).strip()
    _check_output(result, expected)
    # With -f, freeze echoes the find-links line before the requirement.
    result = script.pip(
        'freeze', '-f',
        '%s#egg=pip_test_package' %
        local_checkout(
            'hg+http://bitbucket.org/pypa/pip-test-package',
            tmpdir.join("cache"),
        ),
        expect_stderr=True,
    )
    expected = textwrap.dedent(
        """
        Script result: ...pip freeze -f %(repo)s#egg=pip_test_package
        -- stdout: --------------------
        -f %(repo)s#egg=pip_test_package
        ...-e %(repo)s@...#egg=pip_test_package-dev
        ...
        """ %
        {
            'repo': local_checkout(
                'hg+http://bitbucket.org/pypa/pip-test-package',
                tmpdir.join("cache"),
            ),
        },
    ).strip()
    _check_output(result, expected)
@pytest.mark.network
def test_freeze_bazaar_clone(script, tmpdir):
    """
    Test freezing a Bazaar clone.
    """
    checkout_path = local_checkout(
        'bzr+http://bazaar.launchpad.net/%7Edjango-wikiapp/django-wikiapp/'
        'release-0.1',
        tmpdir.join("cache"),
    )
    # bzr internally stores windows drives as uppercase; we'll match that.
    checkout_pathC = checkout_path.replace('c:', 'C:')
    result = script.run(
        'bzr', 'checkout', '-r', '174',
        local_repo(
            'bzr+http://bazaar.launchpad.net/%7Edjango-wikiapp/django-wikiapp/'
            'release-0.1',
            tmpdir.join("cache"),
        ),
        'django-wikiapp',
    )
    result = script.run(
        'python', 'setup.py', 'develop',
        cwd=script.scratch_path / 'django-wikiapp',
    )
    result = script.pip('freeze', expect_stderr=True)
    # Editable bzr install freezes to "-e <url>@<rev>#egg=...".
    expected = textwrap.dedent("""\
        Script result: ...pip freeze
        -- stdout: --------------------
        ...-e %s@...#egg=django_wikiapp-...
        ...""" % checkout_pathC)
    _check_output(result, expected)
    result = script.pip(
        'freeze', '-f',
        '%s/#egg=django-wikiapp' % checkout_path,
        expect_stderr=True,
    )
    # The -f line keeps the original drive casing; the -e line uses the
    # uppercased variant produced by bzr on Windows.
    expected = textwrap.dedent("""\
        Script result: ...pip freeze -f %(repo)s/#egg=django-wikiapp
        -- stdout: --------------------
        -f %(repo)s/#egg=django-wikiapp
        ...-e %(repoC)s@...#egg=django_wikiapp-...
        ...""" % {'repoC': checkout_pathC, 'repo': checkout_path})
    _check_output(result, expected)
@pytest.mark.network
def test_freeze_with_local_option(script):
    """
    Test that wsgiref (from global site-packages) is reported normally, but not
    with --local.
    """
    result = script.pip('install', 'initools==0.2')
    result = script.pip('freeze', expect_stderr=True)
    expected = textwrap.dedent("""\
        Script result: ...pip freeze
        -- stdout: --------------------
        INITools==0.2
        wsgiref==...
        <BLANKLINE>""")
    # The following check is broken (see
    # http://bitbucket.org/ianb/pip/issue/110). For now we are simply
    # neutering this test, but if we can't find a way to fix it,
    # this whole function should be removed.
    # _check_output(result, expected)
    result = script.pip('freeze', '--local', expect_stderr=True)
    # With --local the globally installed wsgiref must not be listed.
    expected = textwrap.dedent("""\
        Script result: ...pip freeze --local
        -- stdout: --------------------
        INITools==0.2
        <BLANKLINE>""")
    _check_output(result, expected)
@pytest.mark.network
def test_freeze_with_requirement_option(script):
    """
    Test that new requirements are created correctly with --requirement hints
    """
    # Option lines that freeze must pass through to its output unchanged.
    ignores = textwrap.dedent("""\
        # Unchanged requirements below this line
        -r ignore.txt
        --requirement ignore.txt
        -Z ignore
        --always-unzip ignore
        -f http://ignore
        -i http://ignore
        --extra-index-url http://ignore
        --find-links http://ignore
        --index-url http://ignore
        """)
    script.scratch_path.join("hint.txt").write(textwrap.dedent("""\
        INITools==0.1
        NoExist==4.2
        """) + ignores)
    result = script.pip('install', 'initools==0.2')
    result = script.pip_install_local('simple')
    result = script.pip(
        'freeze', '--requirement', 'hint.txt',
        expect_stderr=True,
    )
    # Missing NoExist is reported on stderr; the installed INITools version
    # replaces the hinted pin; extra installed packages are appended after
    # the "added by pip freeze" marker.
    expected = """\
Script result: pip freeze --requirement hint.txt
-- stderr: --------------------
Requirement file contains NoExist==4.2, but that package is not installed
-- stdout: --------------------
INITools==0.2
""" + ignores + "## The following requirements were added by pip freeze:..."
    _check_output(result, expected)
def test_freeze_user(script, virtualenv):
    """
    Testing freeze with --user, first we have to install some stuff.
    """
    # Install one package per-user and another system-wide; `freeze --user`
    # must list only the per-user one.
    virtualenv.system_site_packages = True
    script.pip_install_local('--user', 'simple==2.0')
    script.pip_install_local('simple2==3.0')
    result = script.pip('freeze', '--user', expect_stderr=True)
    expected = textwrap.dedent("""\
        Script result: pip freeze --user
        -- stdout: --------------------
        simple==2.0
        <BLANKLINE>""")
    _check_output(result, expected)
    assert 'simple2' not in result.stdout
|
|
"""Provide functionality to interact with Cast devices on the network."""
import asyncio
import logging
import threading
from typing import Optional, Tuple
import attr
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_TVSHOW,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
CONF_HOST,
EVENT_HOMEASSISTANT_STOP,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import callback
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
import homeassistant.util.dt as dt_util
from homeassistant.util.logging import async_create_catching_coro
from . import DOMAIN as CAST_DOMAIN
DEPENDENCIES = ("cast",)

_LOGGER = logging.getLogger(__name__)

CONF_IGNORE_CEC = "ignore_cec"
CAST_SPLASH = "https://home-assistant.io/images/cast/splash.png"
# Standard chromecast port; a non-default port marks an audio group
# (see ChromecastInfo.is_audio_group).
DEFAULT_PORT = 8009
# Feature bitmask advertised for every cast device.
SUPPORT_CAST = (
    SUPPORT_PAUSE
    | SUPPORT_PLAY
    | SUPPORT_PLAY_MEDIA
    | SUPPORT_STOP
    | SUPPORT_TURN_OFF
    | SUPPORT_TURN_ON
    | SUPPORT_VOLUME_MUTE
    | SUPPORT_VOLUME_SET
)
# Stores a threading.Lock that is held by the internal pychromecast discovery.
INTERNAL_DISCOVERY_RUNNING_KEY = "cast_discovery_running"
# Stores all ChromecastInfo we encountered through discovery or config as a set
# If we find a chromecast with a new host, the old one will be removed again.
KNOWN_CHROMECAST_INFO_KEY = "cast_known_chromecasts"
# Stores UUIDs of cast devices that were added as entities. Doesn't store
# None UUIDs.
ADDED_CAST_DEVICES_KEY = "cast_added_cast_devices"
# Stores an audio group manager.
CAST_MULTIZONE_MANAGER_KEY = "cast_multizone_manager"
# Dispatcher signal fired with a ChromecastInfo every time we discover a new
# Chromecast or receive it through configuration
SIGNAL_CAST_DISCOVERED = "cast_discovered"
# Dispatcher signal fired with a ChromecastInfo every time a Chromecast is
# removed
SIGNAL_CAST_REMOVED = "cast_removed"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_HOST): cv.string,
        vol.Optional(CONF_IGNORE_CEC, default=[]): vol.All(cv.ensure_list, [cv.string]),
    }
)
@attr.s(slots=True, frozen=True)
class ChromecastInfo:
    """Class to hold all data about a chromecast for creating connections.

    This also has the same attributes as the mDNS fields by zeroconf.
    """

    host = attr.ib(type=str)
    port = attr.ib(type=int)
    # mDNS service name when discovered via zeroconf; None for configured hosts.
    service = attr.ib(type=Optional[str], default=None)
    uuid = attr.ib(
        type=Optional[str], converter=attr.converters.optional(str), default=None
    )  # always convert UUID to string if not None
    manufacturer = attr.ib(type=str, default="")
    model_name = attr.ib(type=str, default="")
    friendly_name = attr.ib(type=Optional[str], default=None)
    # True/False once determined; None while unknown (audio groups only).
    is_dynamic_group = attr.ib(type=Optional[bool], default=None)

    @property
    def is_audio_group(self) -> bool:
        """Return if this is an audio group."""
        # Audio groups are served on a port other than the default 8009.
        return self.port != DEFAULT_PORT

    @property
    def is_information_complete(self) -> bool:
        """Return if all information is filled out."""
        want_dynamic_group = self.is_audio_group
        have_dynamic_group = self.is_dynamic_group is not None
        # Every attribute except is_dynamic_group must be truthy (non-empty).
        have_all_except_dynamic_group = all(
            attr.astuple(
                self,
                filter=attr.filters.exclude(
                    attr.fields(ChromecastInfo).is_dynamic_group
                ),
            )
        )
        return have_all_except_dynamic_group and (
            not want_dynamic_group or have_dynamic_group
        )

    @property
    def host_port(self) -> Tuple[str, int]:
        """Return the host+port tuple."""
        return self.host, self.port
def _is_matching_dynamic_group(
    our_info: ChromecastInfo, new_info: ChromecastInfo
) -> bool:
    """Return whether *new_info* is the dynamic group belonging to *our_info*."""
    # Only an audio group can own a dynamic group, and the dynamic group
    # carries the same friendly name as its leader.
    return (
        our_info.is_audio_group
        and new_info.is_dynamic_group
        and new_info.friendly_name == our_info.friendly_name
    )
def _fill_out_missing_chromecast_info(info: ChromecastInfo) -> ChromecastInfo:
    """Fill out missing attributes of ChromecastInfo using blocking HTTP."""
    if info.is_information_complete:
        # We have all information, no need to check HTTP API. Or this is an
        # audio group, so checking via HTTP won't give us any new information.
        return info
    # Fill out missing information via HTTP dial.
    from pychromecast import dial
    if info.is_audio_group:
        # For audio groups, only the dynamic-group flag can be determined.
        is_dynamic_group = False
        http_group_status = None
        dynamic_groups = []
        if info.uuid:
            http_group_status = dial.get_multizone_status(
                info.host,
                services=[info.service],
                zconf=ChromeCastZeroconf.get_zeroconf(),
            )
            if http_group_status is not None:
                dynamic_groups = [str(g.uuid) for g in http_group_status.dynamic_groups]
                is_dynamic_group = info.uuid in dynamic_groups
        # Return a copy with only is_dynamic_group updated.
        return ChromecastInfo(
            service=info.service,
            host=info.host,
            port=info.port,
            uuid=info.uuid,
            friendly_name=info.friendly_name,
            manufacturer=info.manufacturer,
            model_name=info.model_name,
            is_dynamic_group=is_dynamic_group,
        )
    http_device_status = dial.get_device_status(
        info.host, services=[info.service], zconf=ChromeCastZeroconf.get_zeroconf()
    )
    if http_device_status is None:
        # HTTP dial didn't give us any new information.
        return info
    # Prefer already-known values; fall back to what the device reports.
    return ChromecastInfo(
        service=info.service,
        host=info.host,
        port=info.port,
        uuid=(info.uuid or http_device_status.uuid),
        friendly_name=(info.friendly_name or http_device_status.friendly_name),
        manufacturer=(info.manufacturer or http_device_status.manufacturer),
        model_name=(info.model_name or http_device_status.model_name),
    )
def _discover_chromecast(hass: HomeAssistantType, info: ChromecastInfo):
    """Record a discovered chromecast, fill in details and notify listeners."""
    if info in hass.data[KNOWN_CHROMECAST_INFO_KEY]:
        # NOTE(review): this branch only logs; processing continues for
        # already-known devices too — confirm whether an early return was
        # intended here.
        _LOGGER.debug("Discovered previous chromecast %s", info)
    # Either discovered completely new chromecast or a "moved" one.
    info = _fill_out_missing_chromecast_info(info)
    _LOGGER.debug("Discovered chromecast %s", info)
    if info.uuid is not None:
        # Remove previous cast infos with same uuid from known chromecasts.
        same_uuid = set(
            x for x in hass.data[KNOWN_CHROMECAST_INFO_KEY] if info.uuid == x.uuid
        )
        hass.data[KNOWN_CHROMECAST_INFO_KEY] -= same_uuid
    hass.data[KNOWN_CHROMECAST_INFO_KEY].add(info)
    dispatcher_send(hass, SIGNAL_CAST_DISCOVERED, info)
def _remove_chromecast(hass: HomeAssistantType, info: ChromecastInfo):
    """Log a removed chromecast and broadcast the removal signal."""
    # Removed chromecast
    _LOGGER.debug("Removed chromecast %s", info)
    dispatcher_send(hass, SIGNAL_CAST_REMOVED, info)
class ChromeCastZeroconf:
    """Module-wide holder for the zeroconf instance used by cast discovery."""

    __zconf = None

    @classmethod
    def set_zeroconf(cls, zconf):
        """Store *zconf* as the shared zeroconf instance."""
        cls.__zconf = zconf

    @classmethod
    def get_zeroconf(cls):
        """Return the shared zeroconf instance (None until set)."""
        return cls.__zconf
def _setup_internal_discovery(hass: HomeAssistantType) -> None:
    """Set up the pychromecast internal discovery."""
    if INTERNAL_DISCOVERY_RUNNING_KEY not in hass.data:
        hass.data[INTERNAL_DISCOVERY_RUNNING_KEY] = threading.Lock()
    # The lock is held for as long as discovery runs; a failed acquire
    # means another caller already started it.
    if not hass.data[INTERNAL_DISCOVERY_RUNNING_KEY].acquire(blocking=False):
        # Internal discovery is already running
        return
    import pychromecast

    def internal_add_callback(name):
        """Handle zeroconf discovery of a new chromecast."""
        # mdns tuple layout per pychromecast:
        # (host, port, uuid, model_name, friendly_name).
        mdns = listener.services[name]
        _discover_chromecast(
            hass,
            ChromecastInfo(
                service=name,
                host=mdns[0],
                port=mdns[1],
                uuid=mdns[2],
                model_name=mdns[3],
                friendly_name=mdns[4],
            ),
        )

    def internal_remove_callback(name, mdns):
        """Handle zeroconf discovery of a removed chromecast."""
        _remove_chromecast(
            hass,
            ChromecastInfo(
                service=name,
                host=mdns[0],
                port=mdns[1],
                uuid=mdns[2],
                model_name=mdns[3],
                friendly_name=mdns[4],
            ),
        )

    _LOGGER.debug("Starting internal pychromecast discovery.")
    listener, browser = pychromecast.start_discovery(
        internal_add_callback, internal_remove_callback
    )
    ChromeCastZeroconf.set_zeroconf(browser.zc)

    def stop_discovery(event):
        """Stop discovery of new chromecasts."""
        _LOGGER.debug("Stopping internal pychromecast discovery.")
        pychromecast.stop_discovery(browser)
        hass.data[INTERNAL_DISCOVERY_RUNNING_KEY].release()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_discovery)
@callback
def _async_create_cast_device(hass: HomeAssistantType, info: ChromecastInfo):
    """Create a CastDevice Entity from the chromecast object.

    Returns None if the cast device has already been added.
    """
    _LOGGER.debug("_async_create_cast_device: %s", info)
    if info.uuid is None:
        # Without a UUID the device cannot be tracked for updates, so it is
        # created unconditionally and never remembered.
        return CastDevice(info)
    if info.is_dynamic_group:
        # Dynamic groups must never become entities of their own.
        return None
    registered_uuids = hass.data[ADDED_CAST_DEVICES_KEY]
    if info.uuid in registered_uuids:
        # Entity already exists; it takes care of moved hosts itself.
        return None
    # New cast device: remember its UUID and create the entity.
    registered_uuids.add(info.uuid)
    return CastDevice(info)
async def async_setup_platform(
    hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
    """Set up the Cast platform (deprecated YAML path).

    Kept for backward compatibility; warns and delegates to
    _async_setup_platform.
    """
    _LOGGER.warning(
        "Setting configuration for Cast via platform is deprecated. "
        "Configure via Cast integration instead."
    )
    await _async_setup_platform(hass, config, async_add_entities, discovery_info)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Cast from a config entry.

    Runs one platform setup per configured media_player section; raises
    PlatformNotReady if any of them failed so Home Assistant retries later.
    """
    config = hass.data[CAST_DOMAIN].get("media_player", {})
    if not isinstance(config, list):
        config = [config]
    # no pending task
    done, _ = await asyncio.wait(
        [_async_setup_platform(hass, cfg, async_add_entities, None) for cfg in config]
    )
    # Query each task's exception once (the original called task.exception()
    # twice per task and built a throwaway list inside any()); log only the
    # real failures rather than every None.
    failures = [exc for exc in (task.exception() for task in done) if exc is not None]
    if failures:
        for exception in failures:
            _LOGGER.debug("Failed to setup chromecast", exc_info=exception)
        raise PlatformNotReady
async def _async_setup_platform(
    hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info
):
    """Set up the cast platform."""
    import pychromecast

    # Import CEC IGNORE attributes
    pychromecast.IGNORE_CEC += config.get(CONF_IGNORE_CEC, [])
    hass.data.setdefault(ADDED_CAST_DEVICES_KEY, set())
    hass.data.setdefault(KNOWN_CHROMECAST_INFO_KEY, set())
    # info stays None when discovery should pick up every device.
    info = None
    if discovery_info is not None:
        info = ChromecastInfo(host=discovery_info["host"], port=discovery_info["port"])
    elif CONF_HOST in config:
        # Manually configured host; assumed to be a regular cast device.
        info = ChromecastInfo(host=config[CONF_HOST], port=DEFAULT_PORT)

    @callback
    def async_cast_discovered(discover: ChromecastInfo) -> None:
        """Handle discovery of a new chromecast."""
        if info is not None and info.host_port != discover.host_port:
            # Not our requested cast device.
            return
        cast_device = _async_create_cast_device(hass, discover)
        if cast_device is not None:
            async_add_entities([cast_device])

    async_dispatcher_connect(hass, SIGNAL_CAST_DISCOVERED, async_cast_discovered)
    # Re-play the callback for all past chromecasts, store the objects in
    # a list to avoid concurrent modification resulting in exception.
    for chromecast in list(hass.data[KNOWN_CHROMECAST_INFO_KEY]):
        async_cast_discovered(chromecast)
    if info is None or info.is_audio_group:
        # If we were a) explicitly told to enable discovery or
        # b) have an audio group cast device, we need internal discovery.
        hass.async_add_job(_setup_internal_discovery, hass)
    else:
        # Single configured device: resolve its details in the executor,
        # then announce it through the normal discovery path.
        info = await hass.async_add_job(_fill_out_missing_chromecast_info, info)
        if info.friendly_name is None:
            _LOGGER.debug(
                "Cannot retrieve detail information for chromecast"
                " %s, the device may not be online",
                info,
            )
        hass.async_add_job(_discover_chromecast, hass, info)
class CastStatusListener:
    """Helper class to handle pychromecast status callbacks.

    Necessary because a CastDevice entity can create a new socket client
    and therefore callbacks from multiple chromecast connections can
    potentially arrive. This class allows invalidating past chromecast objects.
    """

    def __init__(self, cast_device, chromecast, mz_mgr):
        """Initialize the status listener."""
        self._cast_device = cast_device
        self._uuid = chromecast.uuid
        self._valid = True  # cleared by invalidate(); gates all forwarding
        self._mz_mgr = mz_mgr
        chromecast.register_status_listener(self)
        chromecast.socket_client.media_controller.register_status_listener(self)
        chromecast.register_connection_listener(self)
        # pylint: disable=protected-access
        # Audio groups are tracked by the multizone manager; regular devices
        # register as a member listener instead.
        if cast_device._cast_info.is_audio_group:
            self._mz_mgr.add_multizone(chromecast)
        else:
            self._mz_mgr.register_listener(chromecast.uuid, self)

    def new_cast_status(self, cast_status):
        """Handle reception of a new CastStatus."""
        if self._valid:
            self._cast_device.new_cast_status(cast_status)

    def new_media_status(self, media_status):
        """Handle reception of a new MediaStatus."""
        if self._valid:
            self._cast_device.new_media_status(media_status)

    def new_connection_status(self, connection_status):
        """Handle reception of a new ConnectionStatus."""
        if self._valid:
            self._cast_device.new_connection_status(connection_status)

    @staticmethod
    def added_to_multizone(group_uuid):
        """Handle the cast added to a group."""
        pass

    def removed_from_multizone(self, group_uuid):
        """Handle the cast removed from a group."""
        if self._valid:
            # Clear any group media status for this device.
            self._cast_device.multizone_new_media_status(group_uuid, None)

    def multizone_new_cast_status(self, group_uuid, cast_status):
        """Handle reception of a new CastStatus for a group."""
        pass

    def multizone_new_media_status(self, group_uuid, media_status):
        """Handle reception of a new MediaStatus for a group."""
        if self._valid:
            self._cast_device.multizone_new_media_status(group_uuid, media_status)

    def invalidate(self):
        """Invalidate this status listener.

        All following callbacks won't be forwarded.
        """
        # pylint: disable=protected-access
        # Undo the multizone registration made in __init__.
        if self._cast_device._cast_info.is_audio_group:
            self._mz_mgr.remove_multizone(self._uuid)
        else:
            self._mz_mgr.deregister_listener(self._uuid, self)
        self._valid = False
class DynamicGroupCastStatusListener:
    """Helper class to handle pychromecast status callbacks.

    Necessary because a CastDevice entity can create a new socket client
    and therefore callbacks from multiple chromecast connections can
    potentially arrive. This class allows invalidating past chromecast objects.
    """

    def __init__(self, cast_device, chromecast, mz_mgr):
        """Initialize the status listener."""
        self._cast_device = cast_device
        self._uuid = chromecast.uuid
        self._valid = True  # cleared by invalidate(); gates forwarding
        self._mz_mgr = mz_mgr
        chromecast.register_status_listener(self)
        chromecast.socket_client.media_controller.register_status_listener(self)
        chromecast.register_connection_listener(self)
        self._mz_mgr.add_multizone(chromecast)

    def new_cast_status(self, cast_status):
        """Handle reception of a new CastStatus."""
        # Cast status of the dynamic group itself is not forwarded.
        pass

    def new_media_status(self, media_status):
        """Handle reception of a new MediaStatus."""
        if self._valid:
            self._cast_device.new_dynamic_group_media_status(media_status)

    def new_connection_status(self, connection_status):
        """Handle reception of a new ConnectionStatus."""
        if self._valid:
            self._cast_device.new_dynamic_group_connection_status(connection_status)

    def invalidate(self):
        """Invalidate this status listener.

        All following callbacks won't be forwarded.
        """
        self._mz_mgr.remove_multizone(self._uuid)
        self._valid = False
class CastDevice(MediaPlayerDevice):
"""Representation of a Cast device on the network.
This class is the holder of the pychromecast.Chromecast object and its
socket client. It therefore handles all reconnects and audio group changing
"elected leader" itself.
"""
def __init__(self, cast_info):
"""Initialize the cast device."""
import pychromecast # noqa: pylint: disable=unused-import
self._cast_info = cast_info # type: ChromecastInfo
self.services = None
if cast_info.service:
self.services = set()
self.services.add(cast_info.service)
self._chromecast = None # type: Optional[pychromecast.Chromecast]
self.cast_status = None
self.media_status = None
self.media_status_received = None
self._dynamic_group_cast_info = None # type: ChromecastInfo
self._dynamic_group_cast = None # type: Optional[pychromecast.Chromecast]
self.dynamic_group_media_status = None
self.dynamic_group_media_status_received = None
self.mz_media_status = {}
self.mz_media_status_received = {}
self.mz_mgr = None
self._available = False # type: bool
self._dynamic_group_available = False # type: bool
self._status_listener = None # type: Optional[CastStatusListener]
self._dynamic_group_status_listener = (
None
) # type: Optional[DynamicGroupCastStatusListener]
self._add_remove_handler = None
self._del_remove_handler = None
async def async_added_to_hass(self):
"""Create chromecast object when added to hass."""
@callback
def async_cast_discovered(discover: ChromecastInfo):
"""Handle discovery of new Chromecast."""
if self._cast_info.uuid is None:
# We can't handle empty UUIDs
return
if _is_matching_dynamic_group(self._cast_info, discover):
_LOGGER.debug("Discovered matching dynamic group: %s", discover)
self.hass.async_create_task(
async_create_catching_coro(self.async_set_dynamic_group(discover))
)
return
if self._cast_info.uuid != discover.uuid:
# Discovered is not our device.
return
if self.services is None:
_LOGGER.warning(
"[%s %s (%s:%s)] Received update for manually added Cast",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.host,
self._cast_info.port,
)
return
_LOGGER.debug("Discovered chromecast with same UUID: %s", discover)
self.hass.async_create_task(
async_create_catching_coro(self.async_set_cast_info(discover))
)
def async_cast_removed(discover: ChromecastInfo):
"""Handle removal of Chromecast."""
if self._cast_info.uuid is None:
# We can't handle empty UUIDs
return
if (
self._dynamic_group_cast_info is not None
and self._dynamic_group_cast_info.uuid == discover.uuid
):
_LOGGER.debug("Removed matching dynamic group: %s", discover)
self.hass.async_create_task(
async_create_catching_coro(self.async_del_dynamic_group())
)
return
if self._cast_info.uuid != discover.uuid:
# Removed is not our device.
return
_LOGGER.debug("Removed chromecast with same UUID: %s", discover)
self.hass.async_create_task(
async_create_catching_coro(self.async_del_cast_info(discover))
)
async def async_stop(event):
"""Disconnect socket on Home Assistant stop."""
await self._async_disconnect()
self._add_remove_handler = async_dispatcher_connect(
self.hass, SIGNAL_CAST_DISCOVERED, async_cast_discovered
)
self._del_remove_handler = async_dispatcher_connect(
self.hass, SIGNAL_CAST_REMOVED, async_cast_removed
)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop)
self.hass.async_create_task(
async_create_catching_coro(self.async_set_cast_info(self._cast_info))
)
for info in self.hass.data[KNOWN_CHROMECAST_INFO_KEY]:
if _is_matching_dynamic_group(self._cast_info, info):
_LOGGER.debug(
"[%s %s (%s:%s)] Found dynamic group: %s",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.host,
self._cast_info.port,
info,
)
self.hass.async_create_task(
async_create_catching_coro(self.async_set_dynamic_group(info))
)
break
async def async_will_remove_from_hass(self) -> None:
"""Disconnect Chromecast object when removed."""
await self._async_disconnect()
if self._cast_info.uuid is not None:
# Remove the entity from the added casts so that it can dynamically
# be re-added again.
self.hass.data[ADDED_CAST_DEVICES_KEY].remove(self._cast_info.uuid)
if self._add_remove_handler:
self._add_remove_handler()
if self._del_remove_handler:
self._del_remove_handler()
async def async_set_cast_info(self, cast_info):
"""Set the cast information and set up the chromecast object."""
import pychromecast
self._cast_info = cast_info
if self.services is not None:
if cast_info.service not in self.services:
_LOGGER.debug(
"[%s %s (%s:%s)] Got new service: %s (%s)",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.host,
self._cast_info.port,
cast_info.service,
self.services,
)
self.services.add(cast_info.service)
if self._chromecast is not None:
# Only setup the chromecast once, added elements to services
# will automatically be picked up.
return
# pylint: disable=protected-access
if self.services is None:
_LOGGER.debug(
"[%s %s (%s:%s)] Connecting to cast device by host %s",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.host,
self._cast_info.port,
cast_info,
)
chromecast = await self.hass.async_add_job(
pychromecast._get_chromecast_from_host,
(
cast_info.host,
cast_info.port,
cast_info.uuid,
cast_info.model_name,
cast_info.friendly_name,
),
)
else:
_LOGGER.debug(
"[%s %s (%s:%s)] Connecting to cast device by service %s",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.host,
self._cast_info.port,
self.services,
)
chromecast = await self.hass.async_add_job(
pychromecast._get_chromecast_from_service,
(
self.services,
ChromeCastZeroconf.get_zeroconf(),
cast_info.uuid,
cast_info.model_name,
cast_info.friendly_name,
),
)
self._chromecast = chromecast
if CAST_MULTIZONE_MANAGER_KEY not in self.hass.data:
from pychromecast.controllers.multizone import MultizoneManager
self.hass.data[CAST_MULTIZONE_MANAGER_KEY] = MultizoneManager()
self.mz_mgr = self.hass.data[CAST_MULTIZONE_MANAGER_KEY]
self._status_listener = CastStatusListener(self, chromecast, self.mz_mgr)
self._available = False
self.cast_status = chromecast.status
self.media_status = chromecast.media_controller.status
self._chromecast.start()
self.async_schedule_update_ha_state()
async def async_del_cast_info(self, cast_info):
"""Remove the service."""
self.services.discard(cast_info.service)
_LOGGER.debug(
"[%s %s (%s:%s)] Remove service: %s (%s)",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.host,
self._cast_info.port,
cast_info.service,
self.services,
)
async def async_set_dynamic_group(self, cast_info):
"""Set the cast information and set up the chromecast object."""
import pychromecast
_LOGGER.debug(
"[%s %s (%s:%s)] Connecting to dynamic group by host %s",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.host,
self._cast_info.port,
cast_info,
)
await self.async_del_dynamic_group()
self._dynamic_group_cast_info = cast_info
# pylint: disable=protected-access
chromecast = await self.hass.async_add_executor_job(
pychromecast._get_chromecast_from_host,
(
cast_info.host,
cast_info.port,
cast_info.uuid,
cast_info.model_name,
cast_info.friendly_name,
),
)
self._dynamic_group_cast = chromecast
if CAST_MULTIZONE_MANAGER_KEY not in self.hass.data:
from pychromecast.controllers.multizone import MultizoneManager
self.hass.data[CAST_MULTIZONE_MANAGER_KEY] = MultizoneManager()
mz_mgr = self.hass.data[CAST_MULTIZONE_MANAGER_KEY]
self._dynamic_group_status_listener = DynamicGroupCastStatusListener(
self, chromecast, mz_mgr
)
self._dynamic_group_available = False
self.dynamic_group_media_status = chromecast.media_controller.status
self._dynamic_group_cast.start()
self.async_schedule_update_ha_state()
async def async_del_dynamic_group(self):
"""Remove the dynamic group."""
cast_info = self._dynamic_group_cast_info
_LOGGER.debug(
"[%s %s (%s:%s)] Remove dynamic group: %s",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.host,
self._cast_info.port,
cast_info.service if cast_info else None,
)
self._dynamic_group_available = False
self._dynamic_group_cast_info = None
if self._dynamic_group_cast is not None:
await self.hass.async_add_executor_job(self._dynamic_group_cast.disconnect)
self._dynamic_group_invalidate()
self.async_schedule_update_ha_state()
async def _async_disconnect(self):
"""Disconnect Chromecast object if it is set."""
if self._chromecast is None:
# Can't disconnect if not connected.
return
_LOGGER.debug(
"[%s %s (%s:%s)] Disconnecting from chromecast socket.",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.host,
self._cast_info.port,
)
self._available = False
self.async_schedule_update_ha_state()
await self.hass.async_add_executor_job(self._chromecast.disconnect)
if self._dynamic_group_cast is not None:
await self.hass.async_add_executor_job(self._dynamic_group_cast.disconnect)
self._invalidate()
self.async_schedule_update_ha_state()
def _invalidate(self):
"""Invalidate some attributes."""
self._chromecast = None
self.cast_status = None
self.media_status = None
self.media_status_received = None
self.mz_media_status = {}
self.mz_media_status_received = {}
self.mz_mgr = None
if self._status_listener is not None:
self._status_listener.invalidate()
self._status_listener = None
def _dynamic_group_invalidate(self):
"""Invalidate some attributes."""
self._dynamic_group_cast = None
self.dynamic_group_media_status = None
self.dynamic_group_media_status_received = None
if self._dynamic_group_status_listener is not None:
self._dynamic_group_status_listener.invalidate()
self._dynamic_group_status_listener = None
# ========== Callbacks ==========
def new_cast_status(self, cast_status):
"""Handle updates of the cast status."""
self.cast_status = cast_status
self.schedule_update_ha_state()
def new_media_status(self, media_status):
"""Handle updates of the media status."""
self.media_status = media_status
self.media_status_received = dt_util.utcnow()
self.schedule_update_ha_state()
def new_connection_status(self, connection_status):
"""Handle updates of connection status."""
from pychromecast.socket_client import (
CONNECTION_STATUS_CONNECTED,
CONNECTION_STATUS_DISCONNECTED,
)
_LOGGER.debug(
"[%s %s (%s:%s)] Received cast device connection status: %s",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.host,
self._cast_info.port,
connection_status.status,
)
if connection_status.status == CONNECTION_STATUS_DISCONNECTED:
self._available = False
self._invalidate()
self.schedule_update_ha_state()
return
new_available = connection_status.status == CONNECTION_STATUS_CONNECTED
if new_available != self._available:
# Connection status callbacks happen often when disconnected.
# Only update state when availability changed to put less pressure
# on state machine.
_LOGGER.debug(
"[%s %s (%s:%s)] Cast device availability changed: %s",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.host,
self._cast_info.port,
connection_status.status,
)
info = self._cast_info
if info.friendly_name is None and not info.is_audio_group:
# We couldn't find friendly_name when the cast was added, retry
self._cast_info = _fill_out_missing_chromecast_info(info)
self._available = new_available
self.schedule_update_ha_state()
def new_dynamic_group_media_status(self, media_status):
"""Handle updates of the media status."""
self.dynamic_group_media_status = media_status
self.dynamic_group_media_status_received = dt_util.utcnow()
self.schedule_update_ha_state()
def new_dynamic_group_connection_status(self, connection_status):
"""Handle updates of connection status."""
from pychromecast.socket_client import (
CONNECTION_STATUS_CONNECTED,
CONNECTION_STATUS_DISCONNECTED,
)
_LOGGER.debug(
"[%s %s (%s:%s)] Received dynamic group connection status: %s",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.host,
self._cast_info.port,
connection_status.status,
)
if connection_status.status == CONNECTION_STATUS_DISCONNECTED:
self._dynamic_group_available = False
self._dynamic_group_invalidate()
self.schedule_update_ha_state()
return
new_available = connection_status.status == CONNECTION_STATUS_CONNECTED
if new_available != self._dynamic_group_available:
# Connection status callbacks happen often when disconnected.
# Only update state when availability changed to put less pressure
# on state machine.
_LOGGER.debug(
"[%s %s (%s:%s)] Dynamic group availability changed: %s",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.host,
self._cast_info.port,
connection_status.status,
)
self._dynamic_group_available = new_available
self.schedule_update_ha_state()
def multizone_new_media_status(self, group_uuid, media_status):
"""Handle updates of audio group media status."""
_LOGGER.debug(
"[%s %s (%s:%s)] Multizone %s media status: %s",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.host,
self._cast_info.port,
group_uuid,
media_status,
)
self.mz_media_status[group_uuid] = media_status
self.mz_media_status_received[group_uuid] = dt_util.utcnow()
self.schedule_update_ha_state()
# ========== Service Calls ==========
    def _media_controller(self):
        """
        Return the media controller to use for media commands.

        First try our own cast; if its media status is missing or UNKNOWN,
        fall back to the dynamic group's controller, and finally to any
        audio group (multizone) this cast is a member of with known media.
        """
        media_status = self.media_status
        media_controller = self._chromecast.media_controller
        if (
            media_status is None or media_status.player_state == "UNKNOWN"
        ) and self._dynamic_group_cast is not None:
            media_status = self.dynamic_group_media_status
            media_controller = self._dynamic_group_cast.media_controller
        if media_status is None or media_status.player_state == "UNKNOWN":
            groups = self.mz_media_status
            for k, val in groups.items():
                if val and val.player_state != "UNKNOWN":
                    # Delegate to the controller of the first active group.
                    media_controller = self.mz_mgr.get_multizone_mediacontroller(k)
                    break
        return media_controller
def turn_on(self):
"""Turn on the cast device."""
import pychromecast
if not self._chromecast.is_idle:
# Already turned on
return
if self._chromecast.app_id is not None:
# Quit the previous app before starting splash screen
self._chromecast.quit_app()
# The only way we can turn the Chromecast is on is by launching an app
self._chromecast.play_media(CAST_SPLASH, pychromecast.STREAM_TYPE_BUFFERED)
def turn_off(self):
"""Turn off the cast device."""
self._chromecast.quit_app()
def mute_volume(self, mute):
"""Mute the volume."""
self._chromecast.set_volume_muted(mute)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._chromecast.set_volume(volume)
def media_play(self):
"""Send play command."""
media_controller = self._media_controller()
media_controller.play()
def media_pause(self):
"""Send pause command."""
media_controller = self._media_controller()
media_controller.pause()
def media_stop(self):
"""Send stop command."""
media_controller = self._media_controller()
media_controller.stop()
def media_previous_track(self):
"""Send previous track command."""
media_controller = self._media_controller()
media_controller.queue_prev()
def media_next_track(self):
"""Send next track command."""
media_controller = self._media_controller()
media_controller.queue_next()
def media_seek(self, position):
"""Seek the media to a specific location."""
media_controller = self._media_controller()
media_controller.seek(position)
def play_media(self, media_type, media_id, **kwargs):
"""Play media from a URL."""
# We do not want this to be forwarded to a group / dynamic group
self._chromecast.media_controller.play_media(media_id, media_type)
# ========== Properties ==========
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._cast_info.friendly_name
@property
def device_info(self):
"""Return information about the device."""
cast_info = self._cast_info
if cast_info.model_name == "Google Cast Group":
return None
return {
"name": cast_info.friendly_name,
"identifiers": {(CAST_DOMAIN, cast_info.uuid.replace("-", ""))},
"model": cast_info.model_name,
"manufacturer": cast_info.manufacturer,
}
def _media_status(self):
"""
Return media status.
First try from our own cast, then dynamic groups and finally
groups which our cast is a member in.
"""
media_status = self.media_status
media_status_received = self.media_status_received
if (
media_status is None or media_status.player_state == "UNKNOWN"
) and self._dynamic_group_cast is not None:
media_status = self.dynamic_group_media_status
media_status_received = self.dynamic_group_media_status_received
if media_status is None or media_status.player_state == "UNKNOWN":
groups = self.mz_media_status
for k, val in groups.items():
if val and val.player_state != "UNKNOWN":
media_status = val
media_status_received = self.mz_media_status_received[k]
break
return (media_status, media_status_received)
@property
def state(self):
"""Return the state of the player."""
media_status, _ = self._media_status()
if media_status is None:
return None
if media_status.player_is_playing:
return STATE_PLAYING
if media_status.player_is_paused:
return STATE_PAUSED
if media_status.player_is_idle:
return STATE_IDLE
if self._chromecast is not None and self._chromecast.is_idle:
return STATE_OFF
return None
@property
def available(self):
"""Return True if the cast device is connected."""
return self._available
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self.cast_status.volume_level if self.cast_status else None
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self.cast_status.volume_muted if self.cast_status else None
@property
def media_content_id(self):
"""Content ID of current playing media."""
media_status, _ = self._media_status()
return media_status.content_id if media_status else None
@property
def media_content_type(self):
"""Content type of current playing media."""
media_status, _ = self._media_status()
if media_status is None:
return None
if media_status.media_is_tvshow:
return MEDIA_TYPE_TVSHOW
if media_status.media_is_movie:
return MEDIA_TYPE_MOVIE
if media_status.media_is_musictrack:
return MEDIA_TYPE_MUSIC
return None
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
media_status, _ = self._media_status()
return media_status.duration if media_status else None
@property
def media_image_url(self):
"""Image url of current playing media."""
media_status, _ = self._media_status()
if media_status is None:
return None
images = media_status.images
return images[0].url if images and images[0].url else None
@property
def media_image_remotely_accessible(self) -> bool:
"""If the image url is remotely accessible."""
return True
@property
def media_title(self):
"""Title of current playing media."""
media_status, _ = self._media_status()
return media_status.title if media_status else None
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
media_status, _ = self._media_status()
return media_status.artist if media_status else None
@property
def media_album_name(self):
"""Album of current playing media (Music track only)."""
media_status, _ = self._media_status()
return media_status.album_name if media_status else None
@property
def media_album_artist(self):
"""Album artist of current playing media (Music track only)."""
media_status, _ = self._media_status()
return media_status.album_artist if media_status else None
@property
def media_track(self):
"""Track number of current playing media (Music track only)."""
media_status, _ = self._media_status()
return media_status.track if media_status else None
@property
def media_series_title(self):
"""Return the title of the series of current playing media."""
media_status, _ = self._media_status()
return media_status.series_title if media_status else None
@property
def media_season(self):
"""Season of current playing media (TV Show only)."""
media_status, _ = self._media_status()
return media_status.season if media_status else None
@property
def media_episode(self):
"""Episode of current playing media (TV Show only)."""
media_status, _ = self._media_status()
return media_status.episode if media_status else None
@property
def app_id(self):
"""Return the ID of the current running app."""
return self._chromecast.app_id if self._chromecast else None
@property
def app_name(self):
"""Name of the current running app."""
return self._chromecast.app_display_name if self._chromecast else None
@property
def supported_features(self):
"""Flag media player features that are supported."""
support = SUPPORT_CAST
media_status, _ = self._media_status()
if media_status:
if media_status.supports_queue_next:
support |= SUPPORT_PREVIOUS_TRACK
if media_status.supports_queue_next:
support |= SUPPORT_NEXT_TRACK
if media_status.supports_seek:
support |= SUPPORT_SEEK
return support
@property
def media_position(self):
"""Position of current playing media in seconds."""
media_status, _ = self._media_status()
if media_status is None or not (
media_status.player_is_playing
or media_status.player_is_paused
or media_status.player_is_idle
):
return None
return media_status.current_time
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
_, media_status_recevied = self._media_status()
return media_status_recevied
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
return self._cast_info.uuid
|
|
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.db import transaction
from django.utils import timezone
from django.conf import settings
import django
import os
import logging
import sys
from datetime import datetime, timedelta
from compat import atomic
from compat import import_module
from background_task.exceptions import BackgroundTaskError
from background_task.models import Task
from background_task.models import CompletedTask
from background_task.signals import task_created, task_error, task_successful
import threading
logger = logging.getLogger(__name__)
BACKGROUND_TASK_RUN_ASYNC = getattr(settings, 'BACKGROUND_TASK_RUN_ASYNC', False)
def bg_runner(proxy_task, *args, **kwargs):
    """Execute the function attached to ``proxy_task``.

    Used directly, or as a thread target when BACKGROUND_TASK_RUN_ASYNC is
    set. On success the matching Task row is completed and deleted; on any
    exception the task is rescheduled and ``task_error`` is emitted.
    """
    task = None
    try:
        func = getattr(proxy_task, 'task_function', None)
        task_name = getattr(proxy_task, 'name', None)
        task_qs = Task.objects.get_task(task_name=task_name, args=args, kwargs=kwargs)
        if task_qs:
            task = task_qs[0]
        if func is None:
            raise BackgroundTaskError("Function is None, can't execute!")
        func(*args, **kwargs)
        if task:
            # Task ran successfully: record a CompletedTask, notify
            # listeners, then remove the queued row.
            task.increment_attempts()
            completed = task.create_completed_task()
            task_successful.send(sender=task.__class__, task_id=task.id, completed_task=completed)
            task.delete()
            # Fix: use the module logger, not the root logger.
            logger.info('Ran task and deleting %s', task)
    except Exception as ex:
        t, e, traceback = sys.exc_info()
        if task:
            # Fix: logging.warn is a deprecated alias; use logger.warning.
            logger.warning('Rescheduling %s', task, exc_info=(t, e, traceback))
            task_error.send(sender=ex.__class__, task=task)
            task.reschedule(t, e, traceback)
        # Break the reference cycle created by holding the traceback.
        del traceback
class Tasks(object):
    """Registry of background-task proxies plus helpers to run them."""

    def __init__(self):
        self._tasks = {}
        self._runner = DBTaskRunner()
        self._task_proxy_class = TaskProxy
        self._bg_runner = bg_runner

    def background(self, name=None, schedule=None):
        '''
        decorator to turn a regular function into
        something that gets run asynchronously in
        the background, at a later time
        '''
        # Support bare usage (@tasks.background), where the decorated
        # function arrives as the first positional argument.
        fn = None
        if name and callable(name):
            fn, name = name, None

        def _decorator(fn):
            task_name = name or '%s.%s' % (fn.__module__, fn.__name__)
            proxy = self._task_proxy_class(task_name, fn, schedule, self._runner)
            self._tasks[task_name] = proxy
            return proxy

        return _decorator(fn) if fn else _decorator

    def run_task(self, task_name, args, kwargs):
        proxy_task = self._tasks[task_name]
        if BACKGROUND_TASK_RUN_ASYNC:
            # Fire-and-forget worker thread.
            worker = threading.Thread(
                target=self._bg_runner,
                args=(proxy_task,) + tuple(args),
                kwargs=kwargs,
            )
            worker.start()
        else:
            self._bg_runner(proxy_task, *args, **kwargs)

    def run_next_task(self):
        return self._runner.run_next_task(self)
class TaskSchedule(object):
    """Value object describing when and how a task should be scheduled."""

    # Scheduling actions.
    SCHEDULE = 0             # always create a new task
    RESCHEDULE_EXISTING = 1  # update run_at/priority of a matching task
    CHECK_EXISTING = 2       # skip creation if a matching task exists

    def __init__(self, run_at=None, priority=None, action=None):
        self._run_at = run_at
        self._priority = priority
        self._action = action

    @classmethod
    def create(cls, schedule):
        """Coerce ``schedule`` into a TaskSchedule instance.

        Accepts an existing TaskSchedule, an int/timedelta/datetime (used
        as run_at), a dict with run_at/priority/action keys, or None.
        """
        if isinstance(schedule, TaskSchedule):
            return schedule
        priority = None
        run_at = None
        action = None
        if schedule:
            if isinstance(schedule, (int, timedelta, datetime)):
                run_at = schedule
            else:
                run_at = schedule.get('run_at', None)
                priority = schedule.get('priority', None)
                action = schedule.get('action', None)
        # Fix: the classmethod's first argument was misnamed ``self`` and
        # the constructor was hard-coded; using ``cls`` respects subclasses.
        return cls(run_at=run_at, priority=priority, action=action)

    def merge(self, schedule):
        """Return a new schedule taking unset fields from ``schedule``."""
        params = {}
        for name in ['run_at', 'priority', 'action']:
            attr_name = '_%s' % name
            value = getattr(self, attr_name, None)
            if value is None:
                params[name] = getattr(schedule, attr_name, None)
            else:
                params[name] = value
        return TaskSchedule(**params)

    @property
    def run_at(self):
        # ints are seconds-from-now; timedeltas are offsets from now.
        run_at = self._run_at or timezone.now()
        if isinstance(run_at, int):
            run_at = timezone.now() + timedelta(seconds=run_at)
        if isinstance(run_at, timedelta):
            run_at = timezone.now() + run_at
        return run_at

    @property
    def priority(self):
        return self._priority or 0

    @property
    def action(self):
        return self._action or TaskSchedule.SCHEDULE

    def __repr__(self):
        return 'TaskSchedule(run_at=%s, priority=%s)' % (self._run_at,
                                                         self._priority)

    def __eq__(self, other):
        # Robustness: comparing against a non-TaskSchedule no longer raises
        # AttributeError; Python falls back to identity/reflection.
        if not isinstance(other, TaskSchedule):
            return NotImplemented
        return self._run_at == other._run_at \
            and self._priority == other._priority \
            and self._action == other._action
class DBTaskRunner(object):
    '''
    Encapsulate the model related logic in here, in case
    we want to support different queues in the future
    '''

    def __init__(self):
        # Identify this worker by its OS process id when locking tasks.
        self.worker_name = str(os.getpid())

    def schedule(self, task_name, args, kwargs, run_at=None,
                 priority=0, action=TaskSchedule.SCHEDULE):
        '''Simply create a task object in the database'''
        task = Task.objects.new_task(task_name, args, kwargs,
                                     run_at, priority)
        if action != TaskSchedule.SCHEDULE:
            # For RESCHEDULE_EXISTING / CHECK_EXISTING, look for an
            # unlocked task with the same hash before saving a new one.
            task_hash = task.task_hash
            now = timezone.now()
            unlocked = Task.objects.unlocked(now)
            existing = unlocked.filter(task_hash=task_hash)
            if action == TaskSchedule.RESCHEDULE_EXISTING:
                updated = existing.update(run_at=run_at, priority=priority)
                if updated:
                    return
            elif action == TaskSchedule.CHECK_EXISTING:
                if existing.count():
                    return
        task.save()
        task_created.send(sender=self.__class__, task=task)
        return task

    @atomic
    def get_task_to_run(self, tasks):
        """Return the first available task this worker can lock, or None."""
        # Only consider tasks registered with this Tasks instance; cap the
        # lock attempts at 5 candidates per call.
        available_tasks = [task for task in Task.objects.find_available()
                           if task.task_name in tasks._tasks][:5]
        for task in available_tasks:
            # try to lock task
            locked_task = task.lock(self.worker_name)
            if locked_task:
                return locked_task
        return None

    @atomic
    def run_task(self, tasks, task):
        """Execute ``task`` through the Tasks registry."""
        # Fix: use the module-level logger rather than the root logger.
        logger.info('Running %s', task)
        args, kwargs = task.params()
        tasks.run_task(task.task_name, args, kwargs)

    @atomic
    def run_next_task(self, tasks):
        """Lock and run one task; return True if one was run."""
        # we need to commit to make sure
        # we can see new tasks as they arrive
        task = self.get_task_to_run(tasks)
        if task:
            self.run_task(tasks, task)
            return True
        return False
@python_2_unicode_compatible
class TaskProxy(object):
    """Callable returned by @tasks.background; calling it schedules a run
    of the wrapped function instead of executing it directly."""

    def __init__(self, name, task_function, schedule, runner):
        self.name = name
        self.task_function = task_function
        self.runner = runner
        self.schedule = TaskSchedule.create(schedule)

    def __call__(self, *args, **kwargs):
        # A per-call schedule override may be passed via kwargs; it takes
        # precedence over the decorator-time schedule.
        override = TaskSchedule.create(kwargs.pop('schedule', None))
        effective = override.merge(self.schedule)
        return self.runner.schedule(
            self.name, args, kwargs,
            effective.run_at, effective.priority, effective.action,
        )

    def __str__(self):
        return 'TaskProxy(%s)' % self.name
tasks = Tasks()
def autodiscover():
    '''autodiscover tasks.py files in much the same way as admin app'''
    # NOTE(review): ``imp`` is deprecated on Python 3, but this module still
    # supports Python 2 (see __future__/compat imports), so it is kept.
    import imp
    from django.conf import settings
    for app in settings.INSTALLED_APPS:
        # Skip apps that are plain modules without a package __path__.
        try:
            app_path = import_module(app).__path__
        except AttributeError:
            continue
        # Only import <app>.tasks when the submodule actually exists, so a
        # missing tasks.py is not an error.
        try:
            imp.find_module('tasks', app_path)
        except ImportError:
            continue
        import_module("%s.tasks" % app)
|
|
"""Plotting functions for visualizing distributions."""
from __future__ import division
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
from six import string_types
try:
import statsmodels.nonparametric.api as smnp
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
from .utils import set_hls_values, iqr, _kde_support
from .palettes import color_palette, blend_palette
from .axisgrid import JointGrid
def _freedman_diaconis_bins(a):
    """Calculate number of hist bins using Freedman-Diaconis rule.

    Returns an ``int`` (matplotlib's ``hist`` expects an integer bin
    count; ``np.ceil``/``np.sqrt`` return floats). Falls back to sqrt(n)
    bins when the IQR is 0, and to a single bin for fewer than two
    observations to avoid dividing by zero.
    """
    # From http://stats.stackexchange.com/questions/798/
    a = np.asarray(a)
    if len(a) < 2:
        return 1
    h = 2 * iqr(a) / (len(a) ** (1 / 3))
    # fall back to sqrt(n) bins if iqr is 0
    if h == 0:
        return int(np.sqrt(a.size))
    return int(np.ceil((a.max() - a.min()) / h))
def distplot(a, bins=None, hist=True, kde=True, rug=False, fit=None,
             hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,
             color=None, vertical=False, norm_hist=False, axlabel=None,
             label=None, ax=None):
    """Flexibly plot a univariate distribution of observations.

    This function combines the matplotlib ``hist`` function (with automatic
    calculation of a good default bin size) with the seaborn :func:`kdeplot`
    and :func:`rugplot` functions. It can also fit ``scipy.stats``
    distributions and plot the estimated PDF over the data.

    Parameters
    ----------
    a : Series, 1d-array, or list.
        Observed data. If this is a Series object with a ``name`` attribute,
        the name will be used to label the data axis.
    bins : argument for matplotlib hist(), or None, optional
        Specification of hist bins, or None to use Freedman-Diaconis rule.
    hist : bool, optional
        Whether to plot a (normed) histogram.
    kde : bool, optional
        Whether to plot a gaussian kernel density estimate.
    rug : bool, optional
        Whether to draw a rugplot on the support axis.
    fit : random variable object, optional
        An object with `fit` method, returning a tuple that can be passed
        to a `pdf` method as positional arguments following a grid of
        values to evaluate the pdf on.
    {hist, kde, rug, fit}_kws : dictionaries, optional
        Keyword arguments for underlying plotting functions.
    color : matplotlib color, optional
        Color to plot everything but the fitted curve in.
    vertical : bool, optional
        If True, observed values are on y-axis.
    norm_hist : bool, optional
        If True, the histogram height shows a density rather than a count.
        This is implied if a KDE or fitted density is plotted.
    axlabel : string, False, or None, optional
        Name for the support axis label. If None, will try to get it
        from a.name. If False, do not set a label.
    label : string, optional
        Legend label for the relevant component of the plot
    ax : matplotlib axis, optional
        if provided, plot on this axis

    Returns
    -------
    ax : matplotlib Axes
        Returns the Axes object with the plot for further tweaking.

    See Also
    --------
    kdeplot : Show a univariate or bivariate distribution with a kernel
              density estimate.
    rugplot : Draw small vertical lines to show each observation in a
              distribution.

    Examples
    --------
    Show a default plot with a kernel density estimate and histogram with bin
    size determined automatically with a reference rule:

    .. plot::
        :context: close-figs

        >>> import seaborn as sns, numpy as np
        >>> sns.set(rc={"figure.figsize": (8, 4)}); np.random.seed(0)
        >>> x = np.random.randn(100)
        >>> ax = sns.distplot(x)

    Use Pandas objects to get an informative axis label:

    .. plot::
        :context: close-figs

        >>> import pandas as pd
        >>> x = pd.Series(x, name="x variable")
        >>> ax = sns.distplot(x)

    Plot the distribution with a kernel density estimate and rug plot:

    .. plot::
        :context: close-figs

        >>> ax = sns.distplot(x, rug=True, hist=False)

    Plot the distribution with a histogram and maximum likelihood gaussian
    distribution fit:

    .. plot::
        :context: close-figs

        >>> from scipy.stats import norm
        >>> ax = sns.distplot(x, fit=norm, kde=False)

    Plot the distribution on the vertical axis:

    .. plot::
        :context: close-figs

        >>> ax = sns.distplot(x, vertical=True)

    Change the color of all the plot elements:

    .. plot::
        :context: close-figs

        >>> sns.set_color_codes()
        >>> ax = sns.distplot(x, color="y")

    Pass specific parameters to the underlying plot functions:

    .. plot::
        :context: close-figs

        >>> ax = sns.distplot(x, rug=True, rug_kws={"color": "g"},
        ...                   kde_kws={"color": "k", "lw": 3, "label": "KDE"},
        ...                   hist_kws={"histtype": "step", "linewidth": 3,
        ...                             "alpha": 1, "color": "g"})
    """
    if ax is None:
        ax = plt.gca()
    # Intelligently label the support axis
    label_ax = bool(axlabel)
    if axlabel is None and hasattr(a, "name"):
        axlabel = a.name
        if axlabel is not None:
            label_ax = True
    # Make a a 1-d array
    a = np.asarray(a).squeeze()
    # Decide if the hist is normed
    norm_hist = norm_hist or kde or (fit is not None)
    # Handle dictionary defaults
    if hist_kws is None:
        hist_kws = dict()
    if kde_kws is None:
        kde_kws = dict()
    if rug_kws is None:
        rug_kws = dict()
    if fit_kws is None:
        fit_kws = dict()
    # Get the color from the current color cycle by drawing and removing a
    # throwaway point at the data mean.
    if color is None:
        if vertical:
            line, = ax.plot(0, a.mean())
        else:
            line, = ax.plot(a.mean(), 0)
        color = line.get_color()
        line.remove()
    # Plug the label into the right kwarg dictionary (only the first drawn
    # component gets the legend label).
    if label is not None:
        if hist:
            hist_kws["label"] = label
        elif kde:
            kde_kws["label"] = label
        elif rug:
            rug_kws["label"] = label
        elif fit:
            fit_kws["label"] = label
    if hist:
        if bins is None:
            bins = min(_freedman_diaconis_bins(a), 50)
        hist_kws.setdefault("alpha", 0.4)
        hist_kws.setdefault("normed", norm_hist)
        orientation = "horizontal" if vertical else "vertical"
        # Pop the color so user kwargs can't conflict, then restore any
        # user-supplied value afterwards (the dicts are caller-owned).
        hist_color = hist_kws.pop("color", color)
        ax.hist(a, bins, orientation=orientation,
                color=hist_color, **hist_kws)
        if hist_color != color:
            hist_kws["color"] = hist_color
    if kde:
        kde_color = kde_kws.pop("color", color)
        kdeplot(a, vertical=vertical, ax=ax, color=kde_color, **kde_kws)
        if kde_color != color:
            kde_kws["color"] = kde_color
    if rug:
        rug_color = rug_kws.pop("color", color)
        axis = "y" if vertical else "x"
        rugplot(a, axis=axis, ax=ax, color=rug_color, **rug_kws)
        if rug_color != color:
            rug_kws["color"] = rug_color
    if fit is not None:
        fit_color = fit_kws.pop("color", "#282828")
        gridsize = fit_kws.pop("gridsize", 200)
        cut = fit_kws.pop("cut", 3)
        clip = fit_kws.pop("clip", (-np.inf, np.inf))
        # Scott's-rule bandwidth for the support grid of the fitted PDF.
        bw = stats.gaussian_kde(a).scotts_factor() * a.std(ddof=1)
        x = _kde_support(a, bw, gridsize, cut, clip)
        params = fit.fit(a)
        pdf = lambda x: fit.pdf(x, *params)
        y = pdf(x)
        if vertical:
            x, y = y, x
        ax.plot(x, y, color=fit_color, **fit_kws)
        if fit_color != "#282828":
            fit_kws["color"] = fit_color
    if label_ax:
        if vertical:
            ax.set_ylabel(axlabel)
        else:
            ax.set_xlabel(axlabel)
    return ax
def _univariate_kdeplot(data, shade, vertical, kernel, bw, gridsize, cut,
                        clip, legend, ax, cumulative=False, **kwargs):
    """Draw a 1-d kernel density estimate onto an existing Axes.

    Returns the Axes with the density curve (and optional shading) added.
    """
    # Default to an unbounded support when no clip is given
    if clip is None:
        clip = (-np.inf, np.inf)
    # Estimate the density, preferring statsmodels for its kernel options
    if _has_statsmodels:
        x, y = _statsmodels_univariate_kde(data, kernel, bw,
                                           gridsize, cut, clip,
                                           cumulative=cumulative)
    else:
        # The scipy fallback only supports gaussian, non-cumulative KDEs
        if kernel != "gau":
            kernel = "gau"
            warnings.warn("Kernel other than `gau` requires statsmodels.",
                          UserWarning)
        if cumulative:
            raise ImportError("Cumulative distributions are currently"
                              "only implemented in statsmodels."
                              "Please install statsmodels.")
        x, y = _scipy_univariate_kde(data, bw, gridsize, cut, clip)
    # Numerical noise can push the estimate slightly negative; clamp at zero
    y = np.amax(np.c_[np.zeros_like(y), y], axis=1)
    # Swap coordinates when the density should run along the y axis
    if vertical:
        x, y = y, x
    # Prefer an explicit label, falling back to a named data object
    label = kwargs.pop("label", None)
    if label is None and hasattr(data, "name"):
        label = data.name
    legend = label is not None and legend
    label = "_nolegend_" if label is None else label
    # Draw and remove a throwaway artist just to pull the next cycle color
    line, = ax.plot(x, y, **kwargs)
    color = line.get_color()
    line.remove()
    kwargs.pop("color", None)
    # Now draw the real curve (and the optional shaded region under it)
    ax.plot(x, y, color=color, label=label, **kwargs)
    shade_alpha = kwargs.get("alpha", 0.25)
    if shade:
        if vertical:
            ax.fill_betweenx(y, 1e-12, x, color=color, alpha=shade_alpha)
        else:
            ax.fill_between(x, 1e-12, y, color=color, alpha=shade_alpha)
    if legend:
        ax.legend(loc="best")
    return ax
def _statsmodels_univariate_kde(data, kernel, bw, gridsize, cut, clip,
                                cumulative=False):
    """Compute a 1-d KDE with statsmodels; return (support, density-or-cdf)."""
    # FFT evaluation is only valid for the gaussian kernel
    use_fft = kernel == "gau"
    estimator = smnp.KDEUnivariate(data)
    estimator.fit(kernel, bw, use_fft, gridsize=gridsize, cut=cut, clip=clip)
    values = estimator.cdf if cumulative else estimator.density
    return estimator.support, values
def _scipy_univariate_kde(data, bw, gridsize, cut, clip):
    """Compute a 1-d gaussian KDE with scipy; return (grid, density)."""
    # Older scipy releases lack the bw_method keyword entirely
    try:
        estimator = stats.gaussian_kde(data, bw_method=bw)
    except TypeError:
        estimator = stats.gaussian_kde(data)
        if bw != "scott":  # scipy default
            warnings.warn("Ignoring bandwidth choice, "
                          "please upgrade scipy to use a different bandwidth.",
                          UserWarning)
    # Translate a named rule into the scalar factor scipy computed
    if isinstance(bw, string_types):
        method_name = "scotts" if bw == "scott" else bw
        bw = getattr(estimator, "%s_factor" % method_name)()
    grid = _kde_support(data, bw, gridsize, cut, clip)
    return grid, estimator(grid)
def _bivariate_kdeplot(x, y, filled, fill_lowest,
                       kernel, bw, gridsize, cut, clip,
                       axlabel, ax, **kwargs):
    """Draw a 2-d KDE as (optionally filled) contours on an Axes."""
    # Normalize clip to one (low, high) pair per dimension
    if clip is None:
        clip = [(-np.inf, np.inf), (-np.inf, np.inf)]
    elif np.ndim(clip) == 1:
        clip = [clip, clip]
    # Evaluate the joint density on a regular grid
    if _has_statsmodels:
        xx, yy, z = _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip)
    else:
        xx, yy, z = _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip)
    # Resolve the colormap, expanding the "_d" (dark) palette shorthand
    n_levels = kwargs.pop("n_levels", 10)
    cmap = kwargs.get("cmap", "BuGn" if filled else "BuGn_d")
    if isinstance(cmap, string_types):
        if cmap.endswith("_d"):
            dark_colors = ["#333333"]
            dark_colors.extend(color_palette(cmap.replace("_d", "_r"), 2))
            cmap = blend_palette(dark_colors, as_cmap=True)
        else:
            cmap = mpl.cm.get_cmap(cmap)
    kwargs["cmap"] = cmap
    # Draw the contours, optionally hiding the lowest filled band
    contour_func = ax.contourf if filled else ax.contour
    cset = contour_func(xx, yy, z, n_levels, **kwargs)
    if filled and not fill_lowest:
        cset.collections[0].set_alpha(0)
    kwargs["n_levels"] = n_levels
    # Label the axes from pandas Series names when available
    if hasattr(x, "name") and axlabel:
        ax.set_xlabel(x.name)
    if hasattr(y, "name") and axlabel:
        ax.set_ylabel(y.name)
    return ax
def _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip):
    """Compute a 2-d KDE with statsmodels; return (xx, yy, z) grids."""
    # Expand a named rule or a scalar into per-dimension bandwidths
    if isinstance(bw, string_types):
        rule = getattr(smnp.bandwidths, "bw_" + bw)
        bw = [rule(x), rule(y)]
    elif np.isscalar(bw):
        bw = [bw, bw]
    # KDEMultivariate expects plain arrays, not pandas Series
    if isinstance(x, pd.Series):
        x = x.values
    if isinstance(y, pd.Series):
        y = y.values
    kde = smnp.KDEMultivariate([x, y], "cc", bw)
    x_support = _kde_support(x, kde.bw[0], gridsize, cut, clip[0])
    y_support = _kde_support(y, kde.bw[1], gridsize, cut, clip[1])
    xx, yy = np.meshgrid(x_support, y_support)
    z = kde.pdf([xx.ravel(), yy.ravel()]).reshape(xx.shape)
    return xx, yy, z
def _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip):
    """Compute a 2-d gaussian KDE with scipy; return (xx, yy, z) grids."""
    data = np.c_[x, y]
    kde = stats.gaussian_kde(data.T)
    data_std = data.std(axis=0, ddof=1)
    if isinstance(bw, string_types):
        # Scale the named factor by each dimension's standard deviation
        method_name = "scotts" if bw == "scott" else bw
        factor = getattr(kde, "%s_factor" % method_name)()
        bw_x = factor * data_std[0]
        bw_y = factor * data_std[1]
    elif np.isscalar(bw):
        bw_x = bw_y = bw
    else:
        # Per-dimension bandwidths need statsmodels' KDEMultivariate
        raise ValueError("Cannot specify a different bandwidth for each "
                         "dimension with the scipy backend. You should "
                         "install statsmodels.")
    x_support = _kde_support(data[:, 0], bw_x, gridsize, cut, clip[0])
    y_support = _kde_support(data[:, 1], bw_y, gridsize, cut, clip[1])
    xx, yy = np.meshgrid(x_support, y_support)
    z = kde([xx.ravel(), yy.ravel()]).reshape(xx.shape)
    return xx, yy, z
def kdeplot(data, data2=None, shade=False, vertical=False, kernel="gau",
            bw="scott", gridsize=100, cut=3, clip=None, legend=True,
            cumulative=False, shade_lowest=True, ax=None, **kwargs):
    """Fit and plot a univariate or bivariate kernel density estimate.
    Parameters
    ----------
    data : 1d array-like
        Input data.
    data2: 1d array-like
        Second input data. If present, a bivariate KDE will be estimated.
    shade : bool, optional
        If True, shade in the area under the KDE curve (or draw with filled
        contours when data is bivariate).
    vertical : bool
        If True, density is on x-axis.
    kernel : {'gau' | 'cos' | 'biw' | 'epa' | 'tri' | 'triw' }, optional
        Code for shape of kernel to fit with. Bivariate KDE can only use
        gaussian kernel.
    bw : {'scott' | 'silverman' | scalar | pair of scalars }, optional
        Name of reference method to determine kernel size, scalar factor,
        or scalar for each dimension of the bivariate plot.
    gridsize : int, optional
        Number of discrete points in the evaluation grid.
    cut : scalar, optional
        Draw the estimate to cut * bw from the extreme data points.
    clip : pair of scalars, or pair of pair of scalars, optional
        Lower and upper bounds for datapoints used to fit KDE. Can provide
        a pair of (low, high) bounds for bivariate plots.
    legend : bool, optinal
        If True, add a legend or label the axes when possible.
    cumulative : bool
        If True, draw the cumulative distribution estimated by the kde.
    shade_lowest : bool
        If True, shade the lowest contour of a bivariate KDE plot. Not
        relevant when drawing a univariate plot or when ``shade=False``.
        Setting this to ``False`` can be useful when you want multiple
        densities on the same Axes.
    ax : matplotlib axis, optional
        Axis to plot on, otherwise uses current axis.
    kwargs : key, value pairings
        Other keyword arguments are passed to ``plt.plot()`` or
        ``plt.contour{f}`` depending on whether a univariate or bivariate
        plot is being drawn.
    Returns
    -------
    ax : matplotlib Axes
        Axes with plot.
    See Also
    --------
    distplot: Flexibly plot a univariate distribution of observations.
    jointplot: Plot a joint dataset with bivariate and marginal distributions.
    Examples
    --------
    Plot a basic univariate density:
    .. plot::
        :context: close-figs
        >>> import numpy as np; np.random.seed(10)
        >>> import seaborn as sns; sns.set(color_codes=True)
        >>> mean, cov = [0, 2], [(1, .5), (.5, 1)]
        >>> x, y = np.random.multivariate_normal(mean, cov, size=50).T
        >>> ax = sns.kdeplot(x)
    Shade under the density curve and use a different color:
    .. plot::
        :context: close-figs
        >>> ax = sns.kdeplot(x, shade=True, color="r")
    Plot a bivariate density:
    .. plot::
        :context: close-figs
        >>> ax = sns.kdeplot(x, y)
    Use filled contours:
    .. plot::
        :context: close-figs
        >>> ax = sns.kdeplot(x, y, shade=True)
    Use more contour levels and a different color palette:
    .. plot::
        :context: close-figs
        >>> ax = sns.kdeplot(x, y, n_levels=30, cmap="Purples_d")
    Use a narrower bandwith:
    .. plot::
        :context: close-figs
        >>> ax = sns.kdeplot(x, bw=.15)
    Plot the density on the vertical axis:
    .. plot::
        :context: close-figs
        >>> ax = sns.kdeplot(y, vertical=True)
    Limit the density curve within the range of the data:
    .. plot::
        :context: close-figs
        >>> ax = sns.kdeplot(x, cut=0)
    Plot two shaded bivariate densities:
    .. plot::
        :context: close-figs
        >>> iris = sns.load_dataset("iris")
        >>> setosa = iris.loc[iris.species == "setosa"]
        >>> virginica = iris.loc[iris.species == "virginica"]
        >>> ax = sns.kdeplot(setosa.sepal_width, setosa.sepal_length,
        ...                  cmap="Reds", shade=True, shade_lowest=False)
        >>> ax = sns.kdeplot(virginica.sepal_width, virginica.sepal_length,
        ...                  cmap="Blues", shade=True, shade_lowest=False)
    """
    if ax is None:
        ax = plt.gca()
    # BUGFIX: plain Python lists have no .astype method, so coerce them to
    # arrays first.  DataFrames/Series/ndarrays pass through unchanged so the
    # isinstance dispatch below still sees the original container type.
    if isinstance(data, list):
        data = np.asarray(data)
    data = data.astype(np.float64)
    if data2 is not None:
        if isinstance(data2, list):
            data2 = np.asarray(data2)
        data2 = data2.astype(np.float64)
    # Decide whether this is a univariate or bivariate estimate:
    # a 2-d array/DataFrame or an explicit second vector means bivariate.
    bivariate = False
    if isinstance(data, np.ndarray) and np.ndim(data) > 1:
        bivariate = True
        x, y = data.T
    elif isinstance(data, pd.DataFrame) and np.ndim(data) > 1:
        bivariate = True
        x = data.iloc[:, 0].values
        y = data.iloc[:, 1].values
    elif data2 is not None:
        bivariate = True
        x = data
        y = data2
    if bivariate and cumulative:
        raise TypeError("Cumulative distribution plots are not"
                        "supported for bivariate distributions.")
    # Dispatch to the appropriate worker function
    if bivariate:
        ax = _bivariate_kdeplot(x, y, shade, shade_lowest,
                                kernel, bw, gridsize, cut, clip, legend,
                                ax, **kwargs)
    else:
        ax = _univariate_kdeplot(data, shade, vertical, kernel, bw,
                                 gridsize, cut, clip, legend, ax,
                                 cumulative=cumulative, **kwargs)
    return ax
def rugplot(a, height=.05, axis="x", ax=None, **kwargs):
    """Plot datapoints in an array as sticks on an axis.

    Parameters
    ----------
    a : vector
        1D array of observations.
    height : scalar, optional
        Height of ticks as proportion of the axis.
    axis : {'x' | 'y'}, optional
        Axis to draw rugplot on.
    ax : matplotlib axes
        Axes to draw plot into; otherwise grabs current axes.
    kwargs : key, value mappings
        Other keyword arguments are passed to ``axvline`` or ``axhline``.

    Returns
    -------
    ax : matplotlib axes
        The Axes object with the plot on it.
    """
    if ax is None:
        ax = plt.gca()
    observations = np.asarray(a)
    # An explicit ``vertical`` kwarg overrides the ``axis`` choice
    draw_vertical = kwargs.pop("vertical", axis == "y")
    line_func = ax.axhline if draw_vertical else ax.axvline
    kwargs.setdefault("linewidth", 1)
    # One short line per observation, anchored at the axis
    for value in observations:
        line_func(value, 0, height, **kwargs)
    return ax
def jointplot(x, y, data=None, kind="scatter", stat_func=stats.pearsonr,
              color=None, size=6, ratio=5, space=.2,
              dropna=True, xlim=None, ylim=None,
              joint_kws=None, marginal_kws=None, annot_kws=None, **kwargs):
    """Draw a plot of two variables with bivariate and univariate graphs.
    This function provides a convenient interface to the :class:`JointGrid`
    class, with several canned plot kinds. This is intended to be a fairly
    lightweight wrapper; if you need more flexibility, you should use
    :class:`JointGrid` directly.
    Parameters
    ----------
    x, y : strings or vectors
        Data or names of variables in ``data``.
    data : DataFrame, optional
        DataFrame when ``x`` and ``y`` are variable names.
    kind : { "scatter" | "reg" | "resid" | "kde" | "hex" }, optional
        Kind of plot to draw.
    stat_func : callable or None
        Function used to calculate a statistic about the relationship and
        annotate the plot. Should map `x` and `y` either to a single value
        or to a (value, p) tuple. Set to ``None`` if you don't want to
        annotate the plot.
    color : matplotlib color, optional
        Color used for the plot elements.
    size : numeric, optional
        Size of the figure (it will be square).
    ratio : numeric, optional
        Ratio of joint axes size to marginal axes height.
    space : numeric, optional
        Space between the joint and marginal axes
    dropna : bool, optional
        If True, remove observations that are missing from ``x`` and ``y``.
    {x, y}lim : two-tuples, optional
        Axis limits to set before plotting.
    {joint, marginal, annot}_kws : dicts
        Additional keyword arguments for the plot components.
    kwargs : key, value pairs
        Additional keyword arguments are passed to the function used to
        draw the plot on the joint Axes, superseding items in the
        ``joint_kws`` dictionary.
    Returns
    -------
    grid : :class:`JointGrid`
        :class:`JointGrid` object with the plot on it.
    See Also
    --------
    JointGrid : The Grid class used for drawing this plot. Use it directly if
                you need more flexibility.
    Examples
    --------
    Draw a scatterplot with marginal histograms:
    .. plot::
        :context: close-figs
        >>> import numpy as np, pandas as pd; np.random.seed(0)
        >>> import seaborn as sns; sns.set(style="white", color_codes=True)
        >>> tips = sns.load_dataset("tips")
        >>> g = sns.jointplot(x="total_bill", y="tip", data=tips)
    Add regression and kernel density fits:
    .. plot::
        :context: close-figs
        >>> g = sns.jointplot("total_bill", "tip", data=tips, kind="reg")
    Replace the scatterplot with a joint histogram using hexagonal bins:
    .. plot::
        :context: close-figs
        >>> g = sns.jointplot("total_bill", "tip", data=tips, kind="hex")
    Replace the scatterplots and histograms with density estimates and align
    the marginal Axes tightly with the joint Axes:
    .. plot::
        :context: close-figs
        >>> iris = sns.load_dataset("iris")
        >>> g = sns.jointplot("sepal_width", "petal_length", data=iris,
        ...                   kind="kde", space=0, color="g")
    Use a different statistic for the annotation:
    .. plot::
        :context: close-figs
        >>> from scipy.stats import spearmanr
        >>> g = sns.jointplot("size", "total_bill", data=tips,
        ...                   stat_func=spearmanr, color="m")
    Draw a scatterplot, then add a joint density estimate:
    .. plot::
        :context: close-figs
        >>> g = (sns.jointplot("sepal_length", "sepal_width",
        ...                    data=iris, color="k")
        ...         .plot_joint(sns.kdeplot, zorder=0, n_levels=6))
    Pass vectors in directly without using Pandas, then name the axes:
    .. plot::
        :context: close-figs
        >>> x, y = np.random.randn(2, 300)
        >>> g = (sns.jointplot(x, y, kind="hex", stat_func=None)
        ...         .set_axis_labels("x", "y"))
    Draw a smaller figure with more space devoted to the marginal plots:
    .. plot::
        :context: close-figs
        >>> g = sns.jointplot("total_bill", "tip", data=tips,
        ...                   size=5, ratio=3, color="g")
    Pass keyword arguments down to the underlying plots:
    .. plot::
        :context: close-figs
        >>> g = sns.jointplot("petal_length", "sepal_length", data=iris,
        ...                   marginal_kws=dict(bins=15, rug=True),
        ...                   annot_kws=dict(stat="r"),
        ...                   s=40, edgecolor="w", linewidth=1)
    """
    # Set up empty default kwarg dicts
    if joint_kws is None:
        joint_kws = {}
    # NOTE: extra **kwargs supersede any overlapping entries in joint_kws
    joint_kws.update(kwargs)
    if marginal_kws is None:
        marginal_kws = {}
    if annot_kws is None:
        annot_kws = {}
    # Make a colormap based off the plot color
    # (a light-to-dark ramp of the base color, used by "hex" and "kde")
    if color is None:
        color = color_palette()[0]
    color_rgb = mpl.colors.colorConverter.to_rgb(color)
    colors = [set_hls_values(color_rgb, l=l) for l in np.linspace(1, 0, 12)]
    cmap = blend_palette(colors, as_cmap=True)
    # Initialize the JointGrid object
    grid = JointGrid(x, y, data, dropna=dropna,
                     size=size, ratio=ratio, space=space,
                     xlim=xlim, ylim=ylim)
    # Plot the data using the grid
    # Each branch fills in branch-specific defaults via setdefault so that
    # caller-supplied joint_kws/marginal_kws entries always win.
    if kind == "scatter":
        joint_kws.setdefault("color", color)
        grid.plot_joint(plt.scatter, **joint_kws)
        marginal_kws.setdefault("kde", False)
        marginal_kws.setdefault("color", color)
        grid.plot_marginals(distplot, **marginal_kws)
    elif kind.startswith("hex"):
        # Choose the hexbin gridsize from the Freedman-Diaconis bin counts
        x_bins = _freedman_diaconis_bins(grid.x)
        y_bins = _freedman_diaconis_bins(grid.y)
        gridsize = int(np.mean([x_bins, y_bins]))
        joint_kws.setdefault("gridsize", gridsize)
        joint_kws.setdefault("cmap", cmap)
        grid.plot_joint(plt.hexbin, **joint_kws)
        marginal_kws.setdefault("kde", False)
        marginal_kws.setdefault("color", color)
        grid.plot_marginals(distplot, **marginal_kws)
    elif kind.startswith("kde"):
        joint_kws.setdefault("shade", True)
        joint_kws.setdefault("cmap", cmap)
        grid.plot_joint(kdeplot, **joint_kws)
        marginal_kws.setdefault("shade", True)
        marginal_kws.setdefault("color", color)
        grid.plot_marginals(kdeplot, **marginal_kws)
    elif kind.startswith("reg"):
        from .linearmodels import regplot
        marginal_kws.setdefault("color", color)
        grid.plot_marginals(distplot, **marginal_kws)
        joint_kws.setdefault("color", color)
        grid.plot_joint(regplot, **joint_kws)
    elif kind.startswith("resid"):
        from .linearmodels import residplot
        joint_kws.setdefault("color", color)
        grid.plot_joint(residplot, **joint_kws)
        # Recover the residual points residplot drew to build the marginals
        x, y = grid.ax_joint.collections[0].get_offsets().T
        marginal_kws.setdefault("color", color)
        marginal_kws.setdefault("kde", False)
        distplot(x, ax=grid.ax_marg_x, **marginal_kws)
        distplot(y, vertical=True, fit=stats.norm, ax=grid.ax_marg_y,
                 **marginal_kws)
        # An annotation statistic is not meaningful for residuals
        stat_func = None
    else:
        msg = "kind must be either 'scatter', 'reg', 'resid', 'kde', or 'hex'"
        raise ValueError(msg)
    if stat_func is not None:
        grid.annotate(stat_func, **annot_kws)
    return grid
|
|
# Copyright (C) 2007, One Laptop Per Child
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import gobject
import gtk
import hippo
import sugar
from sugar.graphics import style
from sugar.graphics.palette import Palette, ToolInvoker
from sugar.graphics.toolbutton import ToolButton
from sugar.graphics.icon import Icon
from constants import Constants
# Scroll-direction codes passed to _TrayViewport.scroll()
_PREVIOUS_PAGE = 0
_NEXT_PAGE = 1
class _TrayViewport(gtk.Viewport):
    """Viewport around a gtk.Toolbar that pages its content.

    Exposes a read-only ``can-scroll`` GObject property which is updated
    on every size allocation to reflect whether the toolbar's requested
    size exceeds the space the viewport was given.
    """
    __gproperties__ = {
        'can-scroll' : (bool, None, None, False,
                        gobject.PARAM_READABLE),
    }
    def __init__(self, orientation):
        # Set plain attributes before chaining up to the GObject init
        self.orientation = orientation
        self._can_scroll = False
        gobject.GObject.__init__(self)
        self.set_shadow_type(gtk.SHADOW_NONE)
        self.traybar = gtk.Toolbar()
        self.traybar.set_orientation(orientation)
        self.traybar.set_show_arrow(False)
        self.add(self.traybar)
        self.traybar.show()
        self.connect('size_allocate', self._size_allocate_cb)
    def scroll(self, direction):
        # direction is one of the module-level page codes
        if direction == _PREVIOUS_PAGE:
            self._scroll_previous()
        elif direction == _NEXT_PAGE:
            self._scroll_next()
    def _scroll_next(self):
        # Advance one page, clamping so the last page stays fully visible
        if self.orientation == gtk.ORIENTATION_HORIZONTAL:
            adj = self.get_hadjustment()
            new_value = adj.value + self.allocation.width
            adj.value = min(new_value, adj.upper - self.allocation.width)
        else:
            adj = self.get_vadjustment()
            new_value = adj.value + self.allocation.height
            adj.value = min(new_value, adj.upper - self.allocation.height)
    def _scroll_to_end(self):
        # Jump the adjustment to its upper bound (newest items)
        if self.orientation == gtk.ORIENTATION_HORIZONTAL:
            adj = self.get_hadjustment()
            adj.value = adj.upper# - self.allocation.width
        else:
            adj = self.get_vadjustment()
            adj.value = adj.upper - self.allocation.height
    def _scroll_previous(self):
        # Step back one page without going below the lower bound
        if self.orientation == gtk.ORIENTATION_HORIZONTAL:
            adj = self.get_hadjustment()
            new_value = adj.value - self.allocation.width
            adj.value = max(adj.lower, new_value)
        else:
            adj = self.get_vadjustment()
            new_value = adj.value - self.allocation.height
            adj.value = max(adj.lower, new_value)
    def do_size_request(self, requisition):
        # gtk.Widget virtual override: request 0 along the scrolling axis so
        # the parent decides how much space the tray actually gets
        child_requisition = self.child.size_request()
        if self.orientation == gtk.ORIENTATION_HORIZONTAL:
            requisition[0] = 0
            requisition[1] = child_requisition[1]
        else:
            requisition[0] = child_requisition[0]
            requisition[1] = 0
    def do_get_property(self, pspec):
        # GObject property getter backing the 'can-scroll' property
        if pspec.name == 'can-scroll':
            return self._can_scroll
    def _size_allocate_cb(self, viewport, allocation):
        # Re-evaluate overflow on every allocation; notify only on change
        bar_requisition = self.traybar.get_child_requisition()
        if self.orientation == gtk.ORIENTATION_HORIZONTAL:
            can_scroll = bar_requisition[0] > allocation.width
        else:
            can_scroll = bar_requisition[1] > allocation.height
        if can_scroll != self._can_scroll:
            self._can_scroll = can_scroll
            self.notify('can-scroll')
class _TrayScrollButton(gtk.Button):
    """Flat icon button that pages an attached _TrayViewport."""
    def __init__(self, icon_name, scroll_direction):
        gobject.GObject.__init__(self)
        self._viewport = None
        self._scroll_direction = scroll_direction
        # Flat look, sized to a single grid cell
        self.set_relief(gtk.RELIEF_NONE)
        self.set_size_request(style.GRID_CELL_SIZE, style.GRID_CELL_SIZE)
        button_icon = Icon(icon_name=icon_name,
                           icon_size=gtk.ICON_SIZE_SMALL_TOOLBAR)
        self.set_image(button_icon)
        button_icon.show()
        self.connect('clicked', self._clicked_cb)
    def set_viewport(self, viewport):
        # Track the viewport's overflow state to enable/disable paging
        self._viewport = viewport
        self._viewport.connect('notify::can-scroll',
                               self._viewport_can_scroll_changed_cb)
    def _viewport_can_scroll_changed_cb(self, viewport, pspec):
        #self.props.visible = self._viewport.props.can_scroll
        self.set_sensitive(self._viewport.props.can_scroll)
    def _clicked_cb(self, button):
        self._viewport.scroll(self._scroll_direction)
    # Write-only convenience attribute: button.viewport = some_viewport
    viewport = property(fset=set_viewport)
class HTray(gtk.VBox):
    """Horizontal tray: a scrollable toolbar flanked by paging buttons.

    A one-pixel white separator is drawn above the tray, and left/right
    buttons page the toolbar whenever its content overflows the space
    the tray was allocated.
    """
    def __init__(self, **kwargs):
        gobject.GObject.__init__(self, **kwargs)
        # Thin white rule separating the tray from the content above it
        separator = hippo.Canvas()
        box = hippo.CanvasBox(
            border_color=Constants.colorWhite.get_int(),
            background_color=Constants.colorWhite.get_int(),
            box_height=1,
            border_bottom=1)
        separator.set_root(box)
        self.pack_start(separator, False)
        hbox = gtk.HBox()
        self.pack_start(hbox)
        # Left pager, viewport, right pager packed in order
        scroll_left = _TrayScrollButton('go-left', _PREVIOUS_PAGE)
        scroll_left_event = gtk.EventBox()
        scroll_left_event.add(scroll_left)
        scroll_left_event.set_size_request(55, -1)
        hbox.pack_start(scroll_left_event, False)
        self._viewport = _TrayViewport(gtk.ORIENTATION_HORIZONTAL)
        hbox.pack_start(self._viewport)
        self._viewport.show()
        scroll_right = _TrayScrollButton('go-right', _NEXT_PAGE)
        scroll_right_event = gtk.EventBox()
        scroll_right_event.add(scroll_right)
        scroll_right_event.set_size_request(55, -1)
        hbox.pack_start(scroll_right_event, False)
        # Match the toolbar colors and keep focus off the paging buttons
        scroll_left.set_focus_on_click(False)
        scroll_left_event.modify_bg(gtk.STATE_NORMAL, sugar.graphics.style.COLOR_TOOLBAR_GREY.get_gdk_color())
        scroll_left.modify_bg(gtk.STATE_ACTIVE, sugar.graphics.style.COLOR_BUTTON_GREY.get_gdk_color())
        scroll_right.set_focus_on_click(False)
        scroll_right_event.modify_bg(gtk.STATE_NORMAL, sugar.graphics.style.COLOR_TOOLBAR_GREY.get_gdk_color())
        scroll_right.modify_bg(gtk.STATE_ACTIVE, sugar.graphics.style.COLOR_BUTTON_GREY.get_gdk_color())
        scroll_left.viewport = self._viewport
        scroll_right.viewport = self._viewport
        # Re-check scrollability whenever the tray itself is resized
        self.connect_after("size-allocate", self._size_allocate_cb)
    def _size_allocate_cb(self, widget, event):
        # Renamed from camelCase _sizeAllocateCb for file-wide consistency;
        # private and only referenced via the connect_after above.
        self._viewport.notify('can-scroll')
    def get_children(self):
        """Return the toolbar items currently in the tray."""
        return self._viewport.traybar.get_children()
    def add_item(self, item, index=-1):
        """Insert *item* at *index* (-1 appends)."""
        self._viewport.traybar.insert(item, index)
    def remove_item(self, item):
        """Remove *item* from the tray."""
        self._viewport.traybar.remove(item)
    def get_item_index(self, item):
        """Return the position of *item* in the tray."""
        return self._viewport.traybar.get_item_index(item)
    def scroll_to_end(self):
        """Scroll so the most recently added item is visible."""
        self._viewport._scroll_to_end()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.