import filecmp
import os
import shutil
import biicode.common.test
from biicode.common.utils import file_utils as common_file_utils
def load(filepath):
"""Return binary load of given test resource."""
abspath = file_path(filepath)
with open(abspath, "rb") as f:
return f.read()
def read(filepath):
"""Return system text content of given test resource."""
abspath = file_path(filepath)
with open(abspath, "r") as f:
return f.read()
def write(file_, content):
    try:
        os.makedirs(os.path.split(file_)[0])
    except OSError:
        pass  # the target directory may already exist
    with open(file_, "wb") as f:
        return f.write(content)
test_resources = os.path.join(os.path.dirname(biicode.common.test.__file__),
"resources/")
def append(content, dest):
with open(dest, "a") as f:
f.write(content)
def get_dir_files(path):
    """Return a list of files within the given test folder.
    Paths are relative to test/resources/path."""
abs_paths = common_file_utils.get_visible_files_recursive(file_path(path))
base_path = os.path.join(test_resources, path)
return [os.path.relpath(p, base_path) for p in abs_paths]
def file_path(name):
"""Return full path to given test resource. """
return os.path.join(test_resources, name)
def copyFiles(container, dest_folder, files=None):
    '''Copies files from container to dest_folder, filtering by files if provided
    '''
new_files = []
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
if not files:
files = get_dir_files(container)
for f in files:
srcpath = file_path(os.path.join(container, f))
dest = os.path.join(dest_folder, f)
dst_subfolder = os.path.join(dest_folder, os.path.dirname(f))
if not os.path.isdir(dst_subfolder):
os.makedirs(dst_subfolder)
if os.path.isdir(srcpath):
shutil.copytree(srcpath, dest)
else:
shutil.copyfile(srcpath, dest)
new_files.append(dest)
return new_files
def copyFile(src, dst_folder, dst_name=None):
'''Copies src file from test/resources folder to dst_folder
renamed to dst_name if provided
'''
srcpath = file_path(src)
if not dst_name:
dst_name = os.path.split(src)[1]
if not os.path.exists(dst_folder):
os.makedirs(dst_folder)
dst = os.path.join(dst_folder, dst_name)
shutil.copyfile(srcpath, dst)
return dst
def createFile(name, dst_folder, content):
path = os.path.join(dst_folder, name)
if not os.path.exists(dst_folder):
os.makedirs(dst_folder)
with open(path, 'w+') as f:
f.write(content)
return path
def removeFolderContents(path):
'''Recursively deletes all content in given directory'''
for root, dirs, files in os.walk(path):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
def search_pattern_and_replace(path, pattern, replacement):
'''Performs inline search and replace in given file'''
import fileinput, re
for line in fileinput.FileInput(path, inplace=1):
line = re.sub(pattern, replacement, line)
print line, # DO NOT REMOVE THIS PRINT, it is necessary for replace to work
def copy_directory(origin, dest):
shutil.copytree(origin, dest)
return dest
def are_dir_trees_equal(dir1, dir2):
"""
Compare two directories recursively. Files in each directory are
assumed to be equal if their names and contents are equal.
@param dir1: First directory path
@param dir2: Second directory path
@return: True if the directory trees are the same and
there were no errors while accessing the directories or files,
False otherwise.
"""
dirs_cmp = filecmp.dircmp(dir1, dir2)
if len(dirs_cmp.left_only) > 0 or len(dirs_cmp.right_only) > 0 or \
len(dirs_cmp.funny_files) > 0:
return False
(_, mismatch, errors) = filecmp.cmpfiles(
dir1, dir2, dirs_cmp.common_files, shallow=False)
if len(mismatch) > 0 or len(errors) > 0:
return False
for common_dir in dirs_cmp.common_dirs:
new_dir1 = os.path.join(dir1, common_dir)
new_dir2 = os.path.join(dir2, common_dir)
if not are_dir_trees_equal(new_dir1, new_dir2):
return False
return True
def replace_content(folder, file_name, tag, tag_content):
    """Replace tag with tag_content in folder/file_name."""
    path = os.path.join(folder, file_name)
    content = read(path)
    content = content.replace(tag, tag_content)
    return write(path, content)
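# --- Usage sketch (assumption): exercising the helpers above from a test;
# the temporary folder name is illustrative, not part of the module.
def _demo_roundtrip(tmp_base):
    src_dir = os.path.join(tmp_base, 'src')
    dst_dir = os.path.join(tmp_base, 'dst')
    created = createFile('a.txt', src_dir, 'hello')
    copyFile(created, dst_dir)
    assert are_dir_trees_equal(src_dir, dst_dir)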
|
from azure.cli.core import AzCommandsLoader
import azure.cli.command_modules.sql._help # pylint: disable=unused-import
class SqlCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
from azure.cli.core.commands import CliCommandType
from azure.cli.core.profiles import ResourceType
sql_custom = CliCommandType(operations_tmpl='azure.cli.command_modules.sql.custom#{}')
super(SqlCommandsLoader, self).__init__(cli_ctx=cli_ctx,
custom_command_type=sql_custom,
resource_type=ResourceType.MGMT_SQL)
def load_command_table(self, args):
from azure.cli.command_modules.sql.commands import load_command_table
load_command_table(self, args)
return self.command_table
def load_arguments(self, command):
from azure.cli.command_modules.sql._params import load_arguments
load_arguments(self, command)
COMMAND_LOADER_CLS = SqlCommandsLoader
|
import os.path
from pipeline.conf import settings
from pipeline.compilers import SubProcessCompiler
class LessCompiler(SubProcessCompiler):
output_extension = 'css'
def match_file(self, filename):
return filename.endswith('.less')
def compile_file(self, content, path):
command = '%s %s %s' % (
settings.PIPELINE_LESS_BINARY,
settings.PIPELINE_LESS_ARGUMENTS,
path
)
cwd = os.path.dirname(path)
content = self.execute_command(command, cwd=cwd)
return content
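# --- Configuration sketch (assumption): the settings consumed above are the
# standard django-pipeline ones; the values shown are illustrative.
#   PIPELINE_LESS_BINARY = '/usr/bin/env lessc'
#   PIPELINE_LESS_ARGUMENTS = ''
# With that in place, compile_file() executes, e.g.:
#   /usr/bin/env lessc  /path/to/style.less
# from the directory that contains the source file.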
|
"""
pipe2py.modules.pipeurlinput
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
http://pipes.yahoo.com/pipes/docs?doc=user_inputs#URL
"""
from pipe2py.lib import utils
def pipe_urlinput(context=None, _INPUT=None, conf=None, **kwargs):
"""An input that prompts the user for a url and yields it forever.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : unused
conf : {
'name': {'value': 'parameter name'},
'prompt': {'value': 'User prompt'},
'default': {'value': 'default value'},
'debug': {'value': 'debug value'}
}
Yields
------
_OUTPUT : url
"""
value = utils.get_input(context, conf)
value = utils.url_quote(value)
while True:
yield value
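# --- Usage sketch (assumption): 'context' is a pipe2py.Context and conf
# follows the shape documented above; the values are illustrative.
#   from itertools import islice
#   conf = {'name': {'value': 'feed'},
#           'prompt': {'value': 'Enter a URL:'},
#           'default': {'value': 'http://example.com/rss'},
#           'debug': {'value': 'http://example.com/rss'}}
#   urls = pipe_urlinput(context, conf=conf)
#   list(islice(urls, 2))  # the generator yields the same quoted url forever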
|
"""buildpkg.py -- Build OS X packages for Apple's Installer.app.
This is an experimental command-line tool for building packages to be
installed with the Mac OS X Installer.app application.
It is much inspired by Apple's GUI tool called PackageMaker.app, which
seems to be part of the OS X developer tools installed in the folder
/Developer/Applications. But apparently there are other free tools to
do the same thing which are also named PackageMaker, like Brian Hill's:
http://personalpages.tds.net/~brian_hill/packagemaker.html
Beware of the multi-package features of Installer.app (which are not
yet supported here) that can potentially screw up your installation
and are discussed in these articles on Stepwise:
http://www.stepwise.com/Articles/Technical/Packages/InstallerWoes.html
http://www.stepwise.com/Articles/Technical/Packages/InstallerOnX.html
Besides using the PackageMaker class directly, by importing it inside
another module, there are additional ways of using this module:
the top-level buildPackage() function provides a shortcut to the same
feature and is also called when using this module from the command
line.
****************************************************************
NOTE: For now you should be able to run this even on a non-OS X
system and get something similar to a package, but without
the real archive (needs pax) and bom files (needs mkbom)
inside! This is only for providing a chance for testing to
folks without OS X.
****************************************************************
TODO:
- test pre-process and post-process scripts (Python ones?)
- handle multi-volume packages (?)
- integrate into distutils (?)
Dinu C. Gherman,
gherman@europemail.com
November 2001
!! USE AT YOUR OWN RISK !!
"""
__version__ = 0.2
__license__ = "FreeBSD"
import os, sys, glob, fnmatch, shutil, string, copy, getopt
from os.path import basename, dirname, join, islink, isdir, isfile
Error = "buildpkg.Error"
PKG_INFO_FIELDS = """\
Title
Version
Description
DefaultLocation
DeleteWarning
NeedsAuthorization
DisableStop
UseUserMask
Application
Relocatable
Required
InstallOnly
RequiresReboot
RootVolumeOnly
LongFilenames
LibrarySubdirectory
AllowBackRev
OverwritePermissions
InstallFat\
"""
class GlobDirectoryWalker:
"A forward iterator that traverses files in a directory tree."
def __init__(self, directory, pattern="*"):
self.stack = [directory]
self.pattern = pattern
self.files = []
self.index = 0
def __getitem__(self, index):
while 1:
try:
file = self.files[self.index]
self.index = self.index + 1
except IndexError:
# pop next directory from stack
self.directory = self.stack.pop()
self.files = os.listdir(self.directory)
self.index = 0
else:
# got a filename
fullname = join(self.directory, file)
if isdir(fullname) and not islink(fullname):
self.stack.append(fullname)
if fnmatch.fnmatch(file, self.pattern):
return fullname
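# --- Usage sketch (assumption): the class implements the legacy __getitem__
# iteration protocol, so a plain for-loop drives it until the directory
# stack is exhausted and the IndexError from pop() ends the iteration.
#   for name in GlobDirectoryWalker("/tmp/mypkg", "*.py"):
#       print name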
class PackageMaker:
"""A class to generate packages for Mac OS X.
This is intended to create OS X packages (with extension .pkg)
containing archives of arbitrary files that the Installer.app
will be able to handle.
As of now, PackageMaker instances need to be created with the
title, version and description of the package to be built.
The package is built after calling the instance method
build(root, **options). It has the same name as the constructor's
title argument plus a '.pkg' extension and is located in the same
parent folder that contains the root folder.
E.g. this will create a package folder /my/space/distutils.pkg/:
pm = PackageMaker("distutils", "1.0.2", "Python distutils.")
pm.build("/my/space/distutils")
"""
packageInfoDefaults = {
'Title': None,
'Version': None,
'Description': '',
'DefaultLocation': '/',
'DeleteWarning': '',
'NeedsAuthorization': 'NO',
'DisableStop': 'NO',
'UseUserMask': 'YES',
'Application': 'NO',
'Relocatable': 'YES',
'Required': 'NO',
'InstallOnly': 'NO',
'RequiresReboot': 'NO',
'RootVolumeOnly' : 'NO',
'InstallFat': 'NO',
'LongFilenames': 'YES',
'LibrarySubdirectory': 'Standard',
'AllowBackRev': 'YES',
'OverwritePermissions': 'NO',
}
def __init__(self, title, version, desc):
"Init. with mandatory title/version/description arguments."
info = {"Title": title, "Version": version, "Description": desc}
self.packageInfo = copy.deepcopy(self.packageInfoDefaults)
self.packageInfo.update(info)
# variables set later
self.packageRootFolder = None
self.packageResourceFolder = None
self.sourceFolder = None
self.resourceFolder = None
def build(self, root, resources=None, **options):
"""Create a package for some given root folder.
With no 'resources' argument set it is assumed to be the same
as the root directory. Option items replace the default ones
in the package info.
"""
# set folder attributes
self.sourceFolder = root
if resources == None:
self.resourceFolder = root
else:
self.resourceFolder = resources
# replace default option settings with user ones if provided
        fields = self.packageInfoDefaults.keys()
for k, v in options.items():
if k in fields:
self.packageInfo[k] = v
elif not k in ["OutputDir"]:
raise Error, "Unknown package option: %s" % k
# Check where we should leave the output. Default is current directory
outputdir = options.get("OutputDir", os.getcwd())
packageName = self.packageInfo["Title"]
        self.packageRootFolder = os.path.join(outputdir, packageName + ".pkg")
# do what needs to be done
self._makeFolders()
self._addInfo()
self._addBom()
self._addArchive()
self._addResources()
self._addSizes()
self._addLoc()
def _makeFolders(self):
"Create package folder structure."
# Not sure if the package name should contain the version or not...
# packageName = "%s-%s" % (self.packageInfo["Title"],
# self.packageInfo["Version"]) # ??
        contFolder = join(self.packageRootFolder, "Contents")
        self.packageResourceFolder = join(contFolder, "Resources")
        os.mkdir(self.packageRootFolder)
        os.mkdir(contFolder)
        os.mkdir(self.packageResourceFolder)
def _addInfo(self):
"Write .info file containing installing options."
# Not sure if options in PKG_INFO_FIELDS are complete...
info = ""
for f in string.split(PKG_INFO_FIELDS, "\n"):
if self.packageInfo.has_key(f):
info = info + "%s %%(%s)s\n" % (f, f)
info = info % self.packageInfo
base = self.packageInfo["Title"] + ".info"
path = join(self.packageResourceFolder, base)
f = open(path, "w")
f.write(info)
def _addBom(self):
"Write .bom file containing 'Bill of Materials'."
# Currently ignores if the 'mkbom' tool is not available.
try:
base = self.packageInfo["Title"] + ".bom"
bomPath = join(self.packageResourceFolder, base)
cmd = "mkbom %s %s" % (self.sourceFolder, bomPath)
res = os.system(cmd)
except:
pass
def _addArchive(self):
"Write .pax.gz file, a compressed archive using pax/gzip."
# Currently ignores if the 'pax' tool is not available.
cwd = os.getcwd()
# create archive
os.chdir(self.sourceFolder)
base = basename(self.packageInfo["Title"]) + ".pax"
self.archPath = join(self.packageResourceFolder, base)
cmd = "pax -w -f %s %s" % (self.archPath, ".")
res = os.system(cmd)
# compress archive
cmd = "gzip %s" % self.archPath
res = os.system(cmd)
os.chdir(cwd)
def _addResources(self):
"Add Welcome/ReadMe/License files, .lproj folders and scripts."
# Currently we just copy everything that matches the allowed
# filenames. So, it's left to Installer.app to deal with the
# same file available in multiple formats...
if not self.resourceFolder:
return
# find candidate resource files (txt html rtf rtfd/ or lproj/)
allFiles = []
for pat in string.split("*.txt *.html *.rtf *.rtfd *.lproj", " "):
pattern = join(self.resourceFolder, pat)
allFiles = allFiles + glob.glob(pattern)
# find pre-process and post-process scripts
# naming convention: packageName.{pre,post}_{upgrade,install}
# Alternatively the filenames can be {pre,post}_{upgrade,install}
# in which case we prepend the package name
packageName = self.packageInfo["Title"]
for pat in ("*upgrade", "*install", "*flight"):
pattern = join(self.resourceFolder, packageName + pat)
pattern2 = join(self.resourceFolder, pat)
allFiles = allFiles + glob.glob(pattern)
allFiles = allFiles + glob.glob(pattern2)
# check name patterns
files = []
for f in allFiles:
for s in ("Welcome", "License", "ReadMe"):
if string.find(basename(f), s) == 0:
files.append((f, f))
if f[-6:] == ".lproj":
files.append((f, f))
elif basename(f) in ["pre_upgrade", "pre_install", "post_upgrade", "post_install"]:
files.append((f, packageName+"."+basename(f)))
elif basename(f) in ["preflight", "postflight"]:
files.append((f, f))
elif f[-8:] == "_upgrade":
files.append((f,f))
elif f[-8:] == "_install":
files.append((f,f))
# copy files
for src, dst in files:
src = basename(src)
dst = basename(dst)
f = join(self.resourceFolder, src)
if isfile(f):
shutil.copy(f, os.path.join(self.packageResourceFolder, dst))
elif isdir(f):
# special case for .rtfd and .lproj folders...
d = join(self.packageResourceFolder, dst)
os.mkdir(d)
files = GlobDirectoryWalker(f)
for file in files:
shutil.copy(file, d)
def _addSizes(self):
"Write .sizes file with info about number and size of files."
# Not sure if this is correct, but 'installedSize' and
# 'zippedSize' are now in Bytes. Maybe blocks are needed?
# Well, Installer.app doesn't seem to care anyway, saying
# the installation needs 100+ MB...
numFiles = 0
installedSize = 0
zippedSize = 0
files = GlobDirectoryWalker(self.sourceFolder)
for f in files:
numFiles = numFiles + 1
installedSize = installedSize + os.lstat(f)[6]
try:
            zippedSize = os.stat(self.archPath + ".gz")[6]
except OSError: # ignore error
pass
base = self.packageInfo["Title"] + ".sizes"
f = open(join(self.packageResourceFolder, base), "w")
format = "NumFiles %d\nInstalledSize %d\nCompressedSize %d\n"
f.write(format % (numFiles, installedSize, zippedSize))
def _addLoc(self):
"Write .loc file."
base = self.packageInfo["Title"] + ".loc"
f = open(join(self.packageResourceFolder, base), "w")
f.write('/')
def buildPackage(*args, **options):
"A Shortcut function for building a package."
o = options
title, version, desc = o["Title"], o["Version"], o["Description"]
pm = PackageMaker(title, version, desc)
apply(pm.build, list(args), options)
def test0():
"Vanilla test for the distutils distribution."
pm = PackageMaker("distutils2", "1.0.2", "Python distutils package.")
pm.build("/Users/dinu/Desktop/distutils2")
def test1():
"Test for the reportlab distribution with modified options."
pm = PackageMaker("reportlab", "1.10",
"ReportLab's Open Source PDF toolkit.")
pm.build(root="/Users/dinu/Desktop/reportlab",
DefaultLocation="/Applications/ReportLab",
Relocatable="YES")
def test2():
"Shortcut test for the reportlab distribution with modified options."
buildPackage(
"/Users/dinu/Desktop/reportlab",
Title="reportlab",
Version="1.10",
Description="ReportLab's Open Source PDF toolkit.",
DefaultLocation="/Applications/ReportLab",
Relocatable="YES")
def printUsage():
"Print usage message."
format = "Usage: %s <opts1> [<opts2>] <root> [<resources>]"
print format % basename(sys.argv[0])
print
print " with arguments:"
print " (mandatory) root: the package root folder"
print " (optional) resources: the package resources folder"
print
print " and options:"
print " (mandatory) opts1:"
mandatoryKeys = string.split("Title Version Description", " ")
for k in mandatoryKeys:
print " --%s" % k
print " (optional) opts2: (with default values)"
pmDefaults = PackageMaker.packageInfoDefaults
optionalKeys = pmDefaults.keys()
for k in mandatoryKeys:
optionalKeys.remove(k)
optionalKeys.sort()
maxKeyLen = max(map(len, optionalKeys))
for k in optionalKeys:
format = " --%%s:%s %%s"
format = format % (" " * (maxKeyLen-len(k)))
print format % (k, repr(pmDefaults[k]))
def main():
"Command-line interface."
shortOpts = ""
keys = PackageMaker.packageInfoDefaults.keys()
longOpts = map(lambda k: k+"=", keys)
try:
opts, args = getopt.getopt(sys.argv[1:], shortOpts, longOpts)
except getopt.GetoptError, details:
print details
printUsage()
return
optsDict = {}
for k, v in opts:
optsDict[k[2:]] = v
ok = optsDict.keys()
    if not (1 <= len(args) <= 2):
        print "Wrong number of arguments!"
elif not ("Title" in ok and \
"Version" in ok and \
"Description" in ok):
print "Missing mandatory option!"
else:
apply(buildPackage, args, optsDict)
return
printUsage()
# sample use:
# buildpkg.py --Title=distutils \
# --Version=1.0.2 \
# --Description="Python distutils package." \
# /Users/dinu/Desktop/distutils
if __name__ == "__main__":
main()
|
from rsf.proj import *
from math import *
import fdmod,pcsutil,wefd
def data(par):
# ------------------------------------------------------------
Fetch('vp_marmousi-ii.segy',"marm2")
Fetch('vs_marmousi-ii.segy',"marm2")
Fetch('density_marmousi-ii.segy',"marm2")
# ------------------------------------------------------------
for file in ('vp','vs','ro'):
if(file=='ro'):
ifile='density_marmousi-ii.segy'
else:
ifile=file+'_marmousi-ii.segy'
Flow(['z'+file,'t'+file,'./s'+file,'./b'+file],ifile,
'''
segyread tape=$SOURCE
tfile=${TARGETS[1]}
hfile=${TARGETS[2]}
bfile=${TARGETS[3]}
''',stdin=0)
Flow('_'+file,'z'+file,
'''
put
o1=0 d1=0.001249 label1=%(lz)s unit1=%(uz)s
o2=0 d2=0.001249 label2=%(lx)s unit2=%(ux)s |
window j1=2 j2=2
''' % par)
if(file=='ro'):
Flow(file+'raw','_'+file,'window n1=%(nz)d n2=%(nx)d min1=%(oz)g min2=%(ox)g | scale rscale=1000000' % par)
else:
Flow(file+'raw','_'+file,'window n1=%(nz)d n2=%(nx)d min1=%(oz)g min2=%(ox)g' % par)
# ------------------------------------------------------------
Flow( 'wmask','vpraw','mask max=1.5 | dd type=float')
Flow('rx','vpraw','math output="1.0e6+1.5e6*(input-1.5)/3" ')
Flow('ro','roraw','math output=1')
Flow('vp','vpraw','smooth rect1=35 rect2=35 repeat=5')
Flow('vs','vp wmask','scale rscale=0.5 | math w=${SOURCES[1]} output="input*(1-w)"')
# velocity ratio at cig location x
    Flow('vratio1_1','vp vp','add mode=d ${SOURCES[1]}')
    Flow('vratio1_2','vp vs','add mode=d ${SOURCES[1]}')
    Flow('vratio2_1','vs vp','add mode=d ${SOURCES[1]}')
    Flow('vratio2_2','vs vs','add mode=d ${SOURCES[1]}')
Flow('vratio','vratio1_1 vratio1_2 vratio2_1 vratio2_2',
'''
cat axis=3 space=n ${SOURCES[0:4]}
''',stdin=0)
def mask(mask,xsou,tmin,tmax,par):
dipline1(mask+'ml',
0.15+tmin,par['xmin'],
0.15,xsou,
0,1,
par['nt'],par['ot'],par['dt'],
par['nx'],par['ox'],par['dx'])
dipline1(mask+'mr',
0.15,xsou,
0.15+tmax,par['xmax'],
0,1,
par['nt'],par['ot'],par['dt'],
par['nx'],par['ox'],par['dx'])
Flow(mask,[mask+'ml',mask+'mr'],
'''
spike nsp=1 mag=1.0
n1=%(nx)d o1=%(ox)g d1=%(dx)g k1=%(ltap)d l1=%(rtap)d
n2=%(nt)d o2=%(ot)g d2=%(dt)g |
smooth rect1=100 repeat=1 |
scale axis=123 |
transp |
add mode=p ${SOURCES[0]} |
add mode=p ${SOURCES[1]} |
transp |
smooth rect2=100 repeat=3 |
put label1=x label2=t unit1=km unit2=s |
spray axis=3 n=2 o=0 d=1 |
transp plane=23
''' % par)
Result(mask,
'window n2=1 | transp|' + fdmod.dgrey('',par))
def dip(dip,img,par):
Flow( dip,img,'dip rect1=40 rect2=40 order=3 liter=100 verb=y ')
Result(dip,fdmod.cgrey('color=j wantscalebar=n',par))
def psang(x,img,dip,vpvs,tag,par):
    # dip angle at cig location x
Flow( dip+'-one',dip,'window n2=1 min2=%g'%x)
    # vpvs ratio at cig location x
Flow('vratioPP',vpvs,'window n3=1 f3=0 n2=1 min2=%g'%x)
Flow('vratioPS',vpvs,'window n3=1 f3=1 n2=1 min2=%g'%x)
Flow('vratioSP',vpvs,'window n3=1 f3=2 n2=1 min2=%g'%x)
Flow('vratioSS',vpvs,'window n3=1 f3=3 n2=1 min2=%g'%x)
nhx=200
nhz=0
nht=0
wefd.elaps('S'+tag,
img+tag+'_ds',
img+tag+'_dr',
nhx,nhz,nht,
dip+'-one',x,par)
def dipline1(mod,s1,s2,e1,e2,vi,vt,n1,o1,d1,n2,o2,d2):
min1=o1
max1=o1+(n1-1)*d1
min2=o2
max2=o2+(n2-1)*d2
ra = (e1-s1)/(e2-s2)
vels = "%s,%s,%s" %(vi,vt,vt)
drvs = "%s,%s" %(tan(ra),tan(ra))
dim1 = 'd1=%g o1=%g n1=%d' % (d2,o2,n2)
dim2 = 'd2=%g o2=%g n2=%d' % (d1,o1,n1)
Flow(mod+'lay2',None,
'''
spike nsp=4 mag=%g,%g,%g,%g
n1=4 n2=1 k1=1,2,3,4 |
put n1=2 n2=2 |
spline %s fp=%s
'''%(min2,min1,max2,max1,dim1,drvs))
Flow(mod+'lay1',None,
'''
spike nsp=4 mag=%g,%g,%g,%g
n1=4 n2=1 k1=1,2,3,4 |
put n1=2 n2=2 |
spline %s fp=%s
'''%(s2,s1,e2,e1,dim1,drvs))
Flow( mod+'layers',[mod+'lay1',mod+'lay2'],'cat axis=2 ${SOURCES[1:2]}')
Flow(mod,mod+'layers',
'''
unif2 v00=%s n1=%d d1=%g o1=%g
''' % (vels,n1,d1,o1) )
|
'''
:mod: Utils
Module that collects utility functions.
'''
import fnmatch
from DIRAC import gConfig, S_OK
from DIRAC.Core.Utilities import List
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
__RCSID__ = '$Id: $'
def voimport( base_mod ):
  '''
  Import a module from the configured extensions; if it is not found
  there, fall back to importing it from DIRAC.
  '''
# FIXME: A.T.: Use Core.Utilities.ObjectLoader
for ext in gConfig.getValue( 'DIRAC/Extensions', [] ):
try:
return __import__( ext + base_mod, globals(), locals(), ['*'] )
except ImportError:
continue
# If not found in extensions, import it in DIRAC base.
return __import__( base_mod, globals(), locals(), ['*'] )
def getCSTree( csPath = '' ):
  '''
  Return the configuration rooted at csPath as a Python dictionary that
  mirrors the structure of the configuration file.
  '''
opHelper = Operations()
def getCSTreeAsDict( treePath ):
'''
Function to recursively iterate over a CS tree
'''
csTreeDict = {}
opts = opHelper.getOptionsDict( treePath )
if opts[ 'OK' ]:
opts = opts[ 'Value' ]
for optKey, optValue in opts.items():
if optValue.find( ',' ) > -1:
optValue = List.fromChar( optValue )
else:
optValue = [ optValue ]
csTreeDict[ optKey ] = optValue
secs = opHelper.getSections( treePath )
if secs[ 'OK' ]:
secs = secs[ 'Value' ]
for sec in secs:
secTree = getCSTreeAsDict( '%s/%s' % ( treePath, sec ) )
if not secTree[ 'OK' ]:
return secTree
csTreeDict[ sec ] = secTree[ 'Value' ]
return S_OK( csTreeDict )
return getCSTreeAsDict( csPath )
def configMatch( candidateParams, configParams ):
  '''
  For a given configuration, the candidate is rejected if one of its params
  does not match the config params. A candidate param of None is treated as
  a wildcard (*). Candidate params that do not appear in the config are
  currently skipped rather than treated as a mismatch.
  '''
for key in candidateParams:
if not key in configParams:
# The candidateParams is missing one of the parameters required
# return False
continue
if candidateParams[ key ] is None:
# None is assumed to be a wildcard (*)
continue
cParameter = candidateParams[ key ]
if not isinstance( cParameter, list ):
cParameter = [ cParameter ]
# We allow using UNIX-like regular expression ( wild-cards ) on the CS
_matches = False
for configItem in configParams[ key ]:
if fnmatch.filter( set( cParameter ), configItem ):
_matches = True
break
if not _matches:
return False
return True
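# --- Usage sketch (assumption): config values are lists and may contain
# UNIX-style wildcards, as handled by fnmatch above.
#   configMatch( { 'Site': 'LCG.CERN.ch' }, { 'Site': [ 'LCG.*' ] } )   # True
#   configMatch( { 'Site': 'DIRAC.Test.ch' }, { 'Site': [ 'LCG.*' ] } ) # False
#   configMatch( { 'Site': None }, { 'Site': [ 'LCG.*' ] } )            # True (wildcard)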
|
"""
URLResolver Addon for Kodi
Copyright (C) 2016 t0mm0, tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from urlresolver.lib import kodi
from urlresolver.lib import log_utils
from urlresolver.lib import cache
from urlresolver.lib.url_dispatcher import URL_Dispatcher
url_dispatcher = URL_Dispatcher()
def __enum(**enums):
return type('Enum', (), enums)
MODES = __enum(AUTH_RD='auth_rd', RESET_RD='reset_rd', RESET_CACHE='reset_cache')
@url_dispatcher.register(MODES.AUTH_RD)
def auth_rd():
kodi.close_all()
kodi.sleep(500) # sleep or authorize won't work for some reason
from urlresolver.plugins import realdebrid
if realdebrid.RealDebridResolver().authorize_resolver():
kodi.notify(msg=kodi.i18n('rd_authorized'), duration=5000)
@url_dispatcher.register(MODES.RESET_RD)
def reset_rd():
kodi.close_all()
kodi.sleep(500) # sleep or reset won't work for some reason
from urlresolver.plugins import realdebrid
rd = realdebrid.RealDebridResolver()
rd.reset_authorization()
kodi.notify(msg=kodi.i18n('rd_auth_reset'), duration=5000)
@url_dispatcher.register(MODES.RESET_CACHE)
def reset_cache():
if cache.reset_cache():
kodi.notify(msg=kodi.i18n('cache_reset'))
else:
kodi.notify(msg=kodi.i18n('cache_reset_failed'))
def main(argv=None):
if sys.argv: argv = sys.argv
queries = kodi.parse_query(sys.argv[2])
log_utils.log('Version: |%s| Queries: |%s|' % (kodi.get_version(), queries))
log_utils.log('Args: |%s|' % (argv))
# don't process params that don't match our url exactly. (e.g. plugin://plugin.video.1channel/extrafanart)
plugin_url = 'plugin://%s/' % (kodi.get_id())
if argv[0] != plugin_url:
return
mode = queries.get('mode', None)
url_dispatcher.dispatch(mode, queries)
if __name__ == '__main__':
sys.exit(main())
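# --- Invocation sketch (assumption): Kodi calls the addon with argv of the
# form ['plugin://<addon_id>/', '<handle>', '?mode=auth_rd'], so
# kodi.parse_query(sys.argv[2]) yields {'mode': 'auth_rd'} and the
# dispatcher above routes the call to auth_rd().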
|
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
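# --- Usage sketch: insertion order is preserved and popitem() pops from the
# tail by default (this is the classic Python 2 OrderedDict recipe).
#   d = OrderedDict()
#   d['b'] = 1
#   d['a'] = 2
#   d.keys()     # ['b', 'a']
#   d.popitem()  # ('a', 2)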
|
from .base import Capability, BaseObject, StringField, FloatField
__all__ = ['IpLocation', 'CapGeolocIp']
class IpLocation(BaseObject):
"""
Represents the location of an IP address.
"""
city = StringField('City')
region = StringField('Region')
zipcode = StringField('Zip code')
country = StringField('Country')
lt = FloatField('Latitude')
lg = FloatField('Longitude')
osmlink = StringField('Link to OpenStreetMap location page')
host = StringField('Hostname')
tld = StringField('Top Level Domain')
isp = StringField('Internet Service Provider')
def __init__(self, ipaddr):
BaseObject.__init__(self, ipaddr)
class CapGeolocIp(Capability):
    """
    Access an IP address geolocation database.
    """
def get_location(self, ipaddr):
"""
Get location of an IP address.
:param ipaddr: IP address
:type ipaddr: str
:rtype: :class:`IpLocation`
"""
raise NotImplementedError()
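# --- Implementation sketch (assumption): a hypothetical backend filling in
# the IpLocation fields declared above; all values are illustrative.
class DummyGeolocIp(CapGeolocIp):
    def get_location(self, ipaddr):
        loc = IpLocation(ipaddr)
        loc.city = u'Paris'  # illustrative value
        loc.country = u'FR'
        loc.lt = 48.8566
        loc.lg = 2.3522
        return loc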
|
from __future__ import unicode_literals
from copy import deepcopy
from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms.models import modelform_factory
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from shoop.admin.base import MenuEntry
from shoop.admin.toolbar import Toolbar, URLActionButton, get_default_edit_toolbar
from shoop.admin.utils.views import CreateOrUpdateView
from shoop.core.models import PaymentMethod, ShippingMethod
from shoop.core.modules.interface import ModuleNotFound
from shoop.utils.multilanguage_model_form import MultiLanguageModelForm
class MethodEditToolbar(Toolbar):
def __init__(self, view_object):
        super(MethodEditToolbar, self).__init__()
self.view_object = view_object
get_default_edit_toolbar(toolbar=self, view_object=view_object, save_form_id="method_form")
method = view_object.object
if method.pk:
self.build_detail_button(method)
def build_detail_button(self, method):
disable_reason = None
try:
if not method.module.admin_detail_view_class:
disable_reason = _("The selected module has no details to configure")
except ModuleNotFound:
disable_reason = _("The selected module is not currently available")
self.append(URLActionButton(
url=reverse(
"shoop_admin:%s.edit-detail" % self.view_object.action_url_name_prefix,
kwargs={"pk": method.pk}
),
text=_("Edit Details"),
icon="fa fa-pencil",
extra_css_class="btn-info",
disable_reason=disable_reason
))
class _BaseMethodEditView(CreateOrUpdateView):
model = None # Overridden below
action_url_name_prefix = None
template_name = "shoop/admin/methods/edit.jinja"
form_class = forms.Form
context_object_name = "method"
@property
def title(self):
return _(u"Edit %(model)s") % {"model": self.model._meta.verbose_name}
def get_breadcrumb_parents(self):
return [
MenuEntry(
text=force_text(self.model._meta.verbose_name_plural).title(),
url="shoop_admin:%s.list" % self.action_url_name_prefix
)
]
def get_form(self, form_class=None):
form_class = modelform_factory(
model=self.model,
form=MultiLanguageModelForm,
fields=("name", "status", "tax_class", "module_identifier"),
widgets={"module_identifier": forms.Select},
)
form = form_class(languages=settings.LANGUAGES, **self.get_form_kwargs())
form.fields["module_identifier"].widget.choices = self.model.get_module_choices(
empty_label=(_("Default %s module") % self.model._meta.verbose_name).title()
)
# Add fields from the module, if any...
form.module_option_field_names = []
for field_name, field in self.object.module.option_fields:
form.fields[field_name] = deepcopy(field)
form.module_option_field_names.append(field_name)
if self.object.module_data and field_name in self.object.module_data:
form.initial[field_name] = self.object.module_data[field_name]
return form
def get_success_url(self):
return reverse("shoop_admin:%s.edit" % self.action_url_name_prefix, kwargs={"pk": self.object.pk})
def get_toolbar(self):
return MethodEditToolbar(self)
def save_form(self, form):
self.object = form.save()
if not self.object.module_data:
self.object.module_data = {}
for field_name in form.module_option_field_names:
if field_name in form.cleaned_data:
self.object.module_data[field_name] = form.cleaned_data[field_name]
self.object.save()
class ShippingMethodEditView(_BaseMethodEditView):
model = ShippingMethod
action_url_name_prefix = "method.shipping"
class PaymentMethodEditView(_BaseMethodEditView):
model = PaymentMethod
action_url_name_prefix = "method.payment"
|
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BlockStructureConfiguration',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('num_versions_to_keep', models.IntegerField(default=5, null=True, blank=True)),
('cache_timeout_in_seconds', models.IntegerField(default=86400, null=True, blank=True)),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'db_table': 'block_structure_config',
},
),
]
|
"""fix description field in connection to be text
Revision ID: 64a7d6477aae
Revises: 61ec73d9401f
Create Date: 2020-11-25 08:56:11.866607
"""
import sqlalchemy as sa # noqa
from alembic import op # noqa
revision = '64a7d6477aae'
down_revision = '61ec73d9401f'
branch_labels = None
depends_on = None
def upgrade():
"""Apply fix description field in connection to be text"""
conn = op.get_bind() # pylint: disable=no-member
if conn.dialect.name == "sqlite":
# in sqlite TEXT and STRING column types are the same
return
if conn.dialect.name == "mysql":
op.alter_column(
'connection',
'description',
existing_type=sa.String(length=5000),
type_=sa.Text(length=5000),
existing_nullable=True,
)
else:
# postgres does not allow size modifier for text type
op.alter_column('connection', 'description', existing_type=sa.String(length=5000), type_=sa.Text())
def downgrade():
"""Unapply fix description field in connection to be text"""
conn = op.get_bind() # pylint: disable=no-member
if conn.dialect.name == "sqlite":
# in sqlite TEXT and STRING column types are the same
return
if conn.dialect.name == "mysql":
op.alter_column(
'connection',
'description',
existing_type=sa.Text(5000),
type_=sa.String(length=5000),
existing_nullable=True,
)
else:
# postgres does not allow size modifier for text type
op.alter_column(
'connection',
'description',
existing_type=sa.Text(),
type_=sa.String(length=5000),
existing_nullable=True,
)
|
from tempest.lib.services.identity.v3 import endpoint_groups_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestEndPointGroupsClient(base.BaseServiceTest):
FAKE_CREATE_ENDPOINT_GROUP = {
"endpoint_group": {
"id": 1,
"name": "FAKE_ENDPOINT_GROUP",
"description": "FAKE SERVICE ENDPOINT GROUP",
"filters": {
"service_id": 1
}
}
}
FAKE_ENDPOINT_GROUP_INFO = {
"endpoint_group": {
"id": 1,
"name": "FAKE_ENDPOINT_GROUP",
"description": "FAKE SERVICE ENDPOINT GROUP",
"links": {
"self": "http://example.com/identity/v3/OS-EP-FILTER/" +
"endpoint_groups/1"
},
"filters": {
"service_id": 1
}
}
}
FAKE_LIST_ENDPOINT_GROUPS = {
"endpoint_groups": [
{
"id": 1,
"name": "SERVICE_GROUP1",
"description": "FAKE SERVICE ENDPOINT GROUP",
"links": {
"self": "http://example.com/identity/v3/OS-EP-FILTER/" +
"endpoint_groups/1"
},
"filters": {
"service_id": 1
}
},
{
"id": 2,
"name": "SERVICE_GROUP2",
"description": "FAKE SERVICE ENDPOINT GROUP",
"links": {
"self": "http://example.com/identity/v3/OS-EP-FILTER/" +
"endpoint_groups/2"
},
"filters": {
"service_id": 2
}
}
]
}
def setUp(self):
super(TestEndPointGroupsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = endpoint_groups_client.EndPointGroupsClient(
fake_auth, 'identity', 'regionOne')
def _test_create_endpoint_group(self, bytes_body=False):
self.check_service_client_function(
self.client.create_endpoint_group,
'tempest.lib.common.rest_client.RestClient.post',
self.FAKE_CREATE_ENDPOINT_GROUP,
bytes_body,
status=201,
name="FAKE_ENDPOINT_GROUP",
filters={'service_id': "1"})
def _test_show_endpoint_group(self, bytes_body=False):
self.check_service_client_function(
self.client.show_endpoint_group,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_ENDPOINT_GROUP_INFO,
bytes_body,
endpoint_group_id="1")
def _test_check_endpoint_group(self, bytes_body=False):
self.check_service_client_function(
self.client.check_endpoint_group,
'tempest.lib.common.rest_client.RestClient.head',
{},
bytes_body,
status=200,
endpoint_group_id="1")
def _test_update_endpoint_group(self, bytes_body=False):
self.check_service_client_function(
self.client.update_endpoint_group,
'tempest.lib.common.rest_client.RestClient.patch',
self.FAKE_ENDPOINT_GROUP_INFO,
bytes_body,
endpoint_group_id="1",
name="NewName")
def _test_list_endpoint_groups(self, bytes_body=False):
self.check_service_client_function(
self.client.list_endpoint_groups,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_ENDPOINT_GROUPS,
bytes_body)
def test_create_endpoint_group_with_str_body(self):
self._test_create_endpoint_group()
def test_create_endpoint_group_with_bytes_body(self):
self._test_create_endpoint_group(bytes_body=True)
def test_show_endpoint_group_with_str_body(self):
self._test_show_endpoint_group()
def test_show_endpoint_group_with_bytes_body(self):
self._test_show_endpoint_group(bytes_body=True)
def test_check_endpoint_group_with_str_body(self):
self._test_check_endpoint_group()
def test_check_endpoint_group_with_bytes_body(self):
self._test_check_endpoint_group(bytes_body=True)
def test_list_endpoint_groups_with_str_body(self):
self._test_list_endpoint_groups()
def test_list_endpoint_groups_with_bytes_body(self):
self._test_list_endpoint_groups(bytes_body=True)
def test_update_endpoint_group_with_str_body(self):
self._test_update_endpoint_group()
def test_update_endpoint_group_with_bytes_body(self):
self._test_update_endpoint_group(bytes_body=True)
def test_delete_endpoint_group(self):
self.check_service_client_function(
self.client.delete_endpoint_group,
'tempest.lib.common.rest_client.RestClient.delete',
{},
endpoint_group_id="1",
status=204)
|
from oslo_config import cfg
from neutron._i18n import _
allowed_address_pair_opts = [
#TODO(limao): use quota framework when it support quota for attributes
cfg.IntOpt('max_allowed_address_pair', default=10,
help=_("Maximum number of allowed address pairs")),
]
def register_allowed_address_pair_opts(cfg=cfg.CONF):
cfg.register_opts(allowed_address_pair_opts)
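# --- Usage sketch (assumption): after registration the option is available
# on the global config object; 10 is the default declared above.
#   register_allowed_address_pair_opts()
#   max_pairs = cfg.CONF.max_allowed_address_pair  # -> 10 unless overridden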
|
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils import sysconfig
import os
import sys
import platform
flags = ''
inc = sysconfig.get_python_inc()
lib = sysconfig.get_config_var("LIBDIR")
if sys.platform == "darwin":
lib = os.path.dirname(lib) + '/Python'
if os.path.isfile(lib):
flags += '-DPYTHON_LIBRARY={lib} '.format(lib=lib)
if os.path.isfile(inc + '/Python.h'):
flags += '-DPYTHON_INCLUDE_DIR={inc} '.format(inc=inc)
print(flags, end='')
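# --- Usage sketch (assumption): the printed -DPYTHON_* flags are meant to be
# spliced into a CMake invocation, e.g. (shell shown as a comment):
#   cmake $(python this_script.py) ..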
|
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
AutoMLCreateDatasetOperator,
AutoMLDeleteDatasetOperator,
AutoMLDeleteModelOperator,
AutoMLImportDataOperator,
AutoMLTrainModelOperator,
)
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_SENTIMENT_BUCKET = os.environ.get("GCP_AUTOML_SENTIMENT_BUCKET", "gs://INVALID BUCKET NAME")
DATASET_ID = ""
MODEL = {
"display_name": "auto_model_1",
"dataset_id": DATASET_ID,
"text_sentiment_model_metadata": {},
}
DATASET = {
"display_name": "test_text_sentiment_dataset",
"text_sentiment_dataset_metadata": {"sentiment_max": 10},
}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_SENTIMENT_BUCKET]}}
extract_object_id = CloudAutoMLHook.extract_object_id
with models.DAG(
"example_automl_text_sentiment",
schedule_interval=None, # Override to match your needs
start_date=datetime(2021, 1, 1),
catchup=False,
user_defined_macros={"extract_object_id": extract_object_id},
tags=['example'],
) as example_dag:
create_dataset_task = AutoMLCreateDatasetOperator(
task_id="create_dataset_task", dataset=DATASET, location=GCP_AUTOML_LOCATION
)
dataset_id = create_dataset_task.output['dataset_id']
import_dataset_task = AutoMLImportDataOperator(
task_id="import_dataset_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
input_config=IMPORT_INPUT_CONFIG,
)
MODEL["dataset_id"] = dataset_id
create_model = AutoMLTrainModelOperator(task_id="create_model", model=MODEL, location=GCP_AUTOML_LOCATION)
model_id = create_model.output['model_id']
delete_model_task = AutoMLDeleteModelOperator(
task_id="delete_model_task",
model_id=model_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
delete_datasets_task = AutoMLDeleteDatasetOperator(
task_id="delete_datasets_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
import_dataset_task >> create_model
delete_model_task >> delete_datasets_task
# Task dependencies created via `XComArgs`:
# create_dataset_task >> import_dataset_task
# create_dataset_task >> create_model
# create_model >> delete_model_task
# create_dataset_task >> delete_datasets_task
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class InitializeOAuth(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the InitializeOAuth Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(InitializeOAuth, self).__init__(temboo_session, '/Library/Bitly/OAuth/InitializeOAuth')
def new_input_set(self):
return InitializeOAuthInputSet()
def _make_result_set(self, result, path):
return InitializeOAuthResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return InitializeOAuthChoreographyExecution(session, exec_id, path)
class InitializeOAuthInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the InitializeOAuth
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccountName(self, value):
"""
Set the value of the AccountName input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
"""
super(InitializeOAuthInputSet, self)._set_input('AccountName', value)
def set_AppKeyName(self, value):
"""
Set the value of the AppKeyName input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
"""
super(InitializeOAuthInputSet, self)._set_input('AppKeyName', value)
def set_AppKeyValue(self, value):
"""
Set the value of the AppKeyValue input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
"""
super(InitializeOAuthInputSet, self)._set_input('AppKeyValue', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((required, string) The Client ID provided by Bitly after registering your application.)
"""
super(InitializeOAuthInputSet, self)._set_input('ClientID', value)
def set_CustomCallbackID(self, value):
"""
Set the value of the CustomCallbackID input for this Choreo. ((optional, string) A unique identifier that you can pass to eliminate the need to wait for a Temboo generated CallbackID. Callback identifiers may only contain numbers, letters, periods, and hyphens.)
"""
super(InitializeOAuthInputSet, self)._set_input('CustomCallbackID', value)
def set_ForwardingURL(self, value):
"""
Set the value of the ForwardingURL input for this Choreo. ((optional, string) The URL that Temboo will redirect your users to after they grant access to your application. This should include the "https://" or "http://" prefix and be a fully qualified URL.)
"""
super(InitializeOAuthInputSet, self)._set_input('ForwardingURL', value)
class InitializeOAuthResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the InitializeOAuth Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_AuthorizationURL(self):
"""
Retrieve the value for the "AuthorizationURL" output from this Choreo execution. ((string) The authorization URL that the application's user needs to go to in order to grant access to your application.)
"""
return self._output.get('AuthorizationURL', None)
def get_CallbackID(self):
"""
Retrieve the value for the "CallbackID" output from this Choreo execution. ((string) An ID used to retrieve the callback data that Temboo stores once your application's user authorizes.)
"""
return self._output.get('CallbackID', None)
class InitializeOAuthChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return InitializeOAuthResultSet(response, path)
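# --- Usage sketch (assumption): the standard Temboo SDK flow; the session
# credentials are placeholders.
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = InitializeOAuth(session)
#   inputs = choreo.new_input_set()
#   inputs.set_ClientID('your-bitly-client-id')
#   results = choreo.execute_with_results(inputs)
#   print results.get_AuthorizationURL()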
|
"""Tests for tensorflow.ops.gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import tensorflow.python.platform
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import types
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import state_grad
from tensorflow.python.ops.constant_op import constant
from tensorflow.python.ops.nn_ops import bias_add
from tensorflow.python.platform import googletest
def _OpsBetween(graph, to_ops, from_ops):
"""Build the list of operations between two lists of Operations.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
Returns:
The list of operations between "from_ops" and "to_ops", sorted by
decreasing operation id. This list contains all elements of to_ops.
TODO(touts): Think about returning an empty list if from_ops are not
reachable from to_ops. Presently it returns to_ops in that case.
"""
# List of booleans, indexed by operation id, indicating if
# an op is reached from the output of "input_ops".
reached_ops = [False] * (graph._last_id + 1)
# We only care to reach up to "output_ops" so we mark the
# output ops as reached to avoid recursing past them.
for op in to_ops:
reached_ops[op._id] = True
gradients._MarkReachedOps(from_ops, reached_ops)
between_ops = gradients._GatherInputs(to_ops, reached_ops)
between_ops.sort(lambda x, y: y._id - x._id)
return between_ops
class GradientsTest(test_util.TensorFlowTestCase):
def _OpNames(self, op_list):
return ["%s/%d" % (str(op.name), op._id) for op in op_list]
def _assertOpListEqual(self, ops1, ops2):
self.assertEquals(self._OpNames(ops1), self._OpNames(ops2))
def testOpsBetweenSimple(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
# Full graph
self._assertOpListEqual([t3.op, t2.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op, t2.op]))
# Only t1, t3.
self._assertOpListEqual([t3.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op]))
def testOpsBetweenUnreachable(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
_ = array_ops.pack([t1, t2])
t4 = constant(1.0)
t5 = constant(2.0)
t6 = array_ops.pack([t4, t5])
# Elements of to_ops are always listed.
self._assertOpListEqual([t6.op], _OpsBetween(g, [t6.op], [t1.op]))
def testOpsBetweenCut(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
t4 = constant([1.0])
t5 = array_ops.concat(0, [t4, t3])
t6 = constant([2.0])
t7 = array_ops.concat(0, [t5, t6])
self._assertOpListEqual([t7.op, t5.op, t4.op],
_OpsBetween(g, [t7.op], [t4.op]))
def testOpsBetweenCycle(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
t4 = array_ops.concat(0, [t3, t3, t3])
t5 = constant([1.0])
t6 = array_ops.concat(0, [t4, t5])
t7 = array_ops.concat(0, [t6, t3])
self._assertOpListEqual([t6.op, t4.op, t3.op],
_OpsBetween(g, [t6.op], [t3.op]))
self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
_OpsBetween(g, [t7.op], [t1.op, t5.op]))
self._assertOpListEqual([t6.op, t5.op, t4.op, t3.op, t2.op],
_OpsBetween(g, [t6.op], [t2.op, t5.op]))
def testGradients(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[32, 100], name="in")
w = constant(1.0, shape=[100, 10], name="w")
b = constant(1.0, shape=[10], name="b")
xw = math_ops.matmul(inp, w, name="xw")
h = bias_add(xw, b, name="h")
w_grad = gradients.gradients(h, w)[0]
self.assertEquals("MatMul", w_grad.op.type)
self.assertEquals(w_grad.op._original_op, xw.op)
self.assertTrue(w_grad.op.get_attr("transpose_a"))
self.assertFalse(w_grad.op.get_attr("transpose_b"))
def testUnusedOutput(self):
with ops.Graph().as_default():
w = constant(1.0, shape=[2, 2])
x = constant(1.0, shape=[2, 2])
wx = math_ops.matmul(w, x)
split_wx = array_ops.split(0, 2, wx)
c = math_ops.reduce_sum(split_wx[1])
gw = gradients.gradients(c, [w])[0]
self.assertEquals("MatMul", gw.op.type)
def testColocateGradients(self):
with ops.Graph().as_default() as g:
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
with g.device("/gpu:0"):
wx = math_ops.matmul(w, x)
gw = gradients.gradients(wx, [w], colocate_gradients_with_ops=True)[0]
self.assertEquals("/gpu:0", gw.device)
def testColocateGradientsWithAggregation(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
wx = math_ops.matmul(w, x)
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEquals("/gpu:1", gw1.device)
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertEquals(None, gw2.device)
def testBoundaryStop(self):
# Test that we don't differentiate 'x'. The gradient function for 'x' is
# set explicitly to None so we will get an exception if the gradient code
# tries to differentiate 'x'.
with ops.Graph().as_default() as g:
c = constant(1.0)
x = array_ops.identity(c)
y = x + 1.0
z = y + 1
grads = gradients.gradients(z, [x])
self.assertTrue(all([x for x in grads]))
def testBoundaryContinue(self):
# Test that we differentiate both 'x' and 'y' correctly when x is a
# predecessor of y.
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y * 3.0
grads = gradients.gradients(z, [x, y])
self.assertTrue(all([x for x in grads]))
self.assertEqual(6.0, grads[0].eval())
def testAggregationMethodAccumulateN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=
gradients.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodAddN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=gradients.AggregationMethod.ADD_N)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodTree(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=gradients.AggregationMethod.EXPERIMENTAL_TREE)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testNoGradientForStringOutputs(self):
with ops.Graph().as_default() as g:
@ops.RegisterGradient("TestOp")
def _TestOpGrad(op, float_grad, string_grad):
"""Gradient function for TestOp."""
self.assertEquals(float_grad.dtype, types.float32)
self.assertFalse(string_grad)
return float_grad
ops.RegisterShape("TestOp")(None)
c = constant(1.0)
x, y = g.create_op("TestOp", [c], [types.float32, types.string]).outputs
z = x * 2.0
w = z * 3.0
grads = gradients.gradients(z, [c])
self.assertTrue(isinstance(grads[0], ops.Tensor))
class StopGradientTest(test_util.TensorFlowTestCase):
def testStopGradient(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[100, 32], name="in")
out = array_ops.stop_gradient(inp)
igrad = gradients.gradients(out, inp)[0]
assert igrad is None
class HessianVectorProductTest(test_util.TensorFlowTestCase):
def testHessianVectorProduct(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that HessianVectorProduct matches multiplication by the
# explicit Hessian.
# Specifically, the Hessian of f(x) = x^T A x is
# H = A + A^T.
# We expect HessianVectorProduct(f(x), x, v) to be H v.
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
v_value = rng.randn(m, 1).astype("float32")
x_value = rng.randn(m, 1).astype("float32")
hess_value = mat_value + mat_value.T
hess_v_value = np.dot(hess_value, v_value)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
mat = constant_op.constant(mat_value)
v = constant_op.constant(v_value)
x = constant_op.constant(x_value)
mat_x = math_ops.matmul(mat, x, name="Ax")
x_mat_x = math_ops.matmul(array_ops.transpose(x), mat_x, name="xAx")
hess_v = gradients._hessian_vector_product(x_mat_x, [x], [v])[0]
hess_v_actual = hess_v.eval()
self.assertAllClose(hess_v_value, hess_v_actual)
class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
def testIndexedSlicesToTensor(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.mul(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testInt64Indices(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
c_sparse = ops.IndexedSlices(
c_sparse.values, math_ops.cast(c_sparse.indices, types.int64),
c_sparse.dense_shape)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.mul(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testWarnings(self):
# Smaller than the threshold: no warning.
c_sparse = ops.IndexedSlices(array_ops.placeholder(types.float32),
array_ops.placeholder(types.int32),
constant([4, 4, 4, 4]))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(0, len(w))
# Greater than or equal to the threshold: warning.
c_sparse = ops.IndexedSlices(array_ops.placeholder(types.float32),
array_ops.placeholder(types.int32),
constant([100, 100, 100, 100]))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"with 100000000 elements. This may consume a large amount of memory."
in str(w[0].message))
# Unknown dense shape: warning.
c_sparse = ops.IndexedSlices(array_ops.placeholder(types.float32),
array_ops.placeholder(types.int32),
array_ops.placeholder(types.int32))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"of unknown shape. This may consume a large amount of memory."
in str(w[0].message))
if __name__ == "__main__":
googletest.main()
|
"""Test for the smhi weather entity."""
import asyncio
from datetime import datetime
import logging
from unittest.mock import AsyncMock, Mock, patch
from smhi.smhi_lib import APIURL_TEMPLATE, SmhiForecastException
from homeassistant.components.smhi import weather as weather_smhi
from homeassistant.components.smhi.const import ATTR_SMHI_CLOUDINESS
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_WEATHER_ATTRIBUTION,
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_VISIBILITY,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
DOMAIN as WEATHER_DOMAIN,
)
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry, load_fixture
_LOGGER = logging.getLogger(__name__)
TEST_CONFIG = {"name": "test", "longitude": "17.84197", "latitude": "59.32624"}
async def test_setup_hass(hass: HomeAssistant, aioclient_mock) -> None:
"""Test for successfully setting up the smhi platform.
This test are deeper integrated with the core. Since only
config_flow is used the component are setup with
"async_forward_entry_setup". The actual result are tested
with the entity state rather than "per function" unity tests
"""
uri = APIURL_TEMPLATE.format(TEST_CONFIG["longitude"], TEST_CONFIG["latitude"])
api_response = load_fixture("smhi.json")
aioclient_mock.get(uri, text=api_response)
entry = MockConfigEntry(domain="smhi", data=TEST_CONFIG)
await hass.config_entries.async_forward_entry_setup(entry, WEATHER_DOMAIN)
await hass.async_block_till_done()
assert aioclient_mock.call_count == 1
    # Testing the actual entity state for deeper coverage than a
    # normal unit test
state = hass.states.get("weather.smhi_test")
assert state.state == "sunny"
assert state.attributes[ATTR_SMHI_CLOUDINESS] == 50
assert state.attributes[ATTR_WEATHER_ATTRIBUTION].find("SMHI") >= 0
assert state.attributes[ATTR_WEATHER_HUMIDITY] == 55
assert state.attributes[ATTR_WEATHER_PRESSURE] == 1024
assert state.attributes[ATTR_WEATHER_TEMPERATURE] == 17
assert state.attributes[ATTR_WEATHER_VISIBILITY] == 50
assert state.attributes[ATTR_WEATHER_WIND_SPEED] == 7
assert state.attributes[ATTR_WEATHER_WIND_BEARING] == 134
    _LOGGER.debug(state.attributes)
assert len(state.attributes["forecast"]) == 4
forecast = state.attributes["forecast"][1]
assert forecast[ATTR_FORECAST_TIME] == "2018-09-02T12:00:00"
assert forecast[ATTR_FORECAST_TEMP] == 21
assert forecast[ATTR_FORECAST_TEMP_LOW] == 6
assert forecast[ATTR_FORECAST_PRECIPITATION] == 0
assert forecast[ATTR_FORECAST_CONDITION] == "partlycloudy"
def test_properties_no_data(hass: HomeAssistant) -> None:
"""Test properties when no API data available."""
weather = weather_smhi.SmhiWeather("name", "10", "10")
weather.hass = hass
assert weather.name == "name"
assert weather.should_poll is True
assert weather.temperature is None
assert weather.humidity is None
assert weather.wind_speed is None
assert weather.wind_bearing is None
assert weather.visibility is None
assert weather.pressure is None
assert weather.cloudiness is None
assert weather.condition is None
assert weather.forecast is None
assert weather.temperature_unit == TEMP_CELSIUS
def test_properties_unknown_symbol() -> None:
"""Test behaviour when unknown symbol from API."""
hass = Mock()
data = Mock()
data.temperature = 5
data.mean_precipitation = 0.5
data.total_precipitation = 1
data.humidity = 5
data.wind_speed = 10
data.wind_direction = 180
data.horizontal_visibility = 6
data.pressure = 1008
data.cloudiness = 52
data.symbol = 100 # Faulty symbol
data.valid_time = datetime(2018, 1, 1, 0, 1, 2)
data2 = Mock()
data2.temperature = 5
data2.mean_precipitation = 0.5
data2.total_precipitation = 1
data2.humidity = 5
data2.wind_speed = 10
data2.wind_direction = 180
data2.horizontal_visibility = 6
data2.pressure = 1008
data2.cloudiness = 52
data2.symbol = 100 # Faulty symbol
data2.valid_time = datetime(2018, 1, 1, 12, 1, 2)
data3 = Mock()
data3.temperature = 5
data3.mean_precipitation = 0.5
data3.total_precipitation = 1
data3.humidity = 5
data3.wind_speed = 10
data3.wind_direction = 180
data3.horizontal_visibility = 6
data3.pressure = 1008
data3.cloudiness = 52
data3.symbol = 100 # Faulty symbol
data3.valid_time = datetime(2018, 1, 2, 12, 1, 2)
testdata = [data, data2, data3]
weather = weather_smhi.SmhiWeather("name", "10", "10")
weather.hass = hass
weather._forecasts = testdata
assert weather.condition is None
forecast = weather.forecast[0]
assert forecast[ATTR_FORECAST_CONDITION] is None
async def test_refresh_weather_forecast_exceeds_retries(hass) -> None:
"""Test the refresh weather forecast function."""
with patch.object(
hass.helpers.event, "async_call_later"
) as call_later, patch.object(
weather_smhi.SmhiWeather,
"get_weather_forecast",
side_effect=SmhiForecastException(),
):
weather = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
weather.hass = hass
weather._fail_count = 2
await weather.async_update()
assert weather._forecasts is None
assert not call_later.mock_calls
async def test_refresh_weather_forecast_timeout(hass) -> None:
"""Test timeout exception."""
weather = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
weather.hass = hass
with patch.object(
hass.helpers.event, "async_call_later"
) as call_later, patch.object(
weather_smhi.SmhiWeather, "retry_update"
), patch.object(
weather_smhi.SmhiWeather,
"get_weather_forecast",
side_effect=asyncio.TimeoutError,
):
await weather.async_update()
assert len(call_later.mock_calls) == 1
# Assert we are going to wait RETRY_TIMEOUT seconds
assert call_later.mock_calls[0][1][0] == weather_smhi.RETRY_TIMEOUT
async def test_refresh_weather_forecast_exception() -> None:
"""Test any exception."""
hass = Mock()
weather = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
weather.hass = hass
with patch.object(
hass.helpers.event, "async_call_later"
) as call_later, patch.object(
weather,
"get_weather_forecast",
side_effect=SmhiForecastException(),
):
await weather.async_update()
assert len(call_later.mock_calls) == 1
# Assert we are going to wait RETRY_TIMEOUT seconds
assert call_later.mock_calls[0][1][0] == weather_smhi.RETRY_TIMEOUT
async def test_retry_update():
"""Test retry function of refresh forecast."""
hass = Mock()
weather = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
weather.hass = hass
with patch.object(weather, "async_update", AsyncMock()) as update:
await weather.retry_update(None)
assert len(update.mock_calls) == 1
def test_condition_class():
"""Test condition class."""
def get_condition(index: int) -> str:
"""Return condition given index."""
return [k for k, v in weather_smhi.CONDITION_CLASSES.items() if index in v][0]
# SMHI definitions as follows, see
# http://opendata.smhi.se/apidocs/metfcst/parameters.html
# 1. Clear sky
assert get_condition(1) == "sunny"
# 2. Nearly clear sky
assert get_condition(2) == "sunny"
# 3. Variable cloudiness
assert get_condition(3) == "partlycloudy"
# 4. Halfclear sky
assert get_condition(4) == "partlycloudy"
# 5. Cloudy sky
assert get_condition(5) == "cloudy"
# 6. Overcast
assert get_condition(6) == "cloudy"
# 7. Fog
assert get_condition(7) == "fog"
# 8. Light rain showers
assert get_condition(8) == "rainy"
# 9. Moderate rain showers
assert get_condition(9) == "rainy"
# 18. Light rain
assert get_condition(18) == "rainy"
# 19. Moderate rain
assert get_condition(19) == "rainy"
# 10. Heavy rain showers
assert get_condition(10) == "pouring"
# 20. Heavy rain
assert get_condition(20) == "pouring"
# 21. Thunder
assert get_condition(21) == "lightning"
# 11. Thunderstorm
assert get_condition(11) == "lightning-rainy"
# 15. Light snow showers
assert get_condition(15) == "snowy"
# 16. Moderate snow showers
assert get_condition(16) == "snowy"
# 17. Heavy snow showers
assert get_condition(17) == "snowy"
# 25. Light snowfall
assert get_condition(25) == "snowy"
# 26. Moderate snowfall
assert get_condition(26) == "snowy"
# 27. Heavy snowfall
assert get_condition(27) == "snowy"
# 12. Light sleet showers
assert get_condition(12) == "snowy-rainy"
# 13. Moderate sleet showers
assert get_condition(13) == "snowy-rainy"
# 14. Heavy sleet showers
assert get_condition(14) == "snowy-rainy"
# 22. Light sleet
assert get_condition(22) == "snowy-rainy"
# 23. Moderate sleet
assert get_condition(23) == "snowy-rainy"
# 24. Heavy sleet
assert get_condition(24) == "snowy-rainy"
|
import os
from requestbuilder import Arg
from euca2ools.commands.iam import IAMRequest, AS_ACCOUNT, arg_user
class CreateSigningCertificate(IAMRequest):
DESCRIPTION = '[Eucalyptus only] Create a new signing certificate'
ARGS = [arg_user(nargs='?', help='''user to create the signing
certificate for (default: current user)'''),
Arg('--out', metavar='FILE', route_to=None,
help='file to write the certificate to (default: stdout)'),
Arg('--keyout', metavar='FILE', route_to=None,
help='file to write the private key to (default: stdout)'),
AS_ACCOUNT]
def postprocess(self, result):
if self.args['out']:
with open(self.args['out'], 'w') as certfile:
certfile.write(result['Certificate']['CertificateBody'])
if self.args['keyout']:
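            # Tighten the umask so the private key file is created without
            # group/other read permissions, then restore the previous umask.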
old_umask = os.umask(0o077)
with open(self.args['keyout'], 'w') as keyfile:
keyfile.write(result['Certificate']['PrivateKey'])
os.umask(old_umask)
def print_result(self, result):
print result['Certificate']['CertificateId']
if not self.args['out']:
print result['Certificate']['CertificateBody']
if not self.args['keyout']:
print result['Certificate']['PrivateKey']
|
from requestbuilder import Arg
from euca2ools.commands.ec2 import EC2Request
class DeleteNetworkAclEntry(EC2Request):
    DESCRIPTION = 'Delete a network ACL entry'
ARGS = [Arg('NetworkAclId', metavar='NACL', help='''ID of the
network ACL to delete an entry from (required)'''),
Arg('-n', '--rule-number', dest='RuleNumber', required=True,
type=int, help='number of the entry to delete (required)'),
Arg('--egress', dest='Egress', action='store_true', help='''delete
an egress entry (default: delete an ingress entry)''')]
|
from __future__ import unicode_literals
import json
from django.test import TestCase, override_settings
from django.utils.http import urlquote
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Permission
from django.core.files.uploadedfile import SimpleUploadedFile
from django.template.defaultfilters import filesizeformat
try:
from django.utils.http import RFC3986_SUBDELIMS
urlquote_safechars = RFC3986_SUBDELIMS + str('/~:@')
except ImportError:  # < Django 1.8
urlquote_safechars = '/'
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailimages.utils import generate_signature
from .utils import Image, get_test_image_file
class TestImageIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/index.html')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
def test_ordering(self):
orderings = ['title', '-created_at']
for ordering in orderings:
response = self.get({'ordering': ordering})
self.assertEqual(response.status_code, 200)
class TestImageAddView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailimages:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
def test_add(self):
response = self.post({
'title': "Test image",
'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
# Check that the image was created
images = Image.objects.filter(title="Test image")
self.assertEqual(images.count(), 1)
# Test that size was populated correctly
image = images.first()
self.assertEqual(image.width, 640)
self.assertEqual(image.height, 480)
# Test that the file_size field was set
self.assertTrue(image.file_size)
def test_add_no_file_selected(self):
response = self.post({
'title': "Test image",
})
# Shouldn't redirect anywhere
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
# The form should have an error
self.assertFormError(response, 'form', 'file', "This field is required.")
@override_settings(WAGTAILIMAGES_MAX_UPLOAD_SIZE=1)
def test_add_too_large_file(self):
file_content = get_test_image_file().file.getvalue()
response = self.post({
'title': "Test image",
'file': SimpleUploadedFile('test.png', file_content),
})
# Shouldn't redirect anywhere
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
# The form should have an error
self.assertFormError(response, 'form', 'file', "This file is too big ({file_size}). Maximum filesize {max_file_size}.".format(
file_size=filesizeformat(len(file_content)),
max_file_size=filesizeformat(1),
))
class TestImageEditView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create an image to edit
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages:edit', args=(self.image.id,)), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailimages:edit', args=(self.image.id,)), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
def test_edit(self):
response = self.post({
'title': "Edited",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
# Check that the image was edited
image = Image.objects.get(id=self.image.id)
self.assertEqual(image.title, "Edited")
def test_edit_with_new_image_file(self):
file_content = get_test_image_file().file.getvalue()
# Change the file size of the image
self.image.file_size = 100000
self.image.save()
response = self.post({
'title': "Edited",
'file': SimpleUploadedFile('new.png', file_content),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
# Check that the image file size changed (assume it changed to the correct value)
image = Image.objects.get(id=self.image.id)
self.assertNotEqual(image.file_size, 100000)
def test_with_missing_image_file(self):
self.image.file.delete(False)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
class TestImageDeleteView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create an image to edit
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages:delete', args=(self.image.id,)), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailimages:delete', args=(self.image.id,)), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/confirm_delete.html')
def test_delete(self):
response = self.post({
'hello': 'world'
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
# Check that the image was deleted
images = Image.objects.filter(title="Test image")
self.assertEqual(images.count(), 0)
class TestImageChooserView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages:chooser'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.js')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestImageChooserChosenView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create an image to edit
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages:image_chosen', args=(self.image.id,)), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/image_chosen.js')
# TODO: Test posting
class TestImageChooserUploadView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages:chooser_upload'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.js')
def test_upload(self):
response = self.client.post(reverse('wagtailimages:chooser_upload'), {
'title': "Test image",
'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
})
# Check response
self.assertEqual(response.status_code, 200)
# Check that the image was created
images = Image.objects.filter(title="Test image")
self.assertEqual(images.count(), 1)
# Test that size was populated correctly
image = images.first()
self.assertEqual(image.width, 640)
self.assertEqual(image.height, 480)
def test_upload_no_file_selected(self):
response = self.client.post(reverse('wagtailimages:chooser_upload'), {
'title': "Test image",
})
# Shouldn't redirect anywhere
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
# The form should have an error
self.assertFormError(response, 'uploadform', 'file', "This field is required.")
class TestMultipleImageUploader(TestCase, WagtailTestUtils):
"""
This tests the multiple image upload views located in wagtailimages/views/multiple.py
"""
def setUp(self):
self.login()
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def test_add(self):
"""
This tests that the add view responds correctly on a GET request
"""
# Send request
response = self.client.get(reverse('wagtailimages:add_multiple'))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/multiple/add.html')
@override_settings(WAGTAILIMAGES_MAX_UPLOAD_SIZE=1000)
def test_add_max_file_size_context_variables(self):
response = self.client.get(reverse('wagtailimages:add_multiple'))
self.assertEqual(response.context['max_filesize'], 1000)
self.assertEqual(response.context['error_max_file_size'], "This file is too big. Maximum filesize 1000\xa0bytes.")
def test_add_post(self):
"""
This tests that a POST request to the add view saves the image and returns an edit form
"""
response = self.client.post(reverse('wagtailimages:add_multiple'), {
'files[]': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertTemplateUsed(response, 'wagtailimages/multiple/edit_form.html')
# Check image
self.assertIn('image', response.context)
self.assertEqual(response.context['image'].title, 'test.png')
self.assertTrue(response.context['image'].file_size)
# Check form
self.assertIn('form', response.context)
self.assertEqual(response.context['form'].initial['title'], 'test.png')
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertIn('form', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], response.context['image'].id)
self.assertTrue(response_json['success'])
def test_add_post_noajax(self):
"""
This tests that only AJAX requests are allowed to POST to the add view
"""
response = self.client.post(reverse('wagtailimages:add_multiple'), {})
# Check response
self.assertEqual(response.status_code, 400)
def test_add_post_nofile(self):
"""
This tests that the add view checks for a file when a user POSTs to it
"""
response = self.client.post(reverse('wagtailimages:add_multiple'), {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 400)
def test_add_post_badfile(self):
"""
This tests that the add view checks for a file when a user POSTs to it
"""
response = self.client.post(reverse('wagtailimages:add_multiple'), {
'files[]': SimpleUploadedFile('test.png', b"This is not an image!"),
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
response_json = json.loads(response.content.decode())
self.assertNotIn('image_id', response_json)
self.assertNotIn('form', response_json)
self.assertIn('success', response_json)
self.assertIn('error_message', response_json)
self.assertFalse(response_json['success'])
self.assertEqual(response_json['error_message'], "Not a supported image format. Supported formats: GIF, JPEG, PNG.")
def test_edit_get(self):
"""
This tests that a GET request to the edit view returns a 405 "METHOD NOT ALLOWED" response
"""
# Send request
response = self.client.get(reverse('wagtailimages:edit_multiple', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 405)
def test_edit_post(self):
"""
This tests that a POST request to the edit view edits the image
"""
# Send request
response = self.client.post(reverse('wagtailimages:edit_multiple', args=(self.image.id, )), {
('image-%d-title' % self.image.id): "New title!",
('image-%d-tags' % self.image.id): "",
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertNotIn('form', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], self.image.id)
self.assertTrue(response_json['success'])
def test_edit_post_noajax(self):
"""
This tests that a POST request to the edit view without AJAX returns a 400 response
"""
# Send request
response = self.client.post(reverse('wagtailimages:edit_multiple', args=(self.image.id, )), {
('image-%d-title' % self.image.id): "New title!",
('image-%d-tags' % self.image.id): "",
})
# Check response
self.assertEqual(response.status_code, 400)
def test_edit_post_validation_error(self):
"""
This tests that a POST request to the edit page returns a json document with "success=False"
and a form with the validation error indicated
"""
# Send request
response = self.client.post(reverse('wagtailimages:edit_multiple', args=(self.image.id, )), {
('image-%d-title' % self.image.id): "", # Required
('image-%d-tags' % self.image.id): "",
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertTemplateUsed(response, 'wagtailimages/multiple/edit_form.html')
# Check that a form error was raised
self.assertFormError(response, 'form', 'title', "This field is required.")
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertIn('form', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], self.image.id)
self.assertFalse(response_json['success'])
def test_delete_get(self):
"""
This tests that a GET request to the delete view returns a 405 "METHOD NOT ALLOWED" response
"""
# Send request
response = self.client.get(reverse('wagtailimages:delete_multiple', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 405)
def test_delete_post(self):
"""
This tests that a POST request to the delete view deletes the image
"""
# Send request
response = self.client.post(reverse('wagtailimages:delete_multiple', args=(self.image.id, )), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Make sure the image is deleted
self.assertFalse(Image.objects.filter(id=self.image.id).exists())
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], self.image.id)
self.assertTrue(response_json['success'])
def test_delete_post_noajax(self):
"""
This tests that a POST request to the delete view without AJAX returns a 400 response
"""
# Send request
response = self.client.post(reverse('wagtailimages:delete_multiple', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 400)
class TestURLGeneratorView(TestCase, WagtailTestUtils):
def setUp(self):
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
# Login
self.user = self.login()
def test_get(self):
"""
This tests that the view responds correctly for a user with edit permissions on this image
"""
# Get
response = self.client.get(reverse('wagtailimages:url_generator', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/url_generator.html')
def test_get_bad_permissions(self):
"""
        This tests that the view gives a 403 if a user without correct permissions attempts to access it
"""
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get
response = self.client.get(reverse('wagtailimages:url_generator', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 403)
class TestGenerateURLView(TestCase, WagtailTestUtils):
def setUp(self):
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
# Login
self.user = self.login()
def test_get(self):
"""
This tests that the view responds correctly for a user with edit permissions on this image
"""
# Get
response = self.client.get(reverse('wagtailimages:generate_url', args=(self.image.id, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
content_json = json.loads(response.content.decode())
self.assertEqual(set(content_json.keys()), set(['url', 'preview_url']))
expected_url = 'http://localhost/images/%(signature)s/%(image_id)d/fill-800x600/' % {
'signature': urlquote(generate_signature(self.image.id, 'fill-800x600').decode(), safe=urlquote_safechars),
'image_id': self.image.id,
}
self.assertEqual(content_json['url'], expected_url)
expected_preview_url = reverse('wagtailimages:preview', args=(self.image.id, 'fill-800x600'))
self.assertEqual(content_json['preview_url'], expected_preview_url)
def test_get_bad_permissions(self):
"""
        This tests that the view gives a 403 if a user without correct permissions attempts to access it
"""
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get
response = self.client.get(reverse('wagtailimages:generate_url', args=(self.image.id, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 403)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
self.assertJSONEqual(response.content.decode(), json.dumps({
'error': 'You do not have permission to generate a URL for this image.',
}))
def test_get_bad_image(self):
"""
This tests that the view gives a 404 response if a user attempts to use it with an image which doesn't exist
"""
# Get
response = self.client.get(reverse('wagtailimages:generate_url', args=(self.image.id + 1, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 404)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
self.assertJSONEqual(response.content.decode(), json.dumps({
'error': 'Cannot find image.',
}))
def test_get_bad_filter_spec(self):
"""
This tests that the view gives a 400 response if the user attempts to use it with an invalid filter spec
"""
# Get
response = self.client.get(reverse('wagtailimages:generate_url', args=(self.image.id, 'bad-filter-spec')))
# Check response
self.assertEqual(response.status_code, 400)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
self.assertJSONEqual(response.content.decode(), json.dumps({
'error': 'Invalid filter spec.',
}))
class TestPreviewView(TestCase, WagtailTestUtils):
def setUp(self):
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
# Login
self.user = self.login()
def test_get(self):
"""
Test a valid GET request to the view
"""
# Get the image
response = self.client.get(reverse('wagtailimages:preview', args=(self.image.id, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'image/png')
def test_get_invalid_filter_spec(self):
"""
Test that an invalid filter spec returns a 400 response
        This is very unlikely to happen in reality. A user would have
        to create a signature for the invalid filter spec, which can't be
        done with Wagtail's built-in URL generator. We should test it
        anyway though.
"""
# Get the image
response = self.client.get(reverse('wagtailimages:preview', args=(self.image.id, 'bad-filter-spec')))
# Check response
self.assertEqual(response.status_code, 400)
|
from __future__ import absolute_import
input_name = '../examples/multi_physics/thermo_elasticity_ess.py'
output_name = 'test_thermo_elasticity_ess.vtk'
from tests_basic import TestInput
class Test(TestInput):
pass
|
def get_attributes_display_map(variant, attributes):
display = {}
for attribute in attributes:
value = variant.get_attribute(attribute.pk)
if value:
choices = {a.pk: a for a in attribute.values.all()}
attr = choices.get(value)
if attr:
display[attribute.pk] = attr
else:
display[attribute.pk] = value
return display
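
# Illustrative behaviour (hypothetical pks): for an attribute with pk=1 whose
# values include an AttributeValue with pk=7, a variant storing {1: 7} yields
# {1: <AttributeValue pk=7>}; a stored value pk with no matching choice falls
# back to the raw stored value.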
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
os.environ['DJANGO_SETTINGS_MODULE'] = 'oauth_provider.runtests.settings'
from django.conf import settings
from django.test.utils import get_runner
from south.management.commands import patch_for_test_db_setup
def usage():
return """
Usage: python runtests.py [UnitTestClass].[method]
You can pass the Class name of the `UnitTestClass` you want to test.
Append a method name if you only want to test a specific method of that class.
"""
def main():
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=2)
if len(sys.argv) == 2:
test_case = '.' + sys.argv[1]
elif len(sys.argv) == 1:
test_case = ''
else:
print(usage())
sys.exit(1)
patch_for_test_db_setup()
failures = test_runner.run_tests(['tests' + test_case])
sys.exit(failures)
if __name__ == '__main__':
main()
|
import taxcalc
|
import cherrypy
from cherrypy.test import helper
class ETagTest(helper.CPWebCase):
def setup_server():
class Root:
def resource(self):
return "Oh wah ta goo Siam."
resource.exposed = True
def fail(self, code):
code = int(code)
if 300 <= code <= 399:
raise cherrypy.HTTPRedirect([], code)
else:
raise cherrypy.HTTPError(code)
fail.exposed = True
def unicoded(self):
return u'I am a \u1ee4nicode string.'
unicoded.exposed = True
unicoded._cp_config = {'tools.encode.on': True}
conf = {'/': {'tools.etags.on': True,
'tools.etags.autotags': True,
}}
cherrypy.tree.mount(Root(), config=conf)
setup_server = staticmethod(setup_server)
def test_etags(self):
self.getPage("/resource")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html;charset=utf-8')
self.assertBody('Oh wah ta goo Siam.')
etag = self.assertHeader('ETag')
# Test If-Match (both valid and invalid)
self.getPage("/resource", headers=[('If-Match', etag)])
self.assertStatus("200 OK")
self.getPage("/resource", headers=[('If-Match', "*")])
self.assertStatus("200 OK")
self.getPage("/resource", headers=[('If-Match', "*")], method="POST")
self.assertStatus("200 OK")
self.getPage("/resource", headers=[('If-Match', "a bogus tag")])
self.assertStatus("412 Precondition Failed")
# Test If-None-Match (both valid and invalid)
self.getPage("/resource", headers=[('If-None-Match', etag)])
self.assertStatus(304)
self.getPage("/resource", method='POST', headers=[('If-None-Match', etag)])
self.assertStatus("412 Precondition Failed")
self.getPage("/resource", headers=[('If-None-Match', "*")])
self.assertStatus(304)
self.getPage("/resource", headers=[('If-None-Match', "a bogus tag")])
self.assertStatus("200 OK")
def test_errors(self):
self.getPage("/resource")
self.assertStatus(200)
etag = self.assertHeader('ETag')
# Test raising errors in page handler
self.getPage("/fail/412", headers=[('If-Match', etag)])
self.assertStatus(412)
self.getPage("/fail/304", headers=[('If-Match', etag)])
self.assertStatus(304)
self.getPage("/fail/412", headers=[('If-None-Match', "*")])
self.assertStatus(412)
self.getPage("/fail/304", headers=[('If-None-Match', "*")])
self.assertStatus(304)
def test_unicode_body(self):
self.getPage("/unicoded")
self.assertStatus(200)
etag1 = self.assertHeader('ETag')
self.getPage("/unicoded", headers=[('If-Match', etag1)])
self.assertStatus(200)
self.assertHeader('ETag', etag1)
|
import re
import urlparse
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class SpeedyshareCom(SimpleHoster):
__name__ = "SpeedyshareCom"
__type__ = "hoster"
__version__ = "0.06"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?(speedyshare\.com|speedy\.sh)/\w+'
__config__ = [("use_premium", "bool", "Use premium account if available", True)]
__description__ = """Speedyshare.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zapp-brannigan", "fuerst.reinje@web.de")]
NAME_PATTERN = r'class=downloadfilename>(?P<N>.*)</span></td>'
SIZE_PATTERN = r'class=sizetagtext>(?P<S>.*) (?P<U>[kKmM]?[iI]?[bB]?)</div>'
OFFLINE_PATTERN = r'class=downloadfilenamenotfound>.*</span>'
LINK_FREE_PATTERN = r'<a href=\'(.*)\'><img src=/gf/slowdownload\.png alt=\'Slow Download\' border=0'
def setup(self):
self.multiDL = False
self.chunk_limit = 1
def handle_free(self, pyfile):
m = re.search(self.LINK_FREE_PATTERN, self.html)
        # Only set the download link when the free-download pattern matched
        if m is not None:
            self.link = m.group(1)
getInfo = create_getInfo(SpeedyshareCom)
|
{
'name': 'Online Members Directory',
'category': 'Website',
'summary': 'Publish your members directory',
'version': '1.0',
'description': """
Publish your members/association directory publicly.
""",
'depends': ['website_partner', 'website_google_map', 'association', 'website_sale'],
'data': [
'data/membership_data.xml',
'views/website_membership_templates.xml',
'security/ir.model.access.csv',
'security/website_membership.xml',
],
'demo': ['data/membership_demo.xml'],
'qweb': ['static/src/xml/*.xml'],
'installable': True,
}
|
from osv import fields, osv, orm
from tools.translate import _
from datetime import datetime
from datetime import timedelta
from tools.safe_eval import safe_eval
from tools import ustr
import pooler
import re
import time
import tools
def get_datetime(date_field):
'''Return a datetime from a date string or a datetime string'''
#complete date time if date_field contains only a date
date_split = date_field.split(' ')
if len(date_split) == 1:
date_field = date_split[0] + " 00:00:00"
return datetime.strptime(date_field[:19], '%Y-%m-%d %H:%M:%S')
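
# A quick sketch of the intended behaviour: both date-only and full datetime
# strings parse, e.g. get_datetime('2011-05-01') -> datetime(2011, 5, 1, 0, 0)
# and get_datetime('2011-05-01 13:45:10') -> datetime(2011, 5, 1, 13, 45, 10).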
class base_action_rule(osv.osv):
""" Base Action Rules """
_name = 'base.action.rule'
_description = 'Action Rules'
def _state_get(self, cr, uid, context=None):
""" Get State
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: A standard dictionary for contextual values """
return self.state_get(cr, uid, context=context)
def state_get(self, cr, uid, context=None):
""" Get State
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: A standard dictionary for contextual values """
return [('', '')]
def priority_get(self, cr, uid, context=None):
""" Get Priority
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: A standard dictionary for contextual values """
return [('', '')]
_columns = {
'name': fields.char('Rule Name', size=64, required=True),
'model_id': fields.many2one('ir.model', 'Object', required=True),
'create_date': fields.datetime('Create Date', readonly=1),
'active': fields.boolean('Active', help="If the active field is set to False,\
it will allow you to hide the rule without removing it."),
'sequence': fields.integer('Sequence', help="Gives the sequence order \
when displaying a list of rules."),
'trg_date_type': fields.selection([
('none', 'None'),
('create', 'Creation Date'),
('action_last', 'Last Action Date'),
('date', 'Date'),
('deadline', 'Deadline'),
], 'Trigger Date', size=16),
        'trg_date_range': fields.integer('Delay after trigger date', \
                                         help="Delay after the trigger date. \
You can put a negative number if you need a delay before the \
trigger date, like sending a reminder 15 minutes before a meeting."),
'trg_date_range_type': fields.selection([('minutes', 'Minutes'), ('hour', 'Hours'), \
('day', 'Days'), ('month', 'Months')], 'Delay type'),
'trg_user_id': fields.many2one('res.users', 'Responsible'),
'trg_partner_id': fields.many2one('res.partner', 'Partner'),
'trg_partner_categ_id': fields.many2one('res.partner.category', 'Partner Category'),
'trg_state_from': fields.selection(_state_get, 'State', size=16),
'trg_state_to': fields.selection(_state_get, 'Button Pressed', size=16),
'act_method': fields.char('Call Object Method', size=64),
'act_user_id': fields.many2one('res.users', 'Set Responsible to'),
'act_state': fields.selection(_state_get, 'Set State to', size=16),
'act_email_cc': fields.char('Add Watchers (Cc)', size=250, help="\
These people will receive a copy of the future communication between partner \
and users by email"),
'act_remind_partner': fields.boolean('Remind Partner', help="Check \
this if you want the rule to send a reminder by email to the partner."),
'act_remind_user': fields.boolean('Remind Responsible', help="Check \
this if you want the rule to send a reminder by email to the user."),
'act_reply_to': fields.char('Reply-To', size=64),
        'act_remind_attach': fields.boolean('Remind with Attachment', help="Check this if you want all documents attached to the object to be attached to the reminder email."),
'act_mail_to_user': fields.boolean('Mail to Responsible', help="Check\
this if you want the rule to send an email to the responsible person."),
        'act_mail_to_watchers': fields.boolean('Mail to Watchers (CC)',
                                               help="Check this if you want \
the rule to mark CC (mail to any other person defined in actions)."),
'act_mail_to_email': fields.char('Mail to these Emails', size=128, \
help="Email-id of the persons whom mail is to be sent"),
'act_mail_body': fields.text('Mail body', help="Content of mail"),
        'regex_name': fields.char('Regex on Resource Name', size=128, help="Regular expression for matching the name of the resource\
\ne.g.: 'urgent.*' will search for records having a name starting with the string 'urgent'\
\nNote: this is a case-sensitive search."),
        'server_action_id': fields.many2one('ir.actions.server', 'Server Action', help="Describes the action to run,\ne.g. which action is to be taken on which object on the basis of which condition"),
'filter_id':fields.many2one('ir.filters', 'Filter', required=False),
        'act_email_from' : fields.char('Email From', size=64, required=False,
                                       help="Use a Python expression to specify the field to use for the 'From' field of the email header"),
        'act_email_to' : fields.char('Email To', size=64, required=False,
                                     help="Use a Python expression to specify the field to use for the 'To' field of the email header"),
'last_run': fields.datetime('Last Run', readonly=1),
}
_defaults = {
'active': lambda *a: True,
'trg_date_type': lambda *a: 'none',
'trg_date_range_type': lambda *a: 'day',
'act_mail_to_user': lambda *a: 0,
'act_remind_partner': lambda *a: 0,
'act_remind_user': lambda *a: 0,
'act_mail_to_watchers': lambda *a: 0,
}
_order = 'sequence'
def onchange_model_id(self, cr, uid, ids, name):
        # This is not a good solution, as it will affect the domain only on onchange
res = {'domain':{'filter_id':[]}}
if name:
model_name = self.pool.get('ir.model').read(cr, uid, [name], ['model'])
if model_name:
mod_name = model_name[0]['model']
res['domain'] = {'filter_id': [('model_id','=',mod_name)]}
else:
res['value'] = {'filter_id':False}
return res
def post_action(self, cr, uid, ids, model, context=None):
# Searching for action rules
cr.execute("SELECT model.model, rule.id FROM base_action_rule rule \
LEFT JOIN ir_model model on (model.id = rule.model_id) \
WHERE active")
res = cr.fetchall()
# Check if any rule matching with current object
for obj_name, rule_id in res:
if not (model == obj_name):
continue # TODO add this condition in the WHERE clause above.
else:
obj = self.pool.get(obj_name)
# If the rule doesn't involve a time condition, run it immediately
# Otherwise we let the scheduler run the action
if self.browse(cr, uid, rule_id, context=context).trg_date_type == 'none':
self._action(cr, uid, [rule_id], obj.browse(cr, uid, ids, context=context), context=context)
return True
def _create(self, old_create, model, context=None):
"""
Return a wrapper around `old_create` calling both `old_create` and
`post_action`, in that order.
"""
def wrapper(cr, uid, vals, context=context):
if context is None:
context = {}
new_id = old_create(cr, uid, vals, context=context)
if not context.get('action'):
self.post_action(cr, uid, [new_id], model, context=context)
return new_id
return wrapper
def _write(self, old_write, model, context=None):
"""
Return a wrapper around `old_write` calling both `old_write` and
`post_action`, in that order.
"""
def wrapper(cr, uid, ids, vals, context=context):
if context is None:
context = {}
if isinstance(ids, (str, int, long)):
ids = [ids]
old_write(cr, uid, ids, vals, context=context)
if not context.get('action'):
self.post_action(cr, uid, ids, model, context=context)
return True
return wrapper
def _register_hook(self, cr, uid, ids, context=None):
"""
Wrap every `create` and `write` methods of the models specified by
the rules (given by `ids`).
"""
for action_rule in self.browse(cr, uid, ids, context=context):
model = action_rule.model_id.model
obj_pool = self.pool.get(model)
if not hasattr(obj_pool, 'base_action_ruled'):
obj_pool.create = self._create(obj_pool.create, model, context=context)
obj_pool.write = self._write(obj_pool.write, model, context=context)
obj_pool.base_action_ruled = True
return True
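    # Effect of the hook (illustrative model name): once _register_hook has
    # run for a rule on, say, 'crm.lead', every call to
    #   self.pool.get('crm.lead').create(cr, uid, vals)
    # also triggers post_action() for the matching rules; the 'action' key in
    # the context guards against the wrappers recursing into themselves.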
def create(self, cr, uid, vals, context=None):
res_id = super(base_action_rule, self).create(cr, uid, vals, context=context)
self._register_hook(cr, uid, [res_id], context=context)
return res_id
def write(self, cr, uid, ids, vals, context=None):
super(base_action_rule, self).write(cr, uid, ids, vals, context=context)
self._register_hook(cr, uid, ids, context=context)
return True
def _check(self, cr, uid, automatic=False, use_new_cursor=False, \
context=None):
"""
This Function is call by scheduler.
"""
rule_pool = self.pool.get('base.action.rule')
rule_ids = rule_pool.search(cr, uid, [], context=context)
self._register_hook(cr, uid, rule_ids, context=context)
rules = self.browse(cr, uid, rule_ids, context=context)
for rule in rules:
model = rule.model_id.model
model_pool = self.pool.get(model)
last_run = False
if rule.last_run:
last_run = get_datetime(rule.last_run)
now = datetime.now()
for obj_id in model_pool.search(cr, uid, [], context=context):
obj = model_pool.browse(cr, uid, obj_id, context=context)
# Calculate when this action should next occur for this object
base = False
if rule.trg_date_type=='create' and hasattr(obj, 'create_date'):
base = obj.create_date
elif (rule.trg_date_type=='action_last'
and hasattr(obj, 'create_date')):
if hasattr(obj, 'date_action_last') and obj.date_action_last:
base = obj.date_action_last
else:
base = obj.create_date
elif (rule.trg_date_type=='deadline'
and hasattr(obj, 'date_deadline')
and obj.date_deadline):
base = obj.date_deadline
elif (rule.trg_date_type=='date'
and hasattr(obj, 'date')
and obj.date):
base = obj.date
if base:
                    fnct = {
                        'minutes': lambda interval: timedelta(minutes=interval),
                        'day': lambda interval: timedelta(days=interval),
                        'hour': lambda interval: timedelta(hours=interval),
                        # timedelta() has no 'months' argument; approximate a
                        # month as 30 days so the lookup does not raise TypeError
                        'month': lambda interval: timedelta(days=30 * interval),
                    }
base = get_datetime(base)
delay = fnct[rule.trg_date_range_type](rule.trg_date_range)
action_date = base + delay
if (not last_run or (last_run <= action_date < now)):
self._action(cr, uid, [rule.id], [obj], context=context)
rule_pool.write(cr, uid, [rule.id], {'last_run': now},
context=context)
    def format_body(self, body):
        """ Format the action rule's body
        @param self: The object pointer """
return body and tools.ustr(body) or ''
def format_mail(self, obj, body):
data = {
'object_id': obj.id,
'object_subject': hasattr(obj, 'name') and obj.name or False,
'object_date': hasattr(obj, 'date') and obj.date or False,
'object_description': hasattr(obj, 'description') and obj.description or False,
'object_user': hasattr(obj, 'user_id') and (obj.user_id and obj.user_id.name) or '/',
'object_user_email': hasattr(obj, 'user_id') and (obj.user_id and \
obj.user_id.user_email) or '/',
'object_user_phone': hasattr(obj, 'partner_address_id') and (obj.partner_address_id and \
obj.partner_address_id.phone) or '/',
'partner': hasattr(obj, 'partner_id') and (obj.partner_id and obj.partner_id.name) or '/',
'partner_email': hasattr(obj, 'partner_address_id') and (obj.partner_address_id and\
obj.partner_address_id.email) or '/',
}
return self.format_body(body % data)
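    # Example of the substitution keys exposed to act_mail_body (hypothetical
    # record): a body like "Task %(object_subject)s assigned to %(object_user)s"
    # renders as "Task Fix invoice assigned to Demo User".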
def email_send(self, cr, uid, obj, emails, body, emailfrom=None, context=None):
""" send email
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param email: pass the emails
@param emailfrom: Pass name the email From else False
@param context: A standard dictionary for contextual values """
if not emailfrom:
emailfrom = tools.config.get('email_from', False)
if context is None:
context = {}
mail_message = self.pool.get('mail.message')
body = self.format_mail(obj, body)
if not emailfrom:
if hasattr(obj, 'user_id') and obj.user_id and obj.user_id.user_email:
emailfrom = obj.user_id.user_email
name = '[%d] %s' % (obj.id, tools.ustr(obj.name))
emailfrom = tools.ustr(emailfrom)
reply_to = emailfrom
if not emailfrom:
raise osv.except_osv(_('Error!'),
_("No E-Mail ID Found for your Company address!"))
return mail_message.schedule_with_attach(cr, uid, emailfrom, emails, name, body, model='base.action.rule', reply_to=reply_to, res_id=obj.id)
def do_check(self, cr, uid, action, obj, context=None):
""" check Action
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: A standard dictionary for contextual values """
if context is None:
context = {}
ok = True
if action.filter_id:
if action.model_id.model == action.filter_id.model_id:
context.update(eval(action.filter_id.context))
obj_ids = obj._table.search(cr, uid, eval(action.filter_id.domain), context=context)
if not obj.id in obj_ids:
ok = False
else:
ok = False
if getattr(obj, 'user_id', False):
ok = ok and (not action.trg_user_id.id or action.trg_user_id.id==obj.user_id.id)
if getattr(obj, 'partner_id', False):
ok = ok and (not action.trg_partner_id.id or action.trg_partner_id.id==obj.partner_id.id)
ok = ok and (
not action.trg_partner_categ_id.id or
(
obj.partner_id.id and
(action.trg_partner_categ_id.id in map(lambda x: x.id, obj.partner_id.category_id or []))
)
)
state_to = context.get('state_to', False)
state = getattr(obj, 'state', False)
if state:
ok = ok and (not action.trg_state_from or action.trg_state_from==state)
if state_to:
ok = ok and (not action.trg_state_to or action.trg_state_to==state_to)
elif action.trg_state_to:
ok = False
reg_name = action.regex_name
result_name = True
if reg_name:
ptrn = re.compile(ustr(reg_name))
_result = ptrn.search(ustr(obj.name))
if not _result:
result_name = False
regex_n = not reg_name or result_name
ok = ok and regex_n
return ok
def do_action(self, cr, uid, action, model_obj, obj, context=None):
""" Do Action
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param action: pass action
@param model_obj: pass Model object
@param context: A standard dictionary for contextual values """
if context is None:
context = {}
if action.server_action_id:
context.update({'active_id': obj.id, 'active_ids': [obj.id], 'active_model': obj._name})
self.pool.get('ir.actions.server').run(cr, uid, [action.server_action_id.id], context)
write = {}
if hasattr(obj, 'user_id') and action.act_user_id:
obj.user_id = action.act_user_id
write['user_id'] = action.act_user_id.id
if hasattr(obj, 'date_action_last'):
write['date_action_last'] = time.strftime('%Y-%m-%d %H:%M:%S')
if hasattr(obj, 'state') and action.act_state:
obj.state = action.act_state
write['state'] = action.act_state
if hasattr(obj, 'categ_id') and action.act_categ_id:
obj.categ_id = action.act_categ_id
write['categ_id'] = action.act_categ_id.id
model_obj.write(cr, uid, [obj.id], write, context)
if hasattr(model_obj, 'remind_user') and action.act_remind_user:
model_obj.remind_user(cr, uid, [obj.id], context, attach=action.act_remind_attach)
if hasattr(model_obj, 'remind_partner') and action.act_remind_partner:
model_obj.remind_partner(cr, uid, [obj.id], context, attach=action.act_remind_attach)
        if action.act_method:
            # Call the method named in act_method on the model, not a
            # literal attribute called 'act_method'
            getattr(model_obj, action.act_method)(cr, uid, [obj.id], action, context)
emails = []
if hasattr(obj, 'user_id') and action.act_mail_to_user:
if obj.user_id:
emails.append(obj.user_id.user_email)
if action.act_mail_to_watchers:
emails += (action.act_email_cc or '').split(',')
if action.act_mail_to_email:
emails += (action.act_mail_to_email or '').split(',')
locals_for_emails = {
'user' : self.pool.get('res.users').browse(cr, uid, uid, context=context),
'obj' : obj,
}
if action.act_email_to:
emails.append(safe_eval(action.act_email_to, {}, locals_for_emails))
emails = filter(None, emails)
if len(emails) and action.act_mail_body:
emails = list(set(emails))
email_from = safe_eval(action.act_email_from, {}, locals_for_emails)
def to_email(text):
return re.findall(r'([^ ,<@]+@[^> ,]+)', text or '')
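        # e.g. to_email("Foo <foo@bar.com>, baz@qux.org") returns
        # ['foo@bar.com', 'baz@qux.org']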
emails = to_email(','.join(filter(None, emails)))
email_froms = to_email(email_from)
if email_froms:
self.email_send(cr, uid, obj, emails, action.act_mail_body, emailfrom=email_froms[0])
return True
def _action(self, cr, uid, ids, objects, scrit=None, context=None):
""" Do Action
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Basic Action Rule’s IDs,
@param objects: pass objects
@param context: A standard dictionary for contextual values """
if context is None:
context = {}
context.update({'action': True})
if not scrit:
scrit = []
for action in self.browse(cr, uid, ids, context=context):
for obj in objects:
if self.do_check(cr, uid, action, obj, context=context):
model_obj = self.pool.get(action.model_id.model)
self.do_action(cr, uid, action, model_obj, obj, context=context)
context.update({'action': False})
return True
def _check_mail(self, cr, uid, ids, context=None):
""" Check Mail
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Action Rule’s IDs
@param context: A standard dictionary for contextual values """
empty = orm.browse_null()
rule_obj = self.pool.get('base.action.rule')
for rule in self.browse(cr, uid, ids, context=context):
if rule.act_mail_body:
try:
rule_obj.format_mail(empty, rule.act_mail_body)
except (ValueError, KeyError, TypeError):
return False
return True
_constraints = [
        (_check_mail, 'Error: The mail is not well formatted', ['act_mail_body']),
]
base_action_rule()
class ir_cron(osv.osv):
_inherit = 'ir.cron'
_init_done = False
def _poolJobs(self, db_name, check=False):
if not self._init_done:
self._init_done = True
try:
db = pooler.get_db(db_name)
except:
return False
cr = db.cursor()
try:
next = datetime.now().strftime('%Y-%m-%d %H:00:00')
            # Always set nextcall earlier than the current time so the job is run on every scheduler pass
cr.execute('UPDATE ir_cron set nextcall = \'%s\' where numbercall<>0 and active and model=\'base.action.rule\' ' % (next))
finally:
cr.commit()
cr.close()
super(ir_cron, self)._poolJobs(db_name, check=check)
ir_cron()
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instructor_task', '0002_gradereportsetting'),
]
operations = [
migrations.AlterField(
model_name='instructortask',
name='task_input',
field=models.TextField(),
),
]
|
from spack import *
class DhpmmF(MakefilePackage):
"""DHPMM_P:High-precision Matrix Multiplication with Faithful Rounding"""
homepage = "http://www.math.twcu.ac.jp/ogita/post-k/"
url = "http://www.math.twcu.ac.jp/ogita/post-k/software/DHPMM_F/DHPMM_F_alpha.tar.gz"
version('alpha', sha256='35321ecbc749f2682775ffcd27833afc8c8eb4fa7753ce769727c9d1fe097848')
depends_on('blas', type='link')
depends_on('lapack', type='link')
def patch(self):
math_libs = self.spec['lapack'].libs + self.spec['blas'].libs
makefile = FileFilter('Makefile')
if self.spec.satisfies('%gcc'):
makefile.filter(r'^MKL\s+=\s1', 'MKL=0')
makefile.filter(r'^CC\s+=\sgcc',
'CC={0}'.format(spack_cc))
makefile.filter(r'^CXX\s+=\sg\+\+',
'CXX={0}'.format(spack_cxx))
makefile.filter(r'^BLASLIBS\s+=\s-llapack\s-lblas',
'BLASLIBS={0}'.format(math_libs.ld_flags))
elif self.spec.satisfies('%fj'):
makefile.filter(r'^#ENV\s+=\sFX100', 'ENV=FX100')
makefile.filter(r'^ENV\s+=\sGCC', '#ENV=GCC')
makefile.filter(r'^MKL\s+=\s1', 'MKL=0')
makefile.filter(r'^CC\s+=\sfccpx',
'CC={0}'.format(spack_cc))
makefile.filter(r'^CXX\s+=\sFCCpx',
'CXX={0}'.format(spack_cxx))
makefile.filter(r'^BLASLIBS\s+=\s-llapack\s-lblas',
'BLASLIBS={0}'.format(math_libs.ld_flags))
elif self.spec.satisfies('%intel'):
makefile.filter(r'^ENV\s+=\sGCC', '#ENV=GCC')
makefile.filter(r'^ENV\s+=\sICC', 'ENV=ICC')
makefile.filter(r'^CC\s+=\sicc',
'CC={0}'.format(spack_cc))
makefile.filter(r'^CXX\s+=\sicc',
'CXX={0}'.format(spack_cxx))
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('test/source4_SpMV', prefix.bin)
|
from spack import *
class Minigan(Package):
"""miniGAN is a generative adversarial network code developed as part of the
Exascale Computing Project's (ECP) ExaLearn project at
Sandia National Laboratories."""
homepage = "https://github.com/SandiaMLMiniApps/miniGAN"
url = "https://github.com/SandiaMLMiniApps/miniGAN/archive/1.0.0.tar.gz"
version('1.0.0', sha256='ef6d5def9c7040af520acc64b7a8b6c8ec4b7901721b11b0cb25a583ea0c8ae3')
depends_on('python', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-torch', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-horovod@master', type=('build', 'run'))
depends_on('py-torchvision', type=('build', 'run'))
depends_on('py-matplotlib@3.0.0', type=('build', 'run'))
def install(self, spec, prefix):
install_tree('.', prefix)
|
import unittest
import sys
from PySide.QtCore import QObject, SIGNAL, QUrl
from PySide.QtWebKit import *
from PySide.QtNetwork import QNetworkRequest
from helper import adjust_filename, UsesQApplication
class TestWebFrame(UsesQApplication):
def load_finished(self, ok):
self.assert_(ok)
page = self.view.page()
self.assert_(page)
frame = page.mainFrame()
self.assert_(frame)
meta = frame.metaData()
self.assertEqual(meta['description'], ['PySide Test METADATA.'])
self.app.quit()
def testMetaData(self):
self.view = QWebView()
QObject.connect(self.view, SIGNAL('loadFinished(bool)'),
self.load_finished)
url = QUrl.fromLocalFile(adjust_filename('fox.html', __file__))
self.view.setUrl(url)
self.app.exec_()
if __name__ == '__main__':
unittest.main()
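# The metaData() assertion above implies fox.html carries a tag along the
# lines of (illustrative; the actual fixture is not shown here):
#
#   <meta name="description" content="PySide Test METADATA." />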
|
"""0.3.0 to 0.4.0
Revision ID: 0.3.0
Revises:
"""
revision = '0.4.0'
down_revision = '0.3.0'
branch_labels = None
depends_on = None
from alembic import op
from db_meta import *
from sqlalchemy.dialects import mysql
def upgrade():
"""
update schema&data
"""
bind = op.get_bind()
    # alter columns user.username, user.email, project.name and
    # replication_target.password; add column replication_policy.deleted
op.alter_column('user', 'username', type_=sa.String(32), existing_type=sa.String(15))
op.alter_column('user', 'email', type_=sa.String(255), existing_type=sa.String(128))
op.alter_column('project', 'name', type_=sa.String(41), existing_type=sa.String(30), nullable=False)
op.alter_column('replication_target', 'password', type_=sa.String(128), existing_type=sa.String(40))
op.add_column('replication_policy', sa.Column('deleted', mysql.TINYINT(1), nullable=False, server_default=sa.text("'0'")))
    # create index pid_optime (project_id, op_time) on table access_log and
    # poid_uptime (policy_id, update_time) on table replication_job
op.create_index('pid_optime', 'access_log', ['project_id', 'op_time'])
op.create_index('poid_uptime', 'replication_job', ['policy_id', 'update_time'])
    # create table: repository
Repository.__table__.create(bind)
def downgrade():
"""
Downgrade has been disabled.
"""
pass
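# Hedged usage note: with this script on the Alembic version path, the
# migration is applied with the standard CLI, e.g.:
#
#   alembic upgrade 0.4.0
#   alembic downgrade 0.3.0   # downgrade() above is intentionally disabled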
|
from __future__ import absolute_import, unicode_literals, division
import random
import string
import timeit
import os
import zipfile
import datrie
def words100k():
zip_name = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'words100k.txt.zip'
)
zf = zipfile.ZipFile(zip_name)
txt = zf.open(zf.namelist()[0]).read().decode('utf8')
return txt.splitlines()
def random_words(num):
russian = 'абвгдеёжзиклмнопрстуфхцчъыьэюя'
alphabet = russian + string.ascii_letters
return [
"".join([random.choice(alphabet) for x in range(random.randint(1,15))])
for y in range(num)
]
def truncated_words(words):
return [word[:3] for word in words]
def prefixes1k(words, prefix_len):
words = [w for w in words if len(w) >= prefix_len]
every_nth = int(len(words)/1000)
_words = [w[:prefix_len] for w in words[::every_nth]]
return _words[:1000]
WORDS100k = words100k()
MIXED_WORDS100k = truncated_words(WORDS100k)
NON_WORDS100k = random_words(100000)
PREFIXES_3_1k = prefixes1k(WORDS100k, 3)
PREFIXES_5_1k = prefixes1k(WORDS100k, 5)
PREFIXES_8_1k = prefixes1k(WORDS100k, 8)
PREFIXES_15_1k = prefixes1k(WORDS100k, 15)
def _alphabet(words):
chars = set()
for word in words:
for ch in word:
chars.add(ch)
return "".join(sorted(list(chars)))
ALPHABET = _alphabet(WORDS100k)
def bench(name, timer, descr='M ops/sec', op_count=0.1, repeats=3, runs=5):
times = []
for x in range(runs):
times.append(timer.timeit(repeats))
def op_time(time):
return op_count*repeats / time
print("%55s: %0.3f%s" % (
name,
op_time(min(times)),
descr,
))
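# Worked example of the metric above: with the default op_count=0.1 (100k
# operations expressed in millions) and repeats=3, a best run of 0.05s
# reports 0.1 * 3 / 0.05 = 6.0 M ops/sec.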
def create_trie():
words = words100k()
trie = datrie.Trie(ALPHABET)
for word in words:
trie[word] = 1
return trie
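# A minimal sketch of the datrie calls exercised by the benchmarks below
# (alphabet and keys are illustrative):
#
#   t = datrie.Trie(u'abc')
#   t[u'ab'] = 1
#   t[u'abc'] = 2
#   t.prefix_items(u'abcd')        # -> [(u'ab', 1), (u'abc', 2)]
#   t.longest_prefix(u'abcd')      # -> u'abc'
#   t.has_keys_with_prefix(u'ab')  # -> True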
def benchmark():
print('\n====== Benchmarks (100k unique unicode words) =======\n')
tests = [
('__getitem__ (hits)', "for word in words: data[word]", 'M ops/sec', 0.1, 3),
('__contains__ (hits)', "for word in words: word in data", 'M ops/sec', 0.1, 3),
('__contains__ (misses)', "for word in NON_WORDS100k: word in data", 'M ops/sec', 0.1, 3),
('__len__', 'len(data)', ' ops/sec', 1, 1),
('__setitem__ (updates)', 'for word in words: data[word]=1', 'M ops/sec', 0.1, 3),
('__setitem__ (inserts, random)', 'for word in NON_WORDS_10k: data[word]=1', 'M ops/sec',0.01, 3),
('__setitem__ (inserts, sorted)', 'for word in words: empty_data[word]=1', 'M ops/sec', 0.1, 3),
('setdefault (updates)', 'for word in words: data.setdefault(word, 1)', 'M ops/sec', 0.1, 3),
('setdefault (inserts)', 'for word in NON_WORDS_10k: data.setdefault(word, 1)', 'M ops/sec', 0.01, 3),
('values()', 'list(data.values())', ' ops/sec', 1, 1),
('keys()', 'list(data.keys())', ' ops/sec', 1, 1),
('items()', 'list(data.items())', ' ops/sec', 1, 1),
]
common_setup = """
from __main__ import create_trie, WORDS100k, NON_WORDS100k, MIXED_WORDS100k, datrie
from __main__ import PREFIXES_3_1k, PREFIXES_5_1k, PREFIXES_8_1k, PREFIXES_15_1k
from __main__ import ALPHABET
words = WORDS100k
NON_WORDS_10k = NON_WORDS100k[:10000]
NON_WORDS_1k = ['ыва', 'xyz', 'соы', 'Axx', 'avы']*200
"""
dict_setup = common_setup + 'data = dict((word, 1) for word in words); empty_data=dict()'
trie_setup = common_setup + 'data = create_trie(); empty_data = datrie.Trie(ALPHABET)'
for test_name, test, descr, op_count, repeats in tests:
t_dict = timeit.Timer(test, dict_setup)
t_trie = timeit.Timer(test, trie_setup)
bench('dict '+test_name, t_dict, descr, op_count, repeats)
bench('trie '+test_name, t_trie, descr, op_count, repeats)
# trie-specific benchmarks
bench(
'trie.iter_prefix_values (hits)',
timeit.Timer(
"for word in words:\n"
" for it in data.iter_prefix_values(word):\n"
" pass",
trie_setup
),
)
bench(
'trie.prefix_values (hits)',
timeit.Timer(
"for word in words: data.prefix_values(word)",
trie_setup
)
)
bench(
'trie.prefix_values loop (hits)',
timeit.Timer(
"for word in words:\n"
" for it in data.prefix_values(word):pass",
trie_setup
)
)
bench(
'trie.iter_prefix_items (hits)',
timeit.Timer(
"for word in words:\n"
" for it in data.iter_prefix_items(word):\n"
" pass",
trie_setup
),
)
bench(
'trie.prefix_items (hits)',
timeit.Timer(
"for word in words: data.prefix_items(word)",
trie_setup
)
)
bench(
'trie.prefix_items loop (hits)',
timeit.Timer(
"for word in words:\n"
" for it in data.prefix_items(word):pass",
trie_setup
)
)
bench(
'trie.iter_prefixes (hits)',
timeit.Timer(
"for word in words:\n"
" for it in data.iter_prefixes(word): pass",
trie_setup
)
)
bench(
'trie.iter_prefixes (misses)',
timeit.Timer(
"for word in NON_WORDS100k:\n"
" for it in data.iter_prefixes(word): pass",
trie_setup
)
)
bench(
'trie.iter_prefixes (mixed)',
timeit.Timer(
"for word in MIXED_WORDS100k:\n"
" for it in data.iter_prefixes(word): pass",
trie_setup
)
)
bench(
'trie.has_keys_with_prefix (hits)',
timeit.Timer(
"for word in words: data.has_keys_with_prefix(word)",
trie_setup
)
)
bench(
'trie.has_keys_with_prefix (misses)',
timeit.Timer(
"for word in NON_WORDS100k: data.has_keys_with_prefix(word)",
trie_setup
)
)
for meth in ('longest_prefix', 'longest_prefix_item', 'longest_prefix_value'):
bench(
'trie.%s (hits)' % meth,
timeit.Timer(
"for word in words: data.%s(word)" % meth,
trie_setup
)
)
bench(
'trie.%s (misses)' % meth,
timeit.Timer(
"for word in NON_WORDS100k: data.%s(word, default=None)" % meth,
trie_setup
)
)
bench(
'trie.%s (mixed)' % meth,
timeit.Timer(
"for word in MIXED_WORDS100k: data.%s(word, default=None)" % meth,
trie_setup
)
)
prefix_data = [
('xxx', 'avg_len(res)==415', 'PREFIXES_3_1k'),
('xxxxx', 'avg_len(res)==17', 'PREFIXES_5_1k'),
('xxxxxxxx', 'avg_len(res)==3', 'PREFIXES_8_1k'),
('xxxxx..xx', 'avg_len(res)==1.4', 'PREFIXES_15_1k'),
('xxx', 'NON_EXISTING', 'NON_WORDS_1k'),
]
for xxx, avg, data in prefix_data:
for meth in ('items', 'keys', 'values'):
bench(
'trie.%s(prefix="%s"), %s' % (meth, xxx, avg),
timeit.Timer(
"for word in %s: data.%s(word)" % (data, meth),
trie_setup
),
'K ops/sec',
op_count=1,
)
def profiling():
print('\n====== Profiling =======\n')
def profile_yep():
import yep
trie = create_trie()
#WORDS = words100k()
yep.start(b'output.prof')
for x in range(100):
trie.keys()
yep.stop()
def profile_cprofile():
import pstats
import cProfile
trie = create_trie()
WORDS = words100k()
def check_trie(trie, words):
value = 0
for word in words:
value += trie[word]
if value != len(words):
raise Exception()
cProfile.runctx("check_trie(trie, WORDS)", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats(20)
#profile_cprofile()
profile_yep()
if __name__ == '__main__':
benchmark()
#profiling()
#memory()
print('\n~~~~~~~~~~~~~~\n')
|
"""Tests for `tf.data.Dataset.from_sparse_tensor_slices()`."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class FromSparseTensorSlicesTest(test_base.DatasetTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=1, mode=["graph"]),
combinations.combine(slices=[[
[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []
], [[1., 2.], [], [1., 2.], [1.], [1., 2.], [], [1., 2.]]])))
def testFromSparseTensorSlices(self, slices):
"""Test a dataset based on slices of a `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.cached_session() as sess:
# Test with sparse tensor in the appropriate order.
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
# pylint: enable=g-complex-comprehension
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
for i, s in enumerate(slices):
results = sess.run(get_next)
self.assertAllEqual(s, results.values)
expected_indices = np.array(
[[j] for j in range(len(slices[i]))]).reshape([-1, 1])
self.assertAllEqual(expected_indices, results.indices)
self.assertAllEqual(dense_shape[1:], results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
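  # Worked example of the slicing above (illustrative): for slices
  # [[1., 2.], []], the feed is indices=[[0, 0], [0, 1]], values=[1., 2.],
  # dense_shape=[2, 3], and iteration yields two sparse rows of shape [3]:
  # row 0 with indices [[0], [1]] and values [1., 2.], row 1 empty.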
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=1, mode=["graph"]),
combinations.combine(slices=[[
[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []
], [[1., 2.], [], [1., 2.], [1.], [1., 2.], [], [1., 2.]]])))
def testFromSparseTensorSlicesInReverse(self, slices):
"""Test a dataset based on slices of a `tf.sparse.SparseTensor` in reverse order."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
# pylint: enable=g-complex-comprehension
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
# Test with sparse tensor in the reverse order, which is not
# currently supported.
reverse_order_indices = indices[::-1, :]
reverse_order_values = values[::-1]
sparse_feed = sparse_tensor.SparseTensorValue(
reverse_order_indices, reverse_order_values, dense_shape)
with self.assertRaises(errors.UnimplementedError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlices(self):
"""Test a dataset based on slices of an empty `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.cached_session() as sess:
# Test with an empty sparse tensor.
empty_indices = np.empty((0, 4), dtype=np.int64)
empty_values = np.empty((0,), dtype=np.float64)
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
empty_dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlicesInvalid(self):
"""Test a dataset based on invalid `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# Test with an empty sparse tensor but with non empty values.
empty_indices = np.empty((0, 4), dtype=np.int64)
non_empty_values = [1, 2, 3, 4]
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices,
non_empty_values,
empty_dense_shape)
# Here, we expect the test to fail when running the feed.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlicesInvalid2(self):
"""Test a dataset based on invalid `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# Test with an empty sparse tensor but with non empty values.
empty_indices = [[]]
empty_values = []
dense_shape = [1, 1]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
dense_shape)
# Here, we expect the test to fail when running the feed.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=2, mode=["eager"]))
def testFromSparseTensorSlicesError(self):
with self.assertRaises(AttributeError):
dataset_ops.Dataset.from_sparse_tensor_slices(None)
class FromSparseTensorSlicesCheckpointTest(
checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
def _build_sparse_tensor_slice_dataset(self, slices):
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))],
dtype=np.int64)
values = np.array([val for s in slices for val in s], dtype=np.float64)
# pylint: enable=g-complex-comprehension
dense_shape = np.array(
[len(slices), max(len(s) for s in slices) + 1], dtype=np.int64)
sparse_components = sparse_tensor.SparseTensor(indices, values, dense_shape)
return dataset_ops.Dataset.from_sparse_tensor_slices(sparse_components)
@combinations.generate(
combinations.times(test_base.v1_only_combinations(),
checkpoint_test_base.default_test_combinations()))
def test(self, verify_fn):
slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]
verify_fn(
self,
lambda: self._build_sparse_tensor_slice_dataset(slices),
num_outputs=9,
sparse_tensors=True)
if __name__ == "__main__":
test.main()
|
"""Tests for workflow object exports."""
from os.path import abspath, dirname, join
from flask.json import dumps
from ggrc.app import app
from ggrc_workflows.models import Workflow
from integration.ggrc import TestCase
from integration.ggrc_workflows.generator import WorkflowsGenerator
THIS_ABS_PATH = abspath(dirname(__file__))
CSV_DIR = join(THIS_ABS_PATH, 'test_csvs/')
class TestExportEmptyTemplate(TestCase):
"""Test empty export for all workflow object types."""
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC",
"X-export-view": "blocks",
}
def test_single_object_export(self):
"""Test empty exports for workflow only."""
data = [{"object_name": "Workflow", "fields": "all"}]
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
def test_multiple_objects(self):
"""Test empty exports for all workflow object in one query."""
data = [
{"object_name": "Workflow", "fields": "all"},
{"object_name": "TaskGroup", "fields": "all"},
{"object_name": "TaskGroupTask", "fields": "all"},
{"object_name": "Cycle", "fields": "all"},
{"object_name": "CycleTaskGroup", "fields": "all"},
{"object_name": "CycleTaskGroupObjectTask", "fields": "all"},
]
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Workflow,", response.data)
self.assertIn("Task Group,", response.data)
self.assertIn("Task,", response.data)
self.assertIn("Cycle,", response.data)
self.assertIn("Cycle Task Group,", response.data)
self.assertIn("Cycle Task Group Object Task,", response.data)
class TestExportMultipleObjects(TestCase):
""" Test data is found in the google sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=2035742544
"""
@classmethod
def setUpClass(cls): # pylint: disable=C0103
TestCase.clear_data()
cls.tc = app.test_client()
cls.tc.get("/login")
cls.import_file("workflow_big_sheet.csv")
@classmethod
def import_file(cls, filename, dry_run=False):
data = {"file": (open(join(CSV_DIR, filename)), filename)}
headers = {
"X-test-only": "true" if dry_run else "false",
"X-requested-by": "gGRC",
}
cls.tc.post("/_service/import_csv",
data=data, headers=headers)
def activate(self):
""" activate workflows just once after the class has been initialized
This should be in setUpClass method, but we can't access the server
context from there."""
gen = WorkflowsGenerator()
    # generate a cycle for the single one-time workflow
wf1 = Workflow.query.filter_by(status="Draft", slug="wf-1").first()
if wf1:
gen.generate_cycle(wf1)
workflows = Workflow.query.filter_by(status="Draft").all()
for wf in workflows:
gen.activate_workflow(wf)
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC",
"X-export-view": "blocks",
}
self.activate()
def export_csv(self, data):
response = self.client.post("/_service/export_csv", data=dumps(data),
headers=self.headers)
self.assert200(response)
return response
def test_workflow_task_group_mapping(self):
""" test workflow and task group mappings """
data = [
{
"object_name": "Workflow", # wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "TaskGroup",
"slugs": ["tg-1"],
},
},
"fields": "all",
}, {
"object_name": "TaskGroup", # tg-1, tg-2
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
    self.assertEqual(3, response.count("wf-1"))  # 1 for the wf + 1 on each of the two tgs
self.assertIn("tg-1", response)
self.assertIn("tg-6", response)
def test_tg_task(self):
""" test task group and task mappings """
data = [
{
"object_name": "TaskGroupTask", # task-1, task-7
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "TaskGroup",
"slugs": ["tg-1"],
},
},
"fields": "all",
}, {
"object_name": "TaskGroup", # tg-1, tg-2
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("tg-1")) # 2 for tasks and 1 for tg
self.assertIn("task-1", response)
self.assertIn("task-7", response)
def test_workflow_cycle_mapping(self):
""" test workflow and cycle mappings """
data = [
{
"object_name": "Cycle", # cycle with title wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "Workflow",
"slugs": ["wf-1"],
},
},
"fields": "all",
}, {
"object_name": "Workflow", # wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
}, {
"object_name": "CycleTaskGroup", # two cycle groups
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
}, {
"object_name": "Cycle", # sholud be same cycle as in first block
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["2"],
},
},
"fields": "all",
}, {
# Task mapped to any of the two task groups, 3 tasks
"object_name": "CycleTaskGroupObjectTask",
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["2"],
},
},
"fields": "all",
}, {
"object_name": "CycleTaskGroup", # two cycle groups
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["4"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("wf-1")) # 2 for cycles and 1 for wf
# 3rd block = 2, 5th block = 3, 6th block = 2.
self.assertEqual(7, response.count("CYCLEGROUP-"))
self.assertEqual(9, response.count("CYCLE-"))
self.assertEqual(3, response.count("CYCLETASK-"))
  def test_cycle_task_objects(self):
    """ test cycle task and various objects """
data = [
{
"object_name": "CycleTaskGroupObjectTask", #
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "Policy",
"slugs": ["p1"],
},
},
"fields": "all",
}, {
"object_name": "Policy", #
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": ["slug", "title"],
},
]
response = self.export_csv(data).data
self.assertEqual(2, response.count("CYCLETASK-"))
self.assertEqual(3, response.count(",p1,"))
def test_wf_indirect_relevant_filters(self):
""" test related filter for indirect relationships on wf objects """
def block(obj):
return {
"object_name": obj,
"fields": ["slug"],
"filters": {
"expression": {
"object_name": "Policy",
"op": {"name": "relevant"},
"slugs": ["p1"],
},
},
}
data = [
block("Workflow"),
block("Cycle"),
block("CycleTaskGroup"),
block("CycleTaskGroupObjectTask"),
]
response = self.export_csv(data).data
wf = Workflow.query.filter_by(slug="wf-1").first()
cycle = wf.cycles[0]
cycle_tasks = []
for cycle_task in cycle.cycle_task_group_object_tasks:
is_related = False
for related_object in cycle_task.related_objects:
if related_object.slug == "p1":
is_related = True
if is_related:
cycle_tasks.append(cycle_task)
cycle_task_groups = list({cycle_task.cycle_task_group
for cycle_task in cycle_tasks})
self.assertEqual(1, response.count("wf-"))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(wf.slug))
self.assertEqual(1, response.count("CYCLE-"))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(cycle.slug))
self.assertEqual(1, response.count("CYCLEGROUP-"))
self.assertEqual(1, len(cycle_task_groups))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(
cycle_task_groups[0].slug))
self.assertEqual(2, response.count("CYCLETASK-"))
self.assertEqual(2, len(cycle_tasks))
for cycle_task in cycle_tasks:
self.assertRegexpMatches(response, ",{}[,\r\n]".format(
cycle_task.slug))
destinations = [
("Workflow", wf.slug, 3),
("Cycle", cycle.slug, 3),
("CycleTaskGroupObjectTask", cycle_tasks[0].slug, 1),
("CycleTaskGroupObjectTask", cycle_tasks[1].slug, 1),
]
for object_name, slug, count in destinations:
data = [{
"object_name": "Policy",
"fields": ["slug"],
"filters": {
"expression": {
"object_name": object_name,
"op": {"name": "relevant"},
"slugs": [slug],
},
},
}]
response = self.export_csv(data).data
self.assertEqual(count, response.count(",p"), "Count for " + object_name)
self.assertIn(",p1", response)
|
"""Tests for training.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json
import os
import random
import shutil
import tempfile
import time
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import exporter as exporter_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.estimator import training
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export as export_lib
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
_DEFAULT_EVAL_STEPS = 100
_DEFAULT_EVAL_DELAY_SECS = 120
_DEFAULT_EVAL_THROTTLE_SECS = 600
_DELAY_SECS_PER_WORKER = 5
_GLOBAL_STEP_KEY = ops.GraphKeys.GLOBAL_STEP
_INVALID_INPUT_FN_MSG = '`input_fn` must be callable'
_INVALID_HOOK_MSG = 'All hooks must be `SessionRunHook` instances'
_INVALID_MAX_STEPS_MSG = 'Must specify max_steps > 0'
_INVALID_STEPS_MSG = 'Must specify steps > 0'
_INVALID_NAME_MSG = '`name` must be string'
_INVALID_EVAL_DELAY_SECS_MSG = 'Must specify start_delay_secs >= 0'
_INVALID_EVAL_THROTTLE_SECS_MSG = 'Must specify throttle_secs >= 0'
_INVALID_ESTIMATOR_MSG = '`estimator` must have type `tf.estimator.Estimator`'
_STALE_CHECKPOINT_MSG = 'There was no new checkpoint after the training.'
_INVALID_EXPORTER_MSG = '`exporters` must be an Exporter'
_INVALID_EXPORTER_NAME_TYPE_MSG = 'An Exporter must have a string name'
_DUPLICATE_EXPORTER_NAMES_MSG = '`exporters` must have unique names.'
_NONE_EXPORTER_NAME_MSG = (
'An Exporter cannot have a name that is `None` or empty.')
_INVALID_TRAIN_SPEC_MSG = '`train_spec` must have type `tf.estimator.TrainSpec`'
_INVALID_EVAL_SPEC_MSG = '`eval_spec` must have type `tf.estimator.EvalSpec`'
_EVAL_SPEC_OR_NONE_MSG = (
'`eval_spec` must be either `None` or have type `tf.estimator.EvalSpec`')
_INVALID_EVAL_LISTENER_MSG = 'must have type `_ContinuousEvalListener`'
_INVALID_CONFIG_FOR_STD_SERVER_MSG = 'Could not start server; .*TF_CONFIG'
_INVALID_LOCAL_TASK_WITH_CLUSTER = '`task.type` in TF_CONFIG cannot be `local`'
_INVALID_TASK_TYPE = '`estimator.config` must have task_type set.'
_INPROPER_THROTTL_SECS = (
'EvalSpec.throttle_secs is set as 0.*Please consider to increase')
_INVALID_TASK_TO_RUN = (
'Task type .* is not supported. Supported task types are ((?!local).)*$')
_INVALID_EMPTY_EVAL_RESULT_ERR = (
'Internal error: `Estimator.evaluate` should never return empty metrics')
_INVALID_EVAL_RESULT_TYPE_ERR = '`Estimator.evaluate` should return dict.'
_MISSING_GLOBAL_STEP_IN_EVAL_RESULT_ERR = (
'Internal error: `Estimator.evaluate` result should have `global_step`')
_INVALID_EVAL_TASK_ID_ERR = (
'there can only be one `evaluator` task .*with task id 0')
_TF_CONFIG_FOR_CHIEF = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.CHIEF,
'index': 0
}
}
_TF_CONFIG_FOR_MASTER = {
'cluster': {
run_config_lib.TaskType.MASTER: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.MASTER,
'index': 0
}
}
_TF_CONFIG_FOR_WORKER = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 1
}
}
_TF_CONFIG_FOR_PS = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.PS,
'index': 1
}
}
_TF_CONFIG_FOR_EVALUATOR = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.EVALUATOR,
'index': 0
}
}
_TF_CONFIG_FOR_GOOGLE = {'environment': 'google'}
class _FakeHook(session_run_hook.SessionRunHook):
"""Fake implementation of `SessionRunHook`."""
class _InvalidHook(object):
"""Invalid hook (not a subclass of `SessionRunHook`)."""
def _create_exporter(name):
class FakeExporter(exporter_lib.Exporter):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
def export(self, *args, **kwargs):
del args, kwargs
return FakeExporter(name=name)
def _create_run_config_with_cluster_spec(tf_config):
with test.mock.patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}):
return run_config_lib.RunConfig()
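# RunConfig derives cluster_spec, task_type and task_id from the serialized
# TF_CONFIG environment variable; e.g. _TF_CONFIG_FOR_WORKER above yields
# task_type='worker' and task_id=1.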
class TrainSpecTest(test.TestCase):
"""Tests TrainSpec."""
def testRequiredArgumentsSet(self):
"""Tests that no errors are raised when all required arguments are set."""
spec = training.TrainSpec(input_fn=lambda: 1)
self.assertEqual(1, spec.input_fn())
self.assertIsNone(spec.max_steps)
self.assertEqual(0, len(spec.hooks))
def testAllArgumentsSet(self):
"""Tests that no errors are raised when all arguments are set."""
hooks = [_FakeHook()]
spec = training.TrainSpec(input_fn=lambda: 1, max_steps=2, hooks=hooks)
self.assertEqual(1, spec.input_fn())
self.assertEqual(2, spec.max_steps)
self.assertEqual(tuple(hooks), spec.hooks)
def testInvalidInputFn(self):
with self.assertRaisesRegexp(TypeError, _INVALID_INPUT_FN_MSG):
training.TrainSpec(input_fn='invalid')
def testInvalidMaxStep(self):
with self.assertRaisesRegexp(ValueError, _INVALID_MAX_STEPS_MSG):
training.TrainSpec(input_fn=lambda: 1, max_steps=0)
def testInvalidHook(self):
with self.assertRaisesRegexp(TypeError, _INVALID_HOOK_MSG):
training.TrainSpec(input_fn=lambda: 1, hooks=[_InvalidHook()])
class EvalSpecTest(test.TestCase):
"""Tests EvalSpec."""
def testRequiredArgumentsSet(self):
"""Tests that no errors are raised when all required arguments are set."""
spec = training.EvalSpec(input_fn=lambda: 1)
self.assertEqual(1, spec.input_fn())
self.assertEqual(_DEFAULT_EVAL_STEPS, spec.steps)
self.assertIsNone(spec.name)
self.assertEqual(0, len(spec.hooks))
self.assertEqual(0, len(spec.exporters))
self.assertEqual(_DEFAULT_EVAL_DELAY_SECS, spec.start_delay_secs)
self.assertEqual(_DEFAULT_EVAL_THROTTLE_SECS, spec.throttle_secs)
def testAllArgumentsSet(self):
"""Tests that no errors are raised when all arguments are set."""
hooks = [_FakeHook()]
exporter = _create_exporter('a')
spec = training.EvalSpec(
input_fn=lambda: 1,
steps=2,
name='name',
hooks=hooks,
exporters=exporter,
start_delay_secs=3,
throttle_secs=4)
self.assertEqual(1, spec.input_fn())
self.assertEqual(2, spec.steps)
self.assertEqual('name', spec.name)
self.assertEqual(tuple(hooks), spec.hooks)
self.assertEqual((exporter,), spec.exporters)
self.assertEqual(3, spec.start_delay_secs)
self.assertEqual(4, spec.throttle_secs)
def testListOfExporters(self):
"""Tests that no errors are raised with multiple exporters."""
exporters = [_create_exporter('a'), _create_exporter('b')]
spec = training.EvalSpec(input_fn=lambda: 1, exporters=exporters)
self.assertEqual(1, spec.input_fn())
self.assertEqual(tuple(exporters), spec.exporters)
def testInvalidInputFn(self):
with self.assertRaisesRegexp(TypeError, _INVALID_INPUT_FN_MSG):
training.EvalSpec(input_fn='invalid')
def testInvalidMaxStep(self):
with self.assertRaisesRegexp(ValueError, _INVALID_STEPS_MSG):
training.EvalSpec(input_fn=lambda: 1, steps=0)
def testInvalidName(self):
with self.assertRaisesRegexp(TypeError, _INVALID_NAME_MSG):
training.EvalSpec(input_fn=lambda: 1, name=123)
def testInvalidHook(self):
with self.assertRaisesRegexp(TypeError, _INVALID_HOOK_MSG):
training.EvalSpec(input_fn=lambda: 1, hooks=[_InvalidHook()])
def testInvalidDelaySecs(self):
with self.assertRaisesRegexp(ValueError, _INVALID_EVAL_DELAY_SECS_MSG):
training.EvalSpec(input_fn=lambda: 1, start_delay_secs=-1)
def testInvalidThrottleSecs(self):
with self.assertRaisesRegexp(ValueError, _INVALID_EVAL_THROTTLE_SECS_MSG):
training.EvalSpec(input_fn=lambda: 1, throttle_secs=-1)
def testInvalidTypeOfListOfExporters(self):
with self.assertRaisesRegexp(TypeError, _INVALID_EXPORTER_MSG):
training.EvalSpec(
input_fn=lambda: 1, exporters=[_create_exporter('a'),
_FakeHook()])
def testInvalidTypeOfIndividualExporter(self):
with self.assertRaisesRegexp(TypeError, _INVALID_EXPORTER_MSG):
training.EvalSpec(input_fn=lambda: 1, exporters=_FakeHook())
def testInvalidTypeOfExporterName(self):
with self.assertRaisesRegexp(ValueError, _INVALID_EXPORTER_NAME_TYPE_MSG):
training.EvalSpec(input_fn=lambda: 1,
exporters=_create_exporter(name=123))
def testMultipleExportersWithTheSameName(self):
with self.assertRaisesRegexp(ValueError, _DUPLICATE_EXPORTER_NAMES_MSG):
training.EvalSpec(
input_fn=lambda: 1,
exporters=[_create_exporter('a'), _create_exporter('a')])
def testMultipleExportersAndOneWithoutAName(self):
with self.assertRaisesRegexp(ValueError, _NONE_EXPORTER_NAME_MSG):
training.EvalSpec(
input_fn=lambda: 1,
exporters=[_create_exporter('a'),
_create_exporter(None)])
def testSingleExporterWithoutAName(self):
with self.assertRaisesRegexp(ValueError, _NONE_EXPORTER_NAME_MSG):
training.EvalSpec(input_fn=lambda: 1, exporters=_create_exporter(None))
class TrainAndEvaluateTest(test.TestCase):
def test_run_task(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
with test.mock.patch.object(training, '_TrainingExecutor') as mock_executor:
mock_executor_instance = test.mock.Mock()
mock_executor.return_value = mock_executor_instance
training.train_and_evaluate(mock_est, mock_train_spec, mock_eval_spec)
mock_executor.assert_called_with(estimator=mock_est,
train_spec=mock_train_spec,
eval_spec=mock_eval_spec)
self.assertTrue(mock_executor_instance.run.called)
def test_error_out_if_evaluator_task_id_is_non_zero(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
},
'task': {
'type': run_config_lib.TaskType.EVALUATOR,
'index': 1
}
}
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = _create_run_config_with_cluster_spec(tf_config)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
with self.assertRaisesRegexp(ValueError, _INVALID_EVAL_TASK_ID_ERR):
training.train_and_evaluate(mock_est, mock_train_spec, mock_eval_spec)
def test_invalid_estimator(self):
invalid_estimator = object()
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
with self.assertRaisesRegexp(TypeError, _INVALID_ESTIMATOR_MSG):
training.train_and_evaluate(invalid_estimator, mock_train_spec,
mock_eval_spec)
def test_fail_fast_if_invalid_eval_spec(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
invalid_eval_spec = object()
with test.mock.patch.object(training, '_TrainingExecutor') as mock_executor:
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_SPEC_MSG):
training.train_and_evaluate(mock_est, mock_train_spec,
invalid_eval_spec)
mock_executor.assert_not_called()
class TrainingExecutorConstructorTest(test.TestCase):
"""Tests constructor of _TrainingExecutor."""
def test_required_arguments_set(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=lambda: 1)
executor = training._TrainingExecutor(estimator, train_spec, eval_spec)
self.assertEqual(estimator, executor.estimator)
def test_invalid_estimator(self):
invalid_estimator = object()
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=lambda: 1)
with self.assertRaisesRegexp(TypeError, _INVALID_ESTIMATOR_MSG):
training._TrainingExecutor(invalid_estimator, train_spec, eval_spec)
def test_invalid_train_spec(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
invalid_train_spec = object()
eval_spec = training.EvalSpec(input_fn=lambda: 1)
with self.assertRaisesRegexp(TypeError, _INVALID_TRAIN_SPEC_MSG):
training._TrainingExecutor(estimator, invalid_train_spec, eval_spec)
def test_invalid_eval_spec(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
train_spec = training.TrainSpec(input_fn=lambda: 1)
invalid_eval_spec = object()
with self.assertRaisesRegexp(TypeError, _EVAL_SPEC_OR_NONE_MSG):
training._TrainingExecutor(estimator, train_spec, invalid_eval_spec)
def test_eval_spec_none(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = None
# Tests that no error is raised.
training._TrainingExecutor(estimator, train_spec, eval_spec)
def test_invalid_train_hooks(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=lambda: 1)
invalid_train_hooks = [object()]
with self.assertRaisesRegexp(TypeError, _INVALID_HOOK_MSG):
training._TrainingExecutor(
estimator, train_spec, eval_spec, train_hooks=invalid_train_hooks)
def test_invalid_continuous_eval_listener(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=lambda: 1)
invalid_continuous_eval_listener = object()
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_LISTENER_MSG):
training._TrainingExecutor(
estimator,
train_spec,
eval_spec,
continuous_eval_listener=invalid_continuous_eval_listener)
class _TrainingExecutorTrainingTest(object):
"""Tests training of _TrainingExecutor."""
def __init__(self, run_config):
self._run_config = run_config
def _run_task(self, executor):
# We should not call executor.run as the test here is intended to test
# run_foo explicitly (foo is the task type).
return getattr(executor, 'run_' + self._run_config.task_type)()
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_train_with_train_spec(self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = self._run_config
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=2, hooks=[_FakeHook()])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_server_instance = mock_server.return_value
executor = training._TrainingExecutor(mock_est, train_spec, mock_eval_spec)
self._run_task(executor)
mock_server.assert_called_with(
mock_est.config.cluster_spec,
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
mock_est.train.assert_called_with(
input_fn=train_spec.input_fn,
max_steps=train_spec.max_steps,
hooks=list(train_spec.hooks),
saving_listeners=test.mock.ANY)
mock_est.evaluate.assert_not_called()
mock_est.export_savedmodel.assert_not_called()
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_train_with_no_eval_spec(self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = self._run_config
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=2, hooks=[_FakeHook()])
eval_spec = None
mock_server_instance = mock_server.return_value
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
self._run_task(executor)
mock_server.assert_called_with(
mock_est.config.cluster_spec,
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
mock_est.train.assert_called_with(
input_fn=train_spec.input_fn,
max_steps=train_spec.max_steps,
hooks=list(train_spec.hooks),
saving_listeners=test.mock.ANY)
mock_est.evaluate.assert_not_called()
mock_est.export_savedmodel.assert_not_called()
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_train_with_train_hooks(self, unused_mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = self._run_config
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=2, hooks=[_FakeHook()])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
extra_hooks = [_FakeHook()]
executor = training._TrainingExecutor(
mock_est, train_spec, mock_eval_spec, train_hooks=extra_hooks)
self._run_task(executor)
mock_est.train.assert_called_with(
input_fn=train_spec.input_fn,
max_steps=train_spec.max_steps,
hooks=list(train_spec.hooks) + extra_hooks,
saving_listeners=test.mock.ANY)
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_no_server_startup_in_google(self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = self._run_config
mock_train_spec = test.mock.Mock(spec=training.TrainSpec, hooks=[])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
tf_config = {'TF_CONFIG': json.dumps(_TF_CONFIG_FOR_GOOGLE)}
with test.mock.patch.dict('os.environ', tf_config):
self._run_task(executor)
mock_server.assert_not_called()
def test_fail_with_empty_cluster_spec(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = None
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = 'worker'
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
self._run_task(training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec))
def test_fail_with_empty_master(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec(
{'worker': ['dummy', 'dummy1']})
mock_est.config.master = ''
mock_est.config.task_type = 'worker'
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
self._run_task(training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec))
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_single_worker_node_with_empty_tf_master(
self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec, hooks=[])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
# Single node cluster.
mock_est.config.cluster_spec = server_lib.ClusterSpec({'worker': ['dummy']})
mock_est.config.master = ''
mock_est.config.task_type = 'worker'
mock_est.config.task_id = 2
self._run_task(training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec))
self.assertTrue(mock_est.train.called)
mock_server.assert_not_called()
def test_fail_with_empty_task_type(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec({'worker': ['dummy']})
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = ''
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
self._run_task(training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec))
def test_fail_with_none_task_id(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec({'worker': ['dummy']})
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = 'worker'
mock_est.config.task_id = None
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
self._run_task(training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec))
class TrainingExecutorRunWorkerTest(_TrainingExecutorTrainingTest,
test.TestCase):
"""Tests run_worker of _TrainingExecutor."""
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
_TrainingExecutorTrainingTest.__init__(
self,
run_config=_create_run_config_with_cluster_spec(_TF_CONFIG_FOR_WORKER))
@test.mock.patch.object(server_lib, 'Server')
def test_delay_for_worker(self, _):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = self._run_config
mock_train_spec = test.mock.Mock(spec=training.TrainSpec, hooks=[])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
expected_secs = (self._run_config.task_id + 1) * _DELAY_SECS_PER_WORKER
with test.mock.patch.object(time, 'sleep') as mock_sleep:
mock_sleep.side_effect = lambda s: self.assertEqual(expected_secs, s)
self._run_task(executor)
self.assertTrue(mock_sleep.called)
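  # With _TF_CONFIG_FOR_WORKER (task_id=1), the staggered start-up delay
  # checked above is (1 + 1) * _DELAY_SECS_PER_WORKER = 10 seconds.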
class TrainingExecutorRunChiefTest(_TrainingExecutorTrainingTest,
test.TestCase):
"""Tests run_chief of _TrainingExecutor."""
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
_TrainingExecutorTrainingTest.__init__(
self,
run_config=_create_run_config_with_cluster_spec(_TF_CONFIG_FOR_CHIEF))
@test.mock.patch.object(server_lib, 'Server')
def test_no_delay_for_chief(self, _):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = self._run_config
mock_train_spec = test.mock.Mock(spec=training.TrainSpec, hooks=[])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
with test.mock.patch.object(time, 'sleep') as mock_sleep:
self._run_task(executor)
mock_sleep.assert_not_called()
class TrainingExecutorRunMasterTest(test.TestCase):
"""Tests run_chief of _TrainingExecutor."""
def setUp(self):
self._run_config = _create_run_config_with_cluster_spec(
_TF_CONFIG_FOR_MASTER)
@test.mock.patch.object(server_lib, 'Server')
def test_no_delay_for_master(self, _):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate = lambda *args, **kw: {ops.GraphKeys.GLOBAL_STEP: 123}
mock_est.config = self._run_config
mock_train_spec = test.mock.Mock(
spec=training.TrainSpec, max_steps=123, hooks=[])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec, exporters=[])
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
with test.mock.patch.object(time, 'sleep') as mock_sleep:
executor.run_master()
mock_sleep.assert_not_called()
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_train_with_train_spec(self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate = lambda *args, **kw: {ops.GraphKeys.GLOBAL_STEP: 123}
mock_est.config = self._run_config
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=2, hooks=[_FakeHook()])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec, exporters=[])
mock_server_instance = mock_server.return_value
executor = training._TrainingExecutor(mock_est, train_spec, mock_eval_spec)
executor.run_master()
mock_server.assert_called_with(
mock_est.config.cluster_spec,
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
mock_est.train.assert_called_with(
input_fn=train_spec.input_fn,
max_steps=train_spec.max_steps,
hooks=list(train_spec.hooks),
saving_listeners=test.mock.ANY)
mock_est.export_savedmodel.assert_not_called()
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_train_with_no_eval_spec_fails(self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate = lambda *args, **kw: {ops.GraphKeys.GLOBAL_STEP: 123}
mock_est.config = self._run_config
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=2, hooks=[_FakeHook()])
eval_spec = None
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_SPEC_MSG):
executor.run_master()
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_train_with_train_hooks(self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate = lambda *args, **kw: {ops.GraphKeys.GLOBAL_STEP: 123}
mock_est.config = self._run_config
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=2, hooks=[_FakeHook()])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec, exporters=[])
extra_hooks = [_FakeHook()]
executor = training._TrainingExecutor(
mock_est, train_spec, mock_eval_spec, train_hooks=extra_hooks)
executor.run_master()
mock_est.train.assert_called_with(
input_fn=train_spec.input_fn,
max_steps=train_spec.max_steps,
hooks=list(train_spec.hooks) + extra_hooks,
saving_listeners=test.mock.ANY)
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_no_server_startup_in_google(self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate = lambda *args, **kw: {ops.GraphKeys.GLOBAL_STEP: 123}
mock_est.config = self._run_config
mock_train_spec = test.mock.Mock(
spec=training.TrainSpec, max_steps=123, hooks=[])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec, exporters=[])
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
tf_config = {'TF_CONFIG': json.dumps(_TF_CONFIG_FOR_GOOGLE)}
with test.mock.patch.dict('os.environ', tf_config):
executor.run_master()
mock_server.assert_not_called()
def test_fail_with_empty_cluster_spec(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = None
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = 'master'
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(
mock_est, mock_train_spec, mock_eval_spec).run_master()
def test_fail_with_empty_master(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec(
{'master': ['dummy'], 'worker': ['dummy1']})
mock_est.config.master = ''
mock_est.config.task_type = 'master'
mock_est.config.task_id = 0
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(
mock_est, mock_train_spec, mock_eval_spec).run_master()
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_single_master_node_with_empty_tf_master(
self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate = lambda *args, **kw: {ops.GraphKeys.GLOBAL_STEP: 123}
mock_train_spec = test.mock.Mock(
spec=training.TrainSpec, max_steps=123, hooks=[])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec, exporters=[])
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec(
{'master': ['dummy']})
mock_est.config.master = ''
mock_est.config.task_type = 'master'
mock_est.config.task_id = 0
executor = training._TrainingExecutor(
mock_est, mock_train_spec, mock_eval_spec)
executor.run_master()
mock_server.assert_not_called()
self.assertTrue(mock_est.train.called)
def test_fail_with_empty_task_type(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec({'master': ['dummy']})
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = ''
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(
mock_est, mock_train_spec, mock_eval_spec).run_master()
def test_fail_with_none_task_id(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec({'master': ['dummy']})
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = 'master'
mock_est.config.task_id = None
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(
mock_est, mock_train_spec, mock_eval_spec).run_master()
@test.mock.patch.object(server_lib, 'Server')
def test_run_master_triggers_evaluate_and_export(self, _):
def estimator_train(saving_listeners, *args, **kwargs):
# There shalt be a saving_listener. Estimator is going to call
# `after_save`.
del args, kwargs
saving_listeners[0].begin()
saving_listeners[0].after_save(session=None, global_step_value=0)
saving_listeners[0].after_save(session=None, global_step_value=10)
mock_est = test.mock.Mock(
spec=estimator_lib.Estimator, model_dir='path/', train=estimator_train)
mock_est.latest_checkpoint.return_value = 'checkpoint_path/'
mock_est.config = self._run_config
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_whether_export_is_called'
train_spec = training.TrainSpec(input_fn=lambda: 1, max_steps=300)
eval_spec = training.EvalSpec(
input_fn=lambda: 1, steps=2, exporters=exporter)
eval_result = {_GLOBAL_STEP_KEY: train_spec.max_steps}
mock_est.evaluate.return_value = eval_result
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
executor.run_master()
mock_est.evaluate.assert_called_with(
name=eval_spec.name,
input_fn=eval_spec.input_fn,
steps=eval_spec.steps,
checkpoint_path='checkpoint_path/',
hooks=eval_spec.hooks)
self.assertEqual(1, exporter.export.call_count)
exporter.export.assert_called_with(
estimator=mock_est,
export_path=os.path.join('path/', 'export', exporter.name),
checkpoint_path='checkpoint_path/',
eval_result=eval_result,
is_the_final_export=True)
@test.mock.patch.object(basic_session_run_hooks, 'SecondOrStepTimer')
@test.mock.patch.object(server_lib, 'Server')
def test_run_master_throttle_eval(self, _, mock_timer_class):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, model_dir='path/')
mock_timer = test.mock.Mock()
mock_timer_class.return_value = mock_timer
def estimator_train(saving_listeners, *args, **kwargs):
del args, kwargs
saving_listeners[0].begin()
# Call four times.
mock_timer.should_trigger_for_step.return_value = True
saving_listeners[0].after_save(session=None, global_step_value=None)
mock_timer.should_trigger_for_step.return_value = True
saving_listeners[0].after_save(session=None, global_step_value=None)
mock_timer.should_trigger_for_step.return_value = False
saving_listeners[0].after_save(session=None, global_step_value=None)
mock_timer.should_trigger_for_step.return_value = True
saving_listeners[0].after_save(session=None, global_step_value=None)
mock_est.train = estimator_train
mock_est.latest_checkpoint.side_effect = ['ckpt1', 'ckpt2']
mock_est.config = self._run_config
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_whether_export_is_called'
train_spec = training.TrainSpec(input_fn=lambda: 1, max_steps=300)
eval_spec = training.EvalSpec(
input_fn=lambda: 1, steps=2, exporters=exporter, throttle_secs=10)
mock_est.evaluate.side_effect = [
        {_GLOBAL_STEP_KEY: train_spec.max_steps // 2},
{_GLOBAL_STEP_KEY: train_spec.max_steps}
]
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
executor.run_master()
self.assertEqual(2, mock_est.evaluate.call_count)
self.assertEqual(2, exporter.export.call_count)
is_final_export_list = [call[1]['is_the_final_export']
for call in exporter.export.call_args_list]
self.assertEqual([False, True], is_final_export_list)
@test.mock.patch.object(basic_session_run_hooks, 'SecondOrStepTimer')
@test.mock.patch.object(server_lib, 'Server')
def test_run_master_throttle_eval_which_skips_final_ckpt(
self, _, mock_timer_class):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, model_dir='path/')
mock_timer = test.mock.Mock()
mock_timer_class.return_value = mock_timer
def estimator_train(saving_listeners, *args, **kwargs):
del args, kwargs
saving_listeners[0].begin()
      # Call three times (one for the first saving).
mock_timer.should_trigger_for_step.return_value = True
saving_listeners[0].after_save(session=None, global_step_value=0)
mock_timer.should_trigger_for_step.return_value = True
saving_listeners[0].after_save(session=None, global_step_value=125)
mock_timer.should_trigger_for_step.return_value = False
saving_listeners[0].after_save(session=None, global_step_value=250)
      # At the end, evaluate should be called even if throttle_secs would
      # prevent it.
mock_timer.should_trigger_for_step.return_value = False
saving_listeners[0].end(session=None, global_step_value=300)
mock_est.train = estimator_train
mock_est.latest_checkpoint.side_effect = ['ckpt1', 'ckpt2']
mock_est.config = self._run_config
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_whether_export_is_called'
train_spec = training.TrainSpec(input_fn=lambda: 1, max_steps=300)
eval_spec = training.EvalSpec(
input_fn=lambda: 1, steps=2, exporters=exporter, throttle_secs=10)
mock_est.evaluate.side_effect = [
        {_GLOBAL_STEP_KEY: train_spec.max_steps // 2},
{_GLOBAL_STEP_KEY: train_spec.max_steps}
]
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
executor.run_master()
self.assertEqual(2, mock_est.evaluate.call_count)
self.assertEqual(2, exporter.export.call_count)
is_final_export_list = [call[1]['is_the_final_export']
for call in exporter.export.call_args_list]
self.assertEqual([False, True], is_final_export_list)
class TrainingExecutorRunEvaluatorTest(test.TestCase):
"""Tests run_evaluator of _TrainingExecutor."""
def _set_up_mock_est_to_train_and_evaluate_once(self, mock_est,
mock_train_spec):
"""Sets global step in eval result to end the while True eval loop."""
training_max_step = 200
mock_est.evaluate.return_value = {_GLOBAL_STEP_KEY: training_max_step}
mock_train_spec.max_steps = training_max_step
def test_evaluate_with_evaluate_spec(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.latest_checkpoint.return_value = 'latest_it_is'
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
eval_spec = training.EvalSpec(
input_fn=lambda: 1, steps=2, hooks=[_FakeHook()], name='cont_eval',
start_delay_secs=0, throttle_secs=0)
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
executor.run_evaluator()
mock_est.evaluate.assert_called_with(
name='cont_eval',
input_fn=eval_spec.input_fn,
steps=eval_spec.steps,
checkpoint_path='latest_it_is',
hooks=eval_spec.hooks)
self.assertFalse(mock_est.train.called)
def test_evaluate_with_no_eval_spec_fails(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.latest_checkpoint.return_value = 'latest_it_is'
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
eval_spec = None
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_SPEC_MSG):
executor.run_evaluator()
def test_evaluate_with_train_hooks(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.latest_checkpoint.return_value = 'latest_it_is'
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
eval_spec = training.EvalSpec(
input_fn=lambda: 1,
steps=2,
hooks=[_FakeHook()],
name='cont_eval',
start_delay_secs=0,
throttle_secs=0)
# The train_hooks will not be called during eval.
mock_hook = test.mock.Mock(spec=session_run_hook.SessionRunHook)
executor = training._TrainingExecutor(
mock_est, mock_train_spec, eval_spec, train_hooks=[mock_hook])
executor.run_evaluator()
mock_hook.begin.assert_not_called()
def test_evaluate_multiple_times(self):
training_max_step = 200
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.model_dir = compat.as_bytes(test.get_temp_dir())
mock_est.evaluate.side_effect = [
{_GLOBAL_STEP_KEY: training_max_step // 2},
{_GLOBAL_STEP_KEY: training_max_step}
]
mock_est.latest_checkpoint.side_effect = ['path_1', 'path_2']
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_train_spec.max_steps = training_max_step
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_how_many_times_export_is_called'
mock_est.times_export_was_called = 0
mock_est.times_final_export_was_called = 0
def export(estimator, export_path, checkpoint_path, eval_result,
is_the_final_export):
del export_path, checkpoint_path, eval_result
estimator.times_export_was_called += 1
      # The final export happens at the end, so it must not have been seen yet.
      self.assertEqual(0, estimator.times_final_export_was_called)
if is_the_final_export:
estimator.times_final_export_was_called += 1
exporter.export = export
eval_spec = training.EvalSpec(
input_fn=lambda: 1,
start_delay_secs=0,
throttle_secs=0,
exporters=exporter)
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
executor.run_evaluator()
self.assertEqual(2, mock_est.evaluate.call_count)
self.assertEqual(2, mock_est.times_export_was_called)
self.assertEqual(1, mock_est.times_final_export_was_called)
def test_evaluate_listener_before_eval(self):
training_max_step = 200
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.model_dir = compat.as_bytes(test.get_temp_dir())
# Without early stopping, this eval will be run twice.
mock_est.evaluate.side_effect = [{
_GLOBAL_STEP_KEY: training_max_step // 2
}, {
_GLOBAL_STEP_KEY: training_max_step
}]
mock_est.latest_checkpoint.side_effect = ['path_1', 'path_2']
mock_train_spec = test.mock.Mock(spec=training.TrainSpec, hooks=[])
mock_train_spec.max_steps = training_max_step
class _Listener(training._ContinuousEvalListener):
def __init__(self):
self.call_count = 0
def before_eval(self):
self.call_count += 1
return self.call_count == 1
listener = _Listener()
eval_spec = training.EvalSpec(
input_fn=lambda: 1, start_delay_secs=0, throttle_secs=0)
training._TrainingExecutor(
mock_est, mock_train_spec, eval_spec,
continuous_eval_listener=listener).run_evaluator()
    # before_eval returns False the second time, so evaluate will be called
    # only once.
self.assertEqual(1, mock_est.evaluate.call_count)
self.assertEqual(2, listener.call_count)
def test_evaluate_listener_after_eval(self):
training_max_step = 200
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.model_dir = compat.as_bytes(test.get_temp_dir())
# Without early stopping, this eval will be run twice.
expected_eval_metrics = [{
_GLOBAL_STEP_KEY: training_max_step // 2
}, {
_GLOBAL_STEP_KEY: training_max_step
}]
mock_est.evaluate.side_effect = expected_eval_metrics
mock_est.latest_checkpoint.side_effect = ['path_1', 'path_2']
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_train_spec.max_steps = training_max_step
class _Listener(training._ContinuousEvalListener):
def __init__(self):
self.call_count = 0
def after_eval(self, eval_result):
self.call_count += 1
self.eval_result = eval_result
return False
listener = _Listener()
eval_spec = training.EvalSpec(
input_fn=lambda: 1, start_delay_secs=0, throttle_secs=0)
training._TrainingExecutor(
mock_est, mock_train_spec, eval_spec,
continuous_eval_listener=listener).run_evaluator()
    # after_eval returns False the first time, so evaluate will be called
    # only once.
self.assertEqual(1, mock_est.evaluate.call_count)
self.assertEqual(1, listener.call_count)
self.assertAllEqual(expected_eval_metrics[0], listener.eval_result.metrics)
self.assertEqual('path_1', listener.eval_result.checkpoint_path)
def test_final_export_is_true_in_the_end(self):
training_max_step = 200
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.model_dir = compat.as_bytes(test.get_temp_dir())
mock_est.evaluate.side_effect = [
{_GLOBAL_STEP_KEY: training_max_step // 2},
{_GLOBAL_STEP_KEY: training_max_step}
]
mock_est.latest_checkpoint.side_effect = ['path_1', 'path_2']
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_train_spec.max_steps = training_max_step
mock_est.times_export_fn_was_called = 0
mock_est.times_the_final_export_was_true = 0
def export(estimator, export_path, checkpoint_path, eval_result,
is_the_final_export):
del export_path, checkpoint_path, eval_result
estimator.times_export_fn_was_called += 1
if is_the_final_export:
estimator.times_the_final_export_was_true += 1
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_how_many_times_export_is_called'
exporter.export = export
eval_spec = training.EvalSpec(
input_fn=lambda: 1,
start_delay_secs=0,
throttle_secs=0,
exporters=exporter)
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
executor.run_evaluator()
self.assertEqual(2, mock_est.evaluate.call_count)
self.assertEqual(2, mock_est.times_export_fn_was_called)
self.assertEqual(1, mock_est.times_the_final_export_was_true)
def test_skip_evaluation_due_to_ckpt(self):
training_max_step = 200
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate.side_effect = [
{_GLOBAL_STEP_KEY: training_max_step // 2},
{_GLOBAL_STEP_KEY: training_max_step}
]
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_train_spec.max_steps = training_max_step
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
    # The first two items are invalid; the next two are the same checkpoint.
mock_est.latest_checkpoint.side_effect = [
None, '', 'same', 'same', 'path_2'
]
eval_spec = training.EvalSpec(
input_fn=lambda: 1, start_delay_secs=0, throttle_secs=2)
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
with test.mock.patch.object(logging, 'warning') as mock_log:
executor.run_evaluator()
    # Three of the five checkpoint paths are skipped: two invalid and one
    # duplicate.
self.assertEqual(5, mock_est.latest_checkpoint.call_count)
self.assertEqual(2, mock_est.evaluate.call_count)
    # Two warning logs are expected (the last warning time is reset after a
    # successful evaluation).
self.assertEqual(2, mock_log.call_count)
def test_warning_if_throttle_secs_is_zero(self):
training_max_step = 200
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate.side_effect = [
{_GLOBAL_STEP_KEY: training_max_step}
]
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_train_spec.max_steps = training_max_step
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
    # Make the first checkpoint invalid so that the executor reaches the
    # throttle_secs=0 check.
mock_est.latest_checkpoint.side_effect = [None, 'path']
eval_spec = training.EvalSpec(
input_fn=lambda: 1, start_delay_secs=0, throttle_secs=0)
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
with test.mock.patch.object(logging, 'warning') as mock_log:
executor.run_evaluator()
# First ckpt is invalid.
self.assertEqual(2, mock_est.latest_checkpoint.call_count)
self.assertEqual(1, mock_est.evaluate.call_count)
self.assertRegexpMatches(str(mock_log.call_args), _INPROPER_THROTTL_SECS)
def test_continuous_eval_listener_eval_result(self):
training_max_step = 200
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
expected_eval_metrics = [{
_GLOBAL_STEP_KEY: training_max_step // 2
}, {
_GLOBAL_STEP_KEY: training_max_step
}]
mock_est.evaluate.side_effect = expected_eval_metrics
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_train_spec.max_steps = training_max_step
class _Listener(training._ContinuousEvalListener):
def __init__(self):
self.eval_results = []
def after_eval(self, eval_result):
self.eval_results.append(eval_result)
return True
continuous_eval_listener = _Listener()
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
    # The first two items are invalid; the next two are the same checkpoint.
mock_est.latest_checkpoint.side_effect = [
None, '', 'same', 'same', 'path_2'
]
expected_eval_results = [
training._EvalResult(training._EvalStatus.MISSING_CHECKPOINT),
training._EvalResult(training._EvalStatus.MISSING_CHECKPOINT),
training._EvalResult(
training._EvalStatus.EVALUATED,
metrics=expected_eval_metrics[0],
checkpoint_path='same'),
training._EvalResult(training._EvalStatus.NO_NEW_CHECKPOINT),
training._EvalResult(
training._EvalStatus.EVALUATED,
metrics=expected_eval_metrics[1],
checkpoint_path='path_2'),
]
eval_spec = training.EvalSpec(
input_fn=lambda: 1, start_delay_secs=0, throttle_secs=0)
executor = training._TrainingExecutor(
mock_est,
mock_train_spec,
eval_spec,
continuous_eval_listener=continuous_eval_listener)
executor.run_evaluator()
    # Three of the five checkpoint paths are skipped: two invalid and one
    # duplicate.
self.assertEqual(5, mock_est.latest_checkpoint.call_count)
self.assertEqual(2, mock_est.evaluate.call_count)
self.assertEqual(5, len(continuous_eval_listener.eval_results))
for i, result in enumerate(continuous_eval_listener.eval_results):
self.assertEqual(expected_eval_results[i].status, result.status)
self.assertAllEqual(expected_eval_results[i].metrics, result.metrics)
self.assertEqual(expected_eval_results[i].checkpoint_path,
result.checkpoint_path)
def test_sleep_start_delay_secs(self):
training_max_step = 200
start_delay_secs = 123
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate.return_value = {_GLOBAL_STEP_KEY: training_max_step}
mock_est.model_dir = compat.as_bytes(test.get_temp_dir())
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_train_spec.max_steps = training_max_step
eval_spec = training.EvalSpec(
input_fn=lambda: 1, steps=2, hooks=[_FakeHook()], name='cont_eval',
start_delay_secs=start_delay_secs, throttle_secs=0)
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
with test.mock.patch.object(time, 'sleep') as mock_sleep:
executor.run_evaluator()
mock_sleep.assert_called_with(start_delay_secs)
self.assertTrue(mock_est.evaluate.called)
@test.mock.patch.object(time, 'time')
@test.mock.patch.object(time, 'sleep')
def test_throttle_secs(self, mock_sleep, mock_time):
throttle_secs = 123
operation_secs = 12
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
eval_spec = training.EvalSpec(
input_fn=lambda: 1, start_delay_secs=0, throttle_secs=throttle_secs)
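    # time.time() is consulted once before and once after the evaluation, so
    # the measured eval duration is operation_secs and the executor should
    # sleep for throttle_secs - operation_secs.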
mock_time.side_effect = [921, 921 + operation_secs]
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
    # Disable logging, as it also calls time.time.
with test.mock.patch.object(logging, 'info'):
executor.run_evaluator()
mock_sleep.assert_called_with(throttle_secs - operation_secs)
self.assertTrue(mock_est.evaluate.called)
def test_that_export_is_called(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
def export(estimator, *args, **kwargs):
del args, kwargs
estimator.export_was_called = True
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_whether_export_is_called'
exporter.export = export
eval_spec = training.EvalSpec(
input_fn=lambda: 1,
steps=2,
start_delay_secs=0,
throttle_secs=0,
exporters=exporter)
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
executor.run_evaluator()
# Verify that export was called on the right estimator.
self.assertTrue(mock_est.export_was_called)
def test_errors_out_if_evaluate_returns_empty_dict(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=(lambda: 1),
start_delay_secs=0, throttle_secs=0)
mock_est.evaluate.return_value = {}
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(ValueError, _INVALID_EMPTY_EVAL_RESULT_ERR):
executor.run_evaluator()
def test_errors_out_if_evaluate_returns_non_dict(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=(lambda: 1),
start_delay_secs=0, throttle_secs=0)
mock_est.evaluate.return_value = 123
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_RESULT_TYPE_ERR):
executor.run_evaluator()
def test_errors_out_if_evaluate_returns_dict_without_global_step(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=(lambda: 1),
start_delay_secs=0, throttle_secs=0)
mock_est.evaluate.return_value = {'loss': 123}
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(ValueError,
_MISSING_GLOBAL_STEP_IN_EVAL_RESULT_ERR):
executor.run_evaluator()
class TrainingExecutorRunPsTest(test.TestCase):
"""Tests run_ps of _TrainingExecutor."""
@test.mock.patch.object(server_lib, 'Server')
def test_std_server(self, mock_server):
mock_server_instance = test.mock.Mock()
mock_server.return_value = mock_server_instance
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = _create_run_config_with_cluster_spec(_TF_CONFIG_FOR_PS)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
executor.run_ps()
mock_server.assert_called_with(
mock_est.config.cluster_spec,
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
self.assertTrue(mock_server_instance.join.called)
def test_fail_with_empty_cluster_spec(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = None
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = 'ps'
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec).run_ps()
def test_fail_with_empty_master(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec({'ps': ['dummy']})
mock_est.config.master = ''
mock_est.config.task_type = 'ps'
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec).run_ps()
def test_fail_with_empty_task_type(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec({'ps': ['dummy']})
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = ''
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec).run_ps()
def test_fail_with_none_task_id(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec({'ps': ['dummy']})
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = 'ps'
mock_est.config.task_id = None
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec).run_ps()
class StopAtSecsHookTest(test.TestCase):
"""Tests StopAtSecsHook."""
@test.mock.patch.object(time, 'time')
def test_stops_after_time(self, mock_time):
mock_time.return_value = 1484695987.209386
hook = training._StopAtSecsHook(1000)
with ops.Graph().as_default():
no_op = control_flow_ops.no_op()
      # Some time passes before training starts.
mock_time.return_value += 250
with monitored_session.MonitoredSession(hooks=[hook]) as sess:
self.assertFalse(sess.should_stop())
sess.run(no_op)
self.assertFalse(sess.should_stop())
mock_time.return_value += 500
sess.run(no_op)
self.assertFalse(sess.should_stop())
mock_time.return_value += 400
sess.run(no_op)
self.assertFalse(sess.should_stop())
mock_time.return_value += 200
sess.run(no_op)
self.assertTrue(sess.should_stop())
class TrainingExecutorRunLocalTest(test.TestCase):
"""Tests run_local of _TrainingExecutor."""
def _model_fn(self, features, labels, mode):
del labels
with ops.control_dependencies([features]):
train_op = state_ops.assign_add(training_util.get_global_step(), 1)
return model_fn_lib.EstimatorSpec(
mode,
loss=constant_op.constant(0.),
train_op=train_op,
predictions=constant_op.constant([[10.]]),
eval_metric_ops={'mean_of_features': metrics_lib.mean(features)})
def _input_fn(self, repeat=True):
ds = dataset_ops.Dataset.from_tensors([1])
if repeat:
return ds.repeat()
return ds
def unique_checkpoint_every_time_fn(self):
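    # Return a different fake path on every call so each evaluation sees a
    # "new" checkpoint.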
return 'checkpoint_path_%s/' % random.random()
def test_runs_evaluate_with_every_new_checkpoint(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
config=run_config_lib.RunConfig(save_checkpoints_steps=10))
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
mock_est.times_export_was_called = 0
mock_est.times_final_export_was_called = 0
def export(estimator, export_path, checkpoint_path, eval_result,
is_the_final_export):
del export_path, checkpoint_path, eval_result
estimator.times_export_was_called += 1
      # The final export happens at the end, so it must not have been seen yet.
      self.assertEqual(0, estimator.times_final_export_was_called)
if is_the_final_export:
estimator.times_final_export_was_called += 1
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_how_many_times_export_is_called'
exporter.export = export
train_spec = training.TrainSpec(input_fn=self._input_fn, max_steps=22)
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False),
throttle_secs=0,
exporters=exporter)
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
executor.run_local()
self.assertEqual(1, mock_est.train.call_count)
self.assertEqual(3, mock_est.evaluate.call_count)
self.assertEqual(3, mock_est.times_export_was_called)
self.assertEqual(1, mock_est.times_final_export_was_called)
def test_runs_with_eval_listener_before_eval(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
config=run_config_lib.RunConfig(save_checkpoints_steps=10))
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
mock_est.latest_checkpoint = self.unique_checkpoint_every_time_fn
train_spec = training.TrainSpec(input_fn=self._input_fn, max_steps=12)
eval_spec = training.EvalSpec(input_fn=lambda: self._input_fn(repeat=False))
mock_est.evaluate.side_effect = [{_GLOBAL_STEP_KEY: train_spec.max_steps}]
class _Listener(training._ContinuousEvalListener):
def __init__(self):
self.call_count = 0
def before_eval(self):
self.call_count += 1
        return False  # Will stop run_local before the first eval.
listener = _Listener()
executor = training._TrainingExecutor(
mock_est, train_spec, eval_spec, continuous_eval_listener=listener)
executor.run_local()
self.assertEqual(1, mock_est.train.call_count)
self.assertEqual(0, mock_est.evaluate.call_count)
def test_runs_with_eval_listener_after_eval(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
config=run_config_lib.RunConfig(save_checkpoints_steps=10))
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
train_spec = training.TrainSpec(input_fn=self._input_fn, max_steps=3000)
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False), throttle_secs=0)
class _Listener(training._ContinuousEvalListener):
def __init__(self):
self.call_count = 0
def after_eval(self, eval_result):
self.call_count += 1
        return False  # Will stop run_local after the first eval.
listener = _Listener()
executor = training._TrainingExecutor(
mock_est, train_spec, eval_spec, continuous_eval_listener=listener)
metrics, _ = executor.run_local() # pylint: disable=assignment-from-no-return
self.assertEqual(1, mock_est.train.call_count)
self.assertEqual(1, mock_est.evaluate.call_count)
self.assertEqual(1, listener.call_count)
# Should be less than max_steps since listener did early stopping.
self.assertLess(metrics[_GLOBAL_STEP_KEY], train_spec.max_steps)
def test_handles_no_new_checkpoint_found(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
        # Disable checkpoint saving.
config=run_config_lib.RunConfig(
save_checkpoints_steps=None, save_checkpoints_secs=None))
train_spec = training.TrainSpec(
input_fn=self._input_fn, max_steps=300, hooks=[_FakeHook()])
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False),
hooks=[_FakeHook()],
throttle_secs=100)
executor = training._TrainingExecutor(est, train_spec, eval_spec)
with self.assertRaisesRegexp(ValueError,
'There should be a CheckpointSaverHook'):
executor.run_local()
def test_final_export_is_true_in_the_end(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
config=run_config_lib.RunConfig(save_checkpoints_steps=10))
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
mock_est.times_export_fn_was_called = 0
mock_est.times_the_final_export_was_true = 0
def export(estimator, export_path, checkpoint_path, eval_result,
is_the_final_export):
del export_path, checkpoint_path, eval_result
estimator.times_export_fn_was_called += 1
if is_the_final_export:
estimator.times_the_final_export_was_true += 1
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_how_many_times_export_is_called'
exporter.export = export
train_spec = training.TrainSpec(
input_fn=self._input_fn, max_steps=12, hooks=[_FakeHook()])
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False),
throttle_secs=0,
exporters=exporter)
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
executor.run_local()
self.assertEqual(1, mock_est.train.call_count)
self.assertEqual(2, mock_est.evaluate.call_count)
self.assertEqual(2, mock_est.times_export_fn_was_called)
self.assertEqual(1, mock_est.times_the_final_export_was_true)
def test_train_and_evaluate_args(self):
est = estimator_lib.Estimator(model_fn=self._model_fn)
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
train_spec = training.TrainSpec(
input_fn=self._input_fn, max_steps=300, hooks=[_FakeHook()])
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False),
steps=2,
hooks=[_FakeHook()],
name='local_eval')
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
executor.run_local()
mock_est.evaluate.assert_called_with(
name=eval_spec.name,
input_fn=eval_spec.input_fn,
steps=eval_spec.steps,
checkpoint_path=est.latest_checkpoint(),
hooks=eval_spec.hooks)
train_args = mock_est.train.call_args[1]
self.assertEqual(list(train_spec.hooks), list(train_args['hooks']))
self.assertEqual(train_spec.input_fn, train_args['input_fn'])
self.assertEqual(train_spec.max_steps, train_args['max_steps'])
def test_train_with_no_eval_spec_fails(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=300, hooks=[_FakeHook()])
eval_spec = None
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_SPEC_MSG):
executor.run_local()
def test_train_hooks(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, model_dir='path/')
mock_est.latest_checkpoint.return_value = 'checkpoint_path/'
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=300, hooks=[_FakeHook()])
eval_spec = training.EvalSpec(input_fn=lambda: 1, steps=2)
mock_est.evaluate.return_value = {_GLOBAL_STEP_KEY: train_spec.max_steps}
extra_hooks = [_FakeHook()]
executor = training._TrainingExecutor(
mock_est, train_spec, eval_spec, train_hooks=extra_hooks)
executor.run_local()
train_args = mock_est.train.call_args[1]
self.assertEqual(
list(train_spec.hooks) + extra_hooks, [
h for h in train_args['hooks']
if not isinstance(h, training._StopAtSecsHook)
])
def test_that_export_is_called_with_run_local(self):
est = estimator_lib.Estimator(model_fn=self._model_fn)
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
train_spec = training.TrainSpec(input_fn=self._input_fn, max_steps=12)
mock_est.evaluate.return_value = {_GLOBAL_STEP_KEY: train_spec.max_steps}
def export(estimator, *args, **kwargs):
del args, kwargs
estimator.export_was_called = True
return 'path_to_export'
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_whether_export_is_called'
exporter.export = export
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False),
steps=2,
start_delay_secs=0,
throttle_secs=213,
exporters=exporter)
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
# pylint: disable=assignment-from-no-return
_, export_results = executor.run_local()
# pylint: enable=assignment-from-no-return
self.assertTrue(mock_est.export_was_called)
self.assertEqual(export_results, ['path_to_export'])
def test_errors_out_if_evaluate_returns_empty_dict(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
config=run_config_lib.RunConfig(save_checkpoints_steps=2))
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
train_spec = training.TrainSpec(input_fn=self._input_fn)
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False), throttle_secs=0)
mock_est.evaluate.return_value = {}
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(ValueError, _INVALID_EMPTY_EVAL_RESULT_ERR):
executor.run_local()
def test_errors_out_if_evaluate_returns_non_dict(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
config=run_config_lib.RunConfig(save_checkpoints_steps=2))
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
train_spec = training.TrainSpec(input_fn=self._input_fn)
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False), throttle_secs=0)
mock_est.evaluate.return_value = 123
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_RESULT_TYPE_ERR):
executor.run_local()
def test_errors_out_if_evaluate_returns_dict_without_global_step(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
config=run_config_lib.RunConfig(save_checkpoints_steps=2))
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
train_spec = training.TrainSpec(input_fn=self._input_fn)
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False), throttle_secs=0)
mock_est.evaluate.return_value = {'loss': 123}
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(ValueError,
_MISSING_GLOBAL_STEP_IN_EVAL_RESULT_ERR):
executor.run_local()
def test_train_and_evaluate_return_metrics(self):
est = estimator_lib.Estimator(model_fn=self._model_fn)
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
train_spec = training.TrainSpec(
input_fn=self._input_fn, max_steps=12, hooks=[_FakeHook()])
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False),
steps=2,
hooks=[_FakeHook()],
name='local_eval')
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
# pylint: disable=assignment-from-no-return
metrics, _ = executor.run_local()
# pylint: enable=assignment-from-no-return
self.assertEqual(metrics['global_step'], 12)
class TrainAndEvaluateRunTest(test.TestCase):
def _test_run_task_and_executor(self, run_config):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = run_config
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
executor.call_task = {}
def task_fn(name):
def _fn():
executor.call_task[name] = 1
return _fn
executor.run_chief = task_fn('chief')
executor.run_master = task_fn('master')
executor.run_ps = task_fn('ps')
executor.run_evaluator = task_fn('evaluator')
executor.run_worker = task_fn('worker')
executor.run_local = task_fn('local')
return executor
def test_run_chief(self):
executor = self._test_run_task_and_executor(
run_config=_create_run_config_with_cluster_spec(_TF_CONFIG_FOR_CHIEF))
executor.run()
self.assertEqual(1, executor.call_task['chief'])
def test_run_worker(self):
executor = self._test_run_task_and_executor(
run_config=_create_run_config_with_cluster_spec(_TF_CONFIG_FOR_WORKER))
executor.run()
self.assertEqual(1, executor.call_task['worker'])
def test_run_ps(self):
executor = self._test_run_task_and_executor(
run_config=_create_run_config_with_cluster_spec(_TF_CONFIG_FOR_PS))
executor.run()
self.assertEqual(1, executor.call_task['ps'])
def test_run_evaluator(self):
executor = self._test_run_task_and_executor(
run_config=_create_run_config_with_cluster_spec(
_TF_CONFIG_FOR_EVALUATOR))
executor.run()
self.assertEqual(1, executor.call_task['evaluator'])
def test_run_local(self):
executor = self._test_run_task_and_executor(
run_config=run_config_lib.RunConfig())
executor.run()
self.assertEqual(1, executor.call_task['local'])
def test_invalid_local_task(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
'local': ['hos1:1'],
},
'task': {
'type': 'local', # invalid task type.
'index': 0
}
}
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = _create_run_config_with_cluster_spec(tf_config)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
with self.assertRaisesRegexp(ValueError, _INVALID_LOCAL_TASK_WITH_CLUSTER):
executor.run()
def test_unsupported_task_due_to_missing_run_task(self):
unsupported_task = 'alloc'
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
unsupported_task: ['hos1:1'],
},
'task': {
'type': unsupported_task,
'index': 0
}
}
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = _create_run_config_with_cluster_spec(tf_config)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
with self.assertRaisesRegexp(ValueError, _INVALID_TASK_TO_RUN):
executor.run()
def test_unsupported_task_due_to_not_callable(self):
unsupported_task = 'alloc'
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
unsupported_task: ['hos1:1'],
},
'task': {
'type': unsupported_task,
'index': 0
}
}
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = _create_run_config_with_cluster_spec(tf_config)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
executor.run_alloc = 123 # not callable
with self.assertRaisesRegexp(ValueError, _INVALID_TASK_TO_RUN):
executor.run()
def test_invalid_task_type(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.Mock()
mock_est.config.cluster_spec = server_lib.ClusterSpec({'1': ['dummy']})
mock_est.config.task_type = ''
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
with self.assertRaisesRegexp(ValueError, _INVALID_TASK_TYPE):
executor.run()
class TrainAndEvaluateIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _get_exporter(self, name, fc):
feature_spec = feature_column.make_parse_example_spec(fc)
serving_input_receiver_fn = (
export_lib.build_parsing_serving_input_receiver_fn(feature_spec))
return exporter_lib.LatestExporter(
name, serving_input_receiver_fn=serving_input_receiver_fn)
def _extract_loss_and_global_step(self, event_folder):
"""Returns the loss and global step in last event."""
event_paths = glob.glob(os.path.join(event_folder, 'events*'))
loss = None
global_step_count = None
for e in summary_iterator.summary_iterator(event_paths[-1]):
current_loss = None
for v in e.summary.value:
if v.tag == 'loss':
current_loss = v.simple_value
# If loss is not found, global step is meaningless.
if current_loss is None:
continue
current_global_step = e.step
if global_step_count is None or current_global_step > global_step_count:
global_step_count = current_global_step
loss = current_loss
return (loss, global_step_count)
def test_complete_flow_with_non_distributed_configuration(self):
n_classes = 3
input_dimension = 2
batch_size = 10
eval_name = 'foo'
exporter_name = 'saved_model_exporter'
# max_steps should be larger than save_summary_steps
max_steps = 10
save_summary_steps = 9
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
def train_input_fn():
return dataset_ops.Dataset.from_tensor_slices(({
'x': x_data
}, y_data)).batch(batch_size).repeat().shuffle(1000)
def eval_input_fn():
return dataset_ops.Dataset.from_tensor_slices(({
'x': x_data
}, y_data)).batch(batch_size)
def predict_input_fn():
return dataset_ops.Dataset.from_tensor_slices({
'x': x_data
}).batch(batch_size)
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
config=run_config_lib.RunConfig(save_summary_steps=save_summary_steps),
model_dir=self._model_dir)
train_spec = training.TrainSpec(input_fn=train_input_fn,
max_steps=max_steps)
eval_spec = training.EvalSpec(
name=eval_name,
input_fn=eval_input_fn,
steps=None,
exporters=self._get_exporter(exporter_name, feature_columns),
throttle_secs=0)
training.train_and_evaluate(est, train_spec, eval_spec)
# Make sure nothing is stuck in limbo.
writer_cache.FileWriterCache.clear()
    # Examine the training events. Use a range to check the global step to
    # avoid flakiness due to a global step race condition.
training_loss, _ = self._extract_loss_and_global_step(est.model_dir)
self.assertIsNotNone(training_loss)
# Examine the eval events. The global step should be accurate.
eval_loss, eval_global_step = self._extract_loss_and_global_step(
event_folder=est.eval_dir(eval_name))
self.assertIsNotNone(eval_loss)
self.assertEqual(max_steps, eval_global_step)
# Examine the export folder.
export_dir = os.path.join(os.path.join(est.model_dir, 'export'),
exporter_name)
self.assertTrue(gfile.Exists(export_dir))
# Examine the ckpt for predict.
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
if __name__ == '__main__':
test.main()
|
"""Support for ISY994 lights."""
from typing import Callable, Dict
from pyisy.constants import ISY_VALUE_UNKNOWN
from homeassistant.components.light import (
DOMAIN as LIGHT,
SUPPORT_BRIGHTNESS,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
_LOGGER,
CONF_RESTORE_LIGHT_STATE,
DOMAIN as ISY994_DOMAIN,
ISY994_NODES,
)
from .entity import ISYNodeEntity
from .helpers import migrate_old_unique_ids
from .services import async_setup_device_services, async_setup_light_services
ATTR_LAST_BRIGHTNESS = "last_brightness"
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[list], None],
) -> bool:
"""Set up the ISY994 light platform."""
hass_isy_data = hass.data[ISY994_DOMAIN][entry.entry_id]
isy_options = entry.options
restore_light_state = isy_options.get(CONF_RESTORE_LIGHT_STATE, False)
devices = []
for node in hass_isy_data[ISY994_NODES][LIGHT]:
devices.append(ISYLightEntity(node, restore_light_state))
await migrate_old_unique_ids(hass, LIGHT, devices)
async_add_entities(devices)
async_setup_device_services(hass)
async_setup_light_services(hass)
class ISYLightEntity(ISYNodeEntity, LightEntity, RestoreEntity):
"""Representation of an ISY994 light device."""
def __init__(self, node, restore_light_state) -> None:
"""Initialize the ISY994 light device."""
super().__init__(node)
self._last_brightness = None
self._restore_light_state = restore_light_state
@property
def is_on(self) -> bool:
"""Get whether the ISY994 light is on."""
if self._node.status == ISY_VALUE_UNKNOWN:
return False
return int(self._node.status) != 0
@property
def brightness(self) -> float:
"""Get the brightness of the ISY994 light."""
if self._node.status == ISY_VALUE_UNKNOWN:
return None
return int(self._node.status)
def turn_off(self, **kwargs) -> None:
"""Send the turn off command to the ISY994 light device."""
self._last_brightness = self.brightness
if not self._node.turn_off():
_LOGGER.debug("Unable to turn off light")
def on_update(self, event: object) -> None:
"""Save brightness in the update event from the ISY994 Node."""
if self._node.status not in (0, ISY_VALUE_UNKNOWN):
self._last_brightness = self._node.status
super().on_update(event)
# pylint: disable=arguments-differ
def turn_on(self, brightness=None, **kwargs) -> None:
"""Send the turn on command to the ISY994 light device."""
if self._restore_light_state and brightness is None and self._last_brightness:
brightness = self._last_brightness
if not self._node.turn_on(val=brightness):
_LOGGER.debug("Unable to turn on light")
@property
def device_state_attributes(self) -> Dict:
"""Return the light attributes."""
attribs = super().device_state_attributes
attribs[ATTR_LAST_BRIGHTNESS] = self._last_brightness
return attribs
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
async def async_added_to_hass(self) -> None:
"""Restore last_brightness on restart."""
await super().async_added_to_hass()
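        # Default to full brightness (255) when the current state is off or
        # unknown.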
self._last_brightness = self.brightness or 255
last_state = await self.async_get_last_state()
if not last_state:
return
if (
ATTR_LAST_BRIGHTNESS in last_state.attributes
and last_state.attributes[ATTR_LAST_BRIGHTNESS]
):
self._last_brightness = last_state.attributes[ATTR_LAST_BRIGHTNESS]
def set_on_level(self, value):
"""Set the ON Level for a device."""
self._node.set_on_level(value)
def set_ramp_rate(self, value):
"""Set the Ramp Rate for a device."""
self._node.set_ramp_rate(value)
|
import sys
from eventlet import event
from eventlet import greenthread
from keystone.openstack.common.gettextutils import _ # noqa
from keystone.openstack.common import log as logging
from keystone.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCallBase(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
self.done = None
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
class FixedIntervalLoopingCall(LoopingCallBase):
"""A fixed interval looping call."""
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
start = timeutils.utcnow()
self.f(*self.args, **self.kw)
end = timeutils.utcnow()
if not self._running:
break
delay = interval - timeutils.delta_seconds(start, end)
if delay <= 0:
LOG.warn(_('task run outlasted interval by %s sec') %
-delay)
greenthread.sleep(delay if delay > 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn_n(_inner)
return self.done
LoopingCall = FixedIntervalLoopingCall
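# Illustrative usage sketch (names below are hypothetical): poll a resource
# every 5 seconds and stop the loop from inside the poll function by raising
# LoopingCallDone; wait() then returns the value given to the exception.
#
#     def _poll(resource):
#         if resource.is_ready():
#             raise LoopingCallDone(retvalue=resource.status)
#
#     call = FixedIntervalLoopingCall(_poll, resource)
#     call.start(interval=5, initial_delay=2)
#     status = call.wait()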
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
The function called should return how long to sleep for before being
called again.
"""
def start(self, initial_delay=None, periodic_interval_max=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
idle = self.f(*self.args, **self.kw)
if not self._running:
break
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug(_('Dynamic looping call sleeping for %.02f '
'seconds'), idle)
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in dynamic looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
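# Illustrative usage sketch (the task function is hypothetical): the callable
# returns how many seconds to sleep before it is invoked again, optionally
# capped by periodic_interval_max.
#
#     def _run_periodic_tasks():
#         ...  # do the work
#         return 30  # ask to run again in 30 seconds
#
#     call = DynamicLoopingCall(_run_periodic_tasks)
#     call.start(initial_delay=5, periodic_interval_max=60)
#     call.wait()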
|
import mock
from nose.tools import eq_, ok_, assert_raises
from funfactory.urlresolvers import reverse
from .base import ManageTestCase
class TestErrorTrigger(ManageTestCase):
def test_trigger_error(self):
url = reverse('manage:error_trigger')
response = self.client.get(url)
assert self.user.is_superuser
eq_(response.status_code, 200)
        # Without a message.
response = self.client.post(url, {'message': ''})
eq_(response.status_code, 200)
ok_('This field is required' in response.content)
assert_raises(
NameError,
self.client.post,
url,
{'message': 'Some Message'}
)
@mock.patch('airmozilla.manage.views.errors.Client')
def test_trigger_error_with_raven(self, mocked_client):
url = reverse('manage:error_trigger')
assert self.user.is_superuser
raven_config = {
'dsn': 'fake123'
}
with self.settings(RAVEN_CONFIG=raven_config):
response = self.client.post(url, {
'message': 'Some Message',
'capture_with_raven': True
})
eq_(response.status_code, 302)
mocked_client().captureException.assert_called_with()
|
from django.http import HttpResponseNotAllowed, HttpResponseServerError
from django.utils import simplejson as json
from util import to_json_response
from util import to_dojo_data
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.3, 2.4 fallback.
def expect_post_request(func):
"""Allow only POST requests to come in, throw an exception otherwise.
This relieves from checking every time that the request is
really a POST request, which it should be when using this
decorator.
"""
    def _ret(*args, **kwargs):
        request = args[0]
        # Reject non-POST requests before calling the view, so the view's
        # side effects never run for a disallowed method.
        if not request.method == 'POST':
            return HttpResponseNotAllowed(['POST'])
        return func(*args, **kwargs)
    return _ret
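# Illustrative usage sketch (the view below is hypothetical):
#
#     @expect_post_request
#     @json_response
#     def save_item(request):
#         return {'saved': True}
#
# A GET request to save_item now receives HttpResponseNotAllowed(['POST'])
# without the view body executing.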
def add_request_getdict(func):
"""Add the method getdict() to the request object.
This works just like getlist() only that it decodes any nested
JSON encoded object structure.
Since sending deep nested structures is not possible via
GET/POST by default, this enables it. Of course you need to
make sure that on the JavaScript side you are also sending
the data properly, which dojango.send() automatically does.
Example:
this is being sent:
one:1
two:{"three":3, "four":4}
using
request.POST.getdict('two')
returns a dict containing the values sent by the JavaScript.
"""
def _ret(*args, **kwargs):
args[0].POST.__class__.getdict = __getdict
ret = func(*args, **kwargs)
return ret
return _ret
def __getdict(self, key):
ret = self.get(key)
try:
ret = json.loads(ret)
except ValueError: # The value was not JSON encoded :-)
raise Exception('"%s" was not JSON encoded as expected (%s).' % (key, str(ret)))
return ret
def json_response(func):
"""
A simple json response decorator. Use it on views, where a python data object should be converted
to a json response:
@json_response
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
"""
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret)
return wraps(func)(inner)
def jsonp_response_custom(callback_param_name):
"""
A jsonp (JSON with Padding) response decorator, where you can define your own callbackParamName.
    It acts like the json_response decorator, with the difference that it
    wraps the returned JSON string in a client-specified function name (that
    is the padding).
    You can add this decorator to a function like this:
@jsonp_response_custom("my_callback_param")
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
    You can now access this view from a foreign URL using JSONP.
    An example with Dojo looks like this:
dojo.io.script.get({ url:"http://example.com/my_url/",
callbackParamName:"my_callback_param",
load: function(response){
console.log(response);
}
});
Note: the callback_param_name in the decorator and in your JavaScript JSONP call must be the same.
"""
def decorator(func):
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret, callback_param_name=callback_param_name)
return wraps(func)(inner)
return decorator
jsonp_response = jsonp_response_custom("jsonp_callback")
jsonp_response.__doc__ = "A predefined jsonp response decorator using 'jsonp_callback' as a fixed callback_param_name."
def json_iframe_response(func):
"""
A simple json response decorator but wrapping the json response into a html page.
It helps when doing a json request using an iframe (e.g. file up-/download):
    @json_iframe_response
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
"""
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret, use_iframe=True)
return wraps(func)(inner)
def __prepare_json_ret(request, ret, callback_param_name=None, use_iframe=False):
if ret==False:
ret = {'success':False}
elif ret==None: # Sometimes there is no return.
ret = {}
    # Add 'success': True, since it was obviously not set yet and we got
    # valid data with no exception.
func_name = None
if callback_param_name:
func_name = request.GET.get(callback_param_name, "callbackParamName")
try:
if not ret.has_key('success'):
ret['success'] = True
except AttributeError, e:
raise Exception("The returned data of your function must be a dictionary!")
json_ret = ""
try:
        # Sometimes the serialization fails, e.g. when there are too deeply
        # nested objects or even classes inside.
json_ret = to_json_response(ret, func_name, use_iframe)
except Exception, e:
print '\n\n===============Exception=============\n\n'+str(e)+'\n\n'
print ret
print '\n\n'
return HttpResponseServerError(content=str(e))
return json_ret
|
from __future__ import print_function
import inspect
import numpy as np
import theano
from ..layers.advanced_activations import LeakyReLU, PReLU
from ..layers.core import Dense, Merge, Dropout, Activation, Reshape, Flatten, RepeatVector, Layer
from ..layers.core import ActivityRegularization, TimeDistributedDense, AutoEncoder, MaxoutDense
from ..layers.embeddings import Embedding, WordContextProduct
from ..layers.noise import GaussianNoise, GaussianDropout
from ..layers.normalization import BatchNormalization
from ..layers.recurrent import SimpleRNN, SimpleDeepRNN, GRU, LSTM, JZS1, JZS2, JZS3
from ..layers import containers
from .. import regularizers
from .. import constraints
def container_from_config(layer_dict):
name = layer_dict.get('name')
hasParams = False
if name == 'Merge':
mode = layer_dict.get('mode')
layers = layer_dict.get('layers')
layer_list = []
for layer in layers:
init_layer = container_from_config(layer)
layer_list.append(init_layer)
merge_layer = Merge(layer_list, mode)
return merge_layer
elif name == 'Sequential':
layers = layer_dict.get('layers')
layer_list = []
for layer in layers:
init_layer = container_from_config(layer)
layer_list.append(init_layer)
seq_layer = containers.Sequential(layer_list)
return seq_layer
elif name == 'Graph':
graph_layer = containers.Graph()
inputs = layer_dict.get('input_config')
for input in inputs:
graph_layer.add_input(**input)
nodes = layer_dict.get('node_config')
for node in nodes:
layer = container_from_config(layer_dict['nodes'].get(node['name']))
node['layer'] = layer
graph_layer.add_node(**node)
outputs = layer_dict.get('output_config')
for output in outputs:
graph_layer.add_output(**output)
return graph_layer
else:
# The case in which layer_dict represents an "atomic" layer
layer_dict.pop('name')
if 'parameters' in layer_dict:
params = layer_dict.get('parameters')
layer_dict.pop('parameters')
hasParams = True
for k, v in layer_dict.items():
# For now, this can only happen for regularizers and constraints
if isinstance(v, dict):
vname = v.get('name')
v.pop('name')
if vname in [x for x, y in inspect.getmembers(constraints, predicate=inspect.isclass)]:
layer_dict[k] = constraints.get(vname, v)
if vname in [x for x, y in inspect.getmembers(regularizers, predicate=inspect.isclass)]:
layer_dict[k] = regularizers.get(vname, v)
base_layer = get_layer(name, layer_dict)
if hasParams:
shaped_params = []
for param in params:
data = np.asarray(param.get('data'))
shape = tuple(param.get('shape'))
shaped_params.append(data.reshape(shape))
base_layer.set_weights(shaped_params)
return base_layer
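# Illustrative sketch of a config accepted by container_from_config (the
# exact serialized fields depend on each layer's get_config; the Dense
# arguments below are assumptions):
#
#     model = container_from_config({
#         'name': 'Sequential',
#         'layers': [
#             {'name': 'Dense', 'input_dim': 784, 'output_dim': 10,
#              'activation': 'softmax'},
#         ],
#     })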
def print_layer_shapes(model, input_shapes):
"""
Utility function to print the shape of the output at each layer of a Model
Arguments:
model: instance of Model / Merge
input_shapes: dict (Graph), list of tuples (Merge) or tuple (Sequential)
"""
if model.__class__.__name__ in ['Sequential', 'Merge']:
# in this case input_shapes is a tuple, or a list [shape1, shape2]
if not isinstance(input_shapes[0], tuple):
input_shapes = [input_shapes]
inputs = model.get_input(train=False)
if not isinstance(inputs, list):
inputs = [inputs]
input_dummy = [np.zeros(shape, dtype=np.float32)
for shape in input_shapes]
layers = model.layers
elif model.__class__.__name__ == 'Graph':
# in this case input_shapes is a dictionary
inputs = [model.inputs[name].input
for name in model.input_order]
input_dummy = [np.zeros(input_shapes[name], dtype=np.float32)
for name in model.input_order]
layers = [model.nodes[c['name']] for c in model.node_config]
print("input shapes : ", input_shapes)
for l in layers:
shape_f = theano.function(inputs, l.get_output(train=False).shape,
on_unused_input='ignore')
out_shape = tuple(shape_f(*input_dummy))
config = l.get_config()
print('shape after %s: %s' % (config['name'], out_shape))
from .generic_utils import get_from_module
def get_layer(identifier, kwargs=None):
return get_from_module(identifier, globals(), 'layer', instantiate=True, kwargs=kwargs)
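# Editor's sketch (not part of the original module): a minimal round-trip
# through container_from_config(). The layer sizes are illustrative
# assumptions; get_config() supplies the 'name' key that the function pops.
def _example_container_from_config():
    dense = Dense(input_dim=64, output_dim=10)
    rebuilt = container_from_config(dense.get_config())
    return rebuilt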
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentE3600A import *
from .. import scpi
class agilentE3634A(agilentE3600A, scpi.dcpwr.OCP):
"Agilent E3634A IVI DC power supply driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'E3634A')
super(agilentE3634A, self).__init__(*args, **kwargs)
self._output_count = 1
self._output_spec = [
{
'range': {
'P25V': (25.75, 7.21),
'P50V': (51.5, 4.12)
},
'ovp_max': 55.0,
'ocp_max': 7.5,
'voltage_max': 25.75,
'current_max': 7.21
}
]
self._memory_size = 3
self._init_outputs()
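def _example_usage():
    # Editor's sketch (not part of the original driver): a typical python-ivi
    # session. The VISA resource string is an illustrative assumption.
    psu = agilentE3634A("TCPIP0::192.168.1.100::INSTR")
    psu.outputs[0].voltage_level = 12.0
    psu.outputs[0].current_limit = 1.0
    psu.outputs[0].enabled = True
    psu.close()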
|
from selenium.webdriver.remote.command import Command
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.common.exceptions import WebDriverException
from .service import Service
from .options import Options
class WebDriver(RemoteWebDriver):
"""
Controls the ChromeDriver and allows you to drive the browser.
You will need to download the ChromeDriver executable from
http://chromedriver.storage.googleapis.com/index.html
"""
def __init__(self, executable_path="chromedriver", port=0,
chrome_options=None, service_args=None,
desired_capabilities=None, service_log_path=None):
"""
Creates a new instance of the chrome driver.
Starts the service and then creates new instance of chrome driver.
:Args:
         - executable_path - path to the executable. If the default is used it assumes the executable is in the $PATH
         - port - port you would like the service to run on; if left as 0, a free port will be found.
         - desired_capabilities: Dictionary object with non-browser specific
           capabilities only, such as "proxy" or "loggingPrefs".
         - chrome_options: this takes an instance of ChromeOptions
"""
if chrome_options is None:
# desired_capabilities stays as passed in
if desired_capabilities is None:
desired_capabilities = Options().to_capabilities()
else:
if desired_capabilities is None:
desired_capabilities = chrome_options.to_capabilities()
else:
desired_capabilities.update(chrome_options.to_capabilities())
self.service = Service(executable_path, port=port,
service_args=service_args, log_path=service_log_path)
self.service.start()
try:
RemoteWebDriver.__init__(self,
command_executor=self.service.service_url,
desired_capabilities=desired_capabilities,
keep_alive=True)
except:
self.quit()
raise
self._is_remote = False
def quit(self):
"""
Closes the browser and shuts down the ChromeDriver executable
that is started when starting the ChromeDriver
"""
try:
RemoteWebDriver.quit(self)
except:
# We don't care about the message because something probably has gone wrong
pass
finally:
self.service.stop()
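# Editor's sketch (not part of the original module): a minimal driver session.
# Assumes a chromedriver binary on $PATH and network access to the URL.
def _example_usage():
    driver = WebDriver()
    try:
        driver.get("https://example.com")
        print(driver.title)
    finally:
        driver.quit()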
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
import logging
from flexget import plugin
from flexget.event import event
from flexget.utils.log import log_once
try:
from flexget.plugins.internal.api_rottentomatoes import lookup_movie, API_KEY
except ImportError:
raise plugin.DependencyError(issued_by='rottentomatoes_lookup', missing='api_rottentomatoes',
message='rottentomatoes_lookup requires the `api_rottentomatoes` plugin')
log = logging.getLogger('rottentomatoes_lookup')
def get_rt_url(movie):
for link in movie.links:
if link.name == 'alternate':
return link.url
class PluginRottenTomatoesLookup(object):
"""
Retrieves Rotten Tomatoes information for entries.
Example::
rottentomatoes_lookup: yes
"""
field_map = {
'rt_name': 'title',
'rt_id': 'id',
'rt_year': 'year',
'rt_genres': lambda movie: [genre.name for genre in movie.genres],
'rt_mpaa_rating': 'mpaa_rating',
'rt_runtime': 'runtime',
'rt_critics_consensus': 'critics_consensus',
'rt_releases': lambda movie: dict((release.name, release.date) for
release in movie.release_dates),
'rt_critics_rating': 'critics_rating',
'rt_critics_score': 'critics_score',
'rt_audience_rating': 'audience_rating',
'rt_audience_score': 'audience_score',
'rt_average_score': lambda movie: (movie.critics_score + movie.audience_score) / 2,
'rt_synopsis': 'synopsis',
'rt_posters': lambda movie: dict((poster.name, poster.url) for poster in movie.posters),
'rt_actors': lambda movie: [actor.name for actor in movie.cast],
'rt_directors': lambda movie: [director.name for director in movie.directors],
'rt_studio': 'studio',
'rt_alternate_ids': lambda movie: dict((alt_id.name, alt_id.id)
for alt_id in movie.alternate_ids),
'rt_url': get_rt_url,
# Generic fields filled by all movie lookup plugins:
'movie_name': 'title',
'movie_year': 'year'}
schema = {'oneOf': [
{'type': 'boolean'},
{'type': 'string', 'description': 'provide a custom api key'}
]}
def __init__(self):
self.key = None
def lazy_loader(self, entry):
"""Does the lookup for this entry and populates the entry fields.
:param entry: entry to perform lookup on
:param field: the field to be populated (others may be populated as well)
:returns: the field value
"""
try:
self.lookup(entry, key=self.key)
except plugin.PluginError as e:
log_once(e.value.capitalize(), logger=log)
def lookup(self, entry, search_allowed=True, key=None):
"""
Perform Rotten Tomatoes lookup for entry.
:param entry: Entry instance
:param search_allowed: Allow fallback to search
:param key: optionally specify an API key to use
:raises PluginError: Failure reason
"""
if not key:
key = self.key or API_KEY
movie = lookup_movie(smart_match=entry['title'],
rottentomatoes_id=entry.get('rt_id', eval_lazy=False),
only_cached=(not search_allowed),
api_key=key
)
log.debug(u'Got movie: %s' % movie)
entry.update_using_map(self.field_map, movie)
if not entry.get('imdb_id', eval_lazy=False):
for alt_id in movie.alternate_ids:
if alt_id.name == 'imdb':
entry['imdb_id'] = 'tt' + alt_id.id
break
def on_task_metainfo(self, task, config):
if not config:
return
if isinstance(config, basestring):
self.key = config.lower()
else:
self.key = None
for entry in task.entries:
entry.register_lazy_func(self.lazy_loader, self.field_map)
@property
def movie_identifier(self):
"""Returns the plugin main identifier type"""
return 'rt_id'
@event('plugin.register')
def register_plugin():
plugin.register(PluginRottenTomatoesLookup, 'rottentomatoes_lookup', api_ver=2, interfaces=['task', 'movie_metainfo'])
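# Editor's note (not part of the original plugin): example task configuration,
# with illustrative task and key values only. Per the schema above, the value
# is either a boolean or a custom API key string.
#
#   tasks:
#     my-movie-task:
#       rottentomatoes_lookup: yes
#       # or: rottentomatoes_lookup: 0123456789abcdef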
|
"""
Test of the omero import control.
Copyright 2009 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import pytest
from path import path
import omero.clients
import uuid
from omero.cli import CLI, NonZeroReturnCode
plugin = __import__('omero.plugins.import', globals(), locals(),
['ImportControl'], -1)
ImportControl = plugin.ImportControl
help_arguments = ("-h", "--javahelp", "--java-help", "--advanced-help")
class MockClient(omero.clients.BaseClient):
def setSessionId(self, uuid):
self._uuid = uuid
def getSessionId(self):
return self._uuid
class TestImport(object):
def setup_method(self, method):
self.cli = CLI()
self.cli.register("import", ImportControl, "TEST")
self.args = ["import"]
def add_client_dir(self):
dist_dir = path(__file__) / ".." / ".." / ".." / ".." / ".." / ".." /\
".." / "dist" # FIXME: should not be hard-coded
dist_dir = dist_dir.abspath()
client_dir = dist_dir / "lib" / "client"
self.args += ["--clientdir", client_dir]
def mkdir(self, parent, name, with_ds_store=False):
child = parent / name
child.mkdir()
if with_ds_store:
ds_store = child / ".DS_STORE"
ds_store.write("")
return child
def mkfakescreen(self, screen_dir, nplates=2, nruns=2, nwells=2,
nfields=4, with_ds_store=False):
fieldfiles = []
for iplate in range(nplates):
plate_dir = self.mkdir(
screen_dir, "Plate00%s" % str(iplate),
with_ds_store=with_ds_store)
for irun in range(nruns):
run_dir = self.mkdir(
plate_dir, "Run00%s" % str(irun),
with_ds_store=with_ds_store)
for iwell in range(nwells):
well_dir = self.mkdir(
run_dir, "WellA00%s" % str(iwell),
with_ds_store=with_ds_store)
for ifield in range(nfields):
fieldfile = (well_dir / ("Field00%s.fake" %
str(ifield)))
fieldfile.write('')
fieldfiles.append(fieldfile)
return fieldfiles
def mkfakepattern(self, tmpdir, nangles=7, ntimepoints=10):
spim_dir = tmpdir.join("SPIM")
spim_dir.mkdir()
tiffiles = []
for angle in range(1, nangles + 1):
for timepoint in range(1, ntimepoints + 1):
tiffile = (spim_dir / ("spim_TL%s_Angle%s.fake" %
(str(timepoint), str(angle))))
tiffile.write('')
print str(tiffile)
tiffiles.append(tiffile)
patternfile = spim_dir / "spim.pattern"
patternfile.write("spim_TL<1-%s>_Angle<1-%s>.fake"
% (str(ntimepoints), str(nangles)))
assert len(tiffiles) == nangles * ntimepoints
return patternfile, tiffiles
def testDropBoxArgs(self):
class MockImportControl(ImportControl):
def importer(this, args):
assert args.server == "localhost"
assert args.port == "4064"
assert args.key == "b0742975-03a1-4f6d-b0ac-639943f1a147"
assert args.errs == "/tmp/dropbox.err"
assert args.file == "/tmp/dropbox.out"
self.cli.register("mock-import", MockImportControl, "HELP")
self.args = ['-s', 'localhost', '-p', '4064', '-k',
'b0742975-03a1-4f6d-b0ac-639943f1a147']
self.args += ['mock-import', '---errs=/tmp/dropbox.err']
self.args += ['---file=/tmp/dropbox.out']
self.args += ['--', '/OMERO/DropBox/root/tinyTest.d3d.dv']
self.cli.invoke(self.args)
@pytest.mark.parametrize('help_argument', help_arguments)
def testHelp(self, help_argument):
"""Test help arguments"""
self.args += [help_argument]
self.cli.invoke(self.args)
@pytest.mark.parametrize('clientdir_exists', [True, False])
def testImportNoClientDirFails(self, tmpdir, clientdir_exists):
"""Test fake screen import"""
fakefile = tmpdir.join("test.fake")
fakefile.write('')
if clientdir_exists:
self.args += ["--clientdir", str(tmpdir)]
self.args += [str(fakefile)]
with pytest.raises(NonZeroReturnCode):
self.cli.invoke(self.args, strict=True)
@pytest.mark.parametrize("data", (("1", False), ("3", True)))
def testImportDepth(self, tmpdir, capfd, data):
"""Test import using depth argument"""
dir1 = tmpdir.join("a")
dir1.mkdir()
dir2 = dir1 / "b"
dir2.mkdir()
fakefile = dir2 / "test.fake"
fakefile.write('')
self.add_client_dir()
self.args += ["-f", "--debug=ERROR"]
self.args += [str(dir1)]
depth, result = data
self.cli.invoke(self.args + ["--depth=%s" % depth], strict=True)
o, e = capfd.readouterr()
if result:
assert str(fakefile) in str(o)
else:
assert str(fakefile) not in str(o)
def testImportFakeImage(self, tmpdir, capfd):
"""Test fake image import"""
fakefile = tmpdir.join("test.fake")
fakefile.write('')
self.add_client_dir()
self.args += ["-f", "--debug=ERROR"]
self.args += [str(fakefile)]
self.cli.invoke(self.args, strict=True)
o, e = capfd.readouterr()
outputlines = str(o).split('\n')
reader = 'loci.formats.in.FakeReader'
assert outputlines[-2] == str(fakefile)
assert outputlines[-3] == \
"# Group: %s SPW: false Reader: %s" % (str(fakefile), reader)
@pytest.mark.parametrize('with_ds_store', (True, False))
def testImportFakeScreen(self, tmpdir, capfd, with_ds_store):
"""Test fake screen import"""
screen_dir = tmpdir.join("screen.fake")
screen_dir.mkdir()
fieldfiles = self.mkfakescreen(
screen_dir, with_ds_store=with_ds_store)
self.add_client_dir()
self.args += ["-f", "--debug=ERROR"]
self.args += [str(fieldfiles[0])]
self.cli.invoke(self.args, strict=True)
o, e = capfd.readouterr()
outputlines = str(o).split('\n')
reader = 'loci.formats.in.FakeReader'
assert outputlines[-len(fieldfiles)-2] == \
"# Group: %s SPW: true Reader: %s" % (str(fieldfiles[0]), reader)
for i in range(len(fieldfiles)):
assert outputlines[-1-len(fieldfiles)+i] == str(fieldfiles[i])
def testImportPattern(self, tmpdir, capfd):
"""Test pattern import"""
patternfile, tiffiles = self.mkfakepattern(tmpdir)
self.add_client_dir()
self.args += ["-f", "--debug=ERROR"]
self.args += [str(patternfile)]
self.cli.invoke(self.args, strict=True)
o, e = capfd.readouterr()
outputlines = str(o).split('\n')
reader = 'loci.formats.in.FilePatternReader'
print o
assert outputlines[-len(tiffiles)-3] == \
"# Group: %s SPW: false Reader: %s" % (str(patternfile), reader)
assert outputlines[-len(tiffiles)-2] == str(patternfile)
for i in range(len(tiffiles)):
assert outputlines[-1-len(tiffiles)+i] == str(tiffiles[i])
@pytest.mark.parametrize('hostname', ['localhost', 'servername'])
@pytest.mark.parametrize('port', [None, 4064, 14064])
def testLoginArguments(self, monkeypatch, hostname, port, tmpdir):
self.args += ['test.fake']
control = self.cli.controls['import']
control.command_args = []
sessionid = str(uuid.uuid4())
def new_client(x):
if port:
c = MockClient(hostname, port)
else:
c = MockClient(hostname)
c.setSessionId(sessionid)
return c
monkeypatch.setattr(self.cli, 'conn', new_client)
ice_config = tmpdir / 'ice.config'
ice_config.write('omero.host=%s\nomero.port=%g' % (
hostname, (port or 4064)))
monkeypatch.setenv("ICE_CONFIG", ice_config)
control.set_login_arguments(self.cli.parser.parse_args(self.args))
expected_args = ['-s', '%s' % hostname]
expected_args += ['-p', '%s' % (port or 4064)]
expected_args += ['-k', '%s' % sessionid]
assert control.command_args == expected_args
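# Editor's note (not part of the original file): these tests run under pytest;
# assuming this module is saved as test_import.py, a single test can be run with
#   pytest test_import.py -k testImportFakeImage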
|
import os,sys
import traceback
def enumToString(constants, enum, elem):
    all_values = constants.all_values(enum)
    for key in all_values.keys():
        if str(elem) == str(all_values[key]):
            return key
    return "<unknown>"
def main(argv):
from vboxapi import VirtualBoxManager
# This is a VirtualBox COM/XPCOM API client, no data needed.
wrapper = VirtualBoxManager(None, None)
# Get the VirtualBox manager
mgr = wrapper.mgr
# Get the global VirtualBox object
vbox = wrapper.vbox
print "Running VirtualBox version %s" %(vbox.version)
# Get all constants through the Python wrapper code
vboxConstants = wrapper.constants
# Enumerate all defined machines
for mach in wrapper.getArray(vbox, 'machines'):
try:
# Be prepared for failures - the VM can be inaccessible
vmname = '<inaccessible>'
            try:
                vmname = mach.name
            except Exception:
                pass
            vmid = ''
            try:
                vmid = mach.id
            except Exception:
                pass
# Print some basic VM information even if there were errors
print "Machine name: %s [%s]" %(vmname,vmid)
if vmname == '<inaccessible>' or vmid == '':
continue
# Print some basic VM information
print " State: %s" %(enumToString(vboxConstants, "MachineState", mach.state))
print " Session state: %s" %(enumToString(vboxConstants, "SessionState", mach.sessionState))
# Do some stuff which requires a running VM
if mach.state == vboxConstants.MachineState_Running:
# Get the session object
session = mgr.getSessionObject(vbox)
# Lock the current machine (shared mode, since we won't modify the machine)
mach.lockMachine(session, vboxConstants.LockType_Shared)
# Acquire the VM's console and guest object
console = session.console
guest = console.guest
# Retrieve the current Guest Additions runlevel and print
# the installed Guest Additions version
addRunLevel = guest.additionsRunLevel
print " Additions State: %s" %(enumToString(vboxConstants, "AdditionsRunLevelType", addRunLevel))
if addRunLevel != vboxConstants.AdditionsRunLevelType_None:
print " Additions Ver: %s" %(guest.additionsVersion)
# Get the VM's display object
display = console.display
# Get the VM's current display resolution + bit depth + position
screenNum = 0 # From first screen
(screenW, screenH, screenBPP, screenX, screenY, _) = display.getScreenResolution(screenNum)
print " Display (%d): %dx%d, %d BPP at %d,%d" %(screenNum, screenW, screenH, screenBPP, screenX, screenY)
# We're done -- don't forget to unlock the machine!
session.unlockMachine()
except Exception, e:
print "Errror [%s]: %s" %(mach.name, str(e))
traceback.print_exc()
# Call destructor and delete wrapper
del wrapper
if __name__ == '__main__':
main(sys.argv)
|
'''
Created on 18/2/2015
@author: PC06
'''
from flaskext.mysql import MySQL
from flask import Flask
class DBcon(object):
    """MySQL connection factory for the 'ventas' database."""
    def __init__(self):
        pass
def conexion(self):
mysql = MySQL()
app = Flask(__name__)
app.config['MYSQL_DATABASE_USER'] = 'python'
app.config['MYSQL_DATABASE_PASSWORD'] = '123456'
app.config['MYSQL_DATABASE_DB'] = 'ventas'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
return mysql
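# Editor's sketch (not part of the original file): opening a connection and
# running a trivial query through DBcon.
def _example_usage():
    mysql = DBcon().conexion()
    conn = mysql.connect()
    cursor = conn.cursor()
    cursor.execute("SELECT 1")
    print(cursor.fetchone())
    conn.close()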
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pwd
import random
import re
import string
import sys
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.plugins import get_plugin_class
from ansible.utils.ssh_functions import check_for_controlpersist
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayContext']
MAGIC_VARIABLE_MAPPING = dict(
# base
connection=('ansible_connection', ),
module_compression=('ansible_module_compression', ),
shell=('ansible_shell_type', ),
executable=('ansible_shell_executable', ),
remote_tmp_dir=('ansible_remote_tmp', ),
# connection common
remote_addr=('ansible_ssh_host', 'ansible_host'),
remote_user=('ansible_ssh_user', 'ansible_user'),
password=('ansible_ssh_pass', 'ansible_password'),
port=('ansible_ssh_port', 'ansible_port'),
pipelining=('ansible_ssh_pipelining', 'ansible_pipelining'),
timeout=('ansible_ssh_timeout', 'ansible_timeout'),
private_key_file=('ansible_ssh_private_key_file', 'ansible_private_key_file'),
# networking modules
network_os=('ansible_network_os', ),
connection_user=('ansible_connection_user',),
# ssh TODO: remove
ssh_executable=('ansible_ssh_executable', ),
ssh_common_args=('ansible_ssh_common_args', ),
sftp_extra_args=('ansible_sftp_extra_args', ),
scp_extra_args=('ansible_scp_extra_args', ),
ssh_extra_args=('ansible_ssh_extra_args', ),
ssh_transfer_method=('ansible_ssh_transfer_method', ),
# docker TODO: remove
docker_extra_args=('ansible_docker_extra_args', ),
# become
become=('ansible_become', ),
become_method=('ansible_become_method', ),
become_user=('ansible_become_user', ),
become_pass=('ansible_become_password', 'ansible_become_pass'),
become_exe=('ansible_become_exe', ),
become_flags=('ansible_become_flags', ),
# deprecated
sudo=('ansible_sudo', ),
sudo_user=('ansible_sudo_user', ),
sudo_pass=('ansible_sudo_password', 'ansible_sudo_pass'),
sudo_exe=('ansible_sudo_exe', ),
sudo_flags=('ansible_sudo_flags', ),
su=('ansible_su', ),
su_user=('ansible_su_user', ),
su_pass=('ansible_su_password', 'ansible_su_pass'),
su_exe=('ansible_su_exe', ),
su_flags=('ansible_su_flags', ),
)
b_SU_PROMPT_LOCALIZATIONS = [
to_bytes('Password'),
to_bytes('암호'),
to_bytes('パスワード'),
to_bytes('Adgangskode'),
to_bytes('Contraseña'),
to_bytes('Contrasenya'),
to_bytes('Hasło'),
to_bytes('Heslo'),
to_bytes('Jelszó'),
to_bytes('Lösenord'),
to_bytes('Mật khẩu'),
to_bytes('Mot de passe'),
to_bytes('Parola'),
to_bytes('Parool'),
to_bytes('Pasahitza'),
to_bytes('Passord'),
to_bytes('Passwort'),
to_bytes('Salasana'),
to_bytes('Sandi'),
to_bytes('Senha'),
to_bytes('Wachtwoord'),
to_bytes('ססמה'),
to_bytes('Лозинка'),
to_bytes('Парола'),
to_bytes('Пароль'),
to_bytes('गुप्तशब्द'),
to_bytes('शब्दकूट'),
to_bytes('సంకేతపదము'),
to_bytes('හස්පදය'),
to_bytes('密码'),
to_bytes('密碼'),
to_bytes('口令'),
]
TASK_ATTRIBUTE_OVERRIDES = (
'become',
'become_user',
'become_pass',
'become_method',
'become_flags',
'connection',
'docker_extra_args', # TODO: remove
'delegate_to',
'no_log',
'remote_user',
)
RESET_VARS = (
'ansible_connection',
'ansible_user',
'ansible_host',
'ansible_port',
# TODO: ???
'ansible_docker_extra_args',
'ansible_ssh_host',
'ansible_ssh_pass',
'ansible_ssh_port',
'ansible_ssh_user',
'ansible_ssh_private_key_file',
'ansible_ssh_pipelining',
'ansible_ssh_executable',
)
OPTION_FLAGS = ('connection', 'remote_user', 'private_key_file', 'verbosity', 'force_handlers', 'step', 'start_at_task', 'diff',
'ssh_common_args', 'docker_extra_args', 'sftp_extra_args', 'scp_extra_args', 'ssh_extra_args')
class PlayContext(Base):
'''
This class is used to consolidate the connection information for
hosts in a play and child tasks, where the task may override some
connection/authentication information.
'''
# base
_module_compression = FieldAttribute(isa='string', default=C.DEFAULT_MODULE_COMPRESSION)
_shell = FieldAttribute(isa='string')
_executable = FieldAttribute(isa='string', default=C.DEFAULT_EXECUTABLE)
# connection fields, some are inherited from Base:
# (connection, port, remote_user, environment, no_log)
_remote_addr = FieldAttribute(isa='string')
_remote_tmp_dir = FieldAttribute(isa='string', default=C.DEFAULT_REMOTE_TMP)
_password = FieldAttribute(isa='string')
_timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT)
_connection_user = FieldAttribute(isa='string')
_private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE)
_pipelining = FieldAttribute(isa='bool', default=C.ANSIBLE_PIPELINING)
# networking modules
_network_os = FieldAttribute(isa='string')
# docker FIXME: remove these
_docker_extra_args = FieldAttribute(isa='string')
# ssh # FIXME: remove these
_ssh_executable = FieldAttribute(isa='string', default=C.ANSIBLE_SSH_EXECUTABLE)
_ssh_args = FieldAttribute(isa='string', default=C.ANSIBLE_SSH_ARGS)
_ssh_common_args = FieldAttribute(isa='string')
_sftp_extra_args = FieldAttribute(isa='string')
_scp_extra_args = FieldAttribute(isa='string')
_ssh_extra_args = FieldAttribute(isa='string')
_ssh_transfer_method = FieldAttribute(isa='string', default=C.DEFAULT_SSH_TRANSFER_METHOD)
# ???
_connection_lockfd = FieldAttribute(isa='int')
# privilege escalation fields
_become = FieldAttribute(isa='bool')
_become_method = FieldAttribute(isa='string')
_become_user = FieldAttribute(isa='string')
_become_pass = FieldAttribute(isa='string')
_become_exe = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_EXE)
_become_flags = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_FLAGS)
_prompt = FieldAttribute(isa='string')
# DEPRECATED: backwards compatibility fields for sudo/su
_sudo_exe = FieldAttribute(isa='string', default=C.DEFAULT_SUDO_EXE)
_sudo_flags = FieldAttribute(isa='string', default=C.DEFAULT_SUDO_FLAGS)
_sudo_pass = FieldAttribute(isa='string')
_su_exe = FieldAttribute(isa='string', default=C.DEFAULT_SU_EXE)
_su_flags = FieldAttribute(isa='string', default=C.DEFAULT_SU_FLAGS)
_su_pass = FieldAttribute(isa='string')
# general flags
_verbosity = FieldAttribute(isa='int', default=0)
_only_tags = FieldAttribute(isa='set', default=set())
_skip_tags = FieldAttribute(isa='set', default=set())
_force_handlers = FieldAttribute(isa='bool', default=False)
_start_at_task = FieldAttribute(isa='string')
_step = FieldAttribute(isa='bool', default=False)
# Fact gathering settings
_gather_subset = FieldAttribute(isa='string', default=C.DEFAULT_GATHER_SUBSET)
_gather_timeout = FieldAttribute(isa='string', default=C.DEFAULT_GATHER_TIMEOUT)
_fact_path = FieldAttribute(isa='string', default=C.DEFAULT_FACT_PATH)
def __init__(self, play=None, options=None, passwords=None, connection_lockfd=None):
super(PlayContext, self).__init__()
if passwords is None:
passwords = {}
self.password = passwords.get('conn_pass', '')
self.become_pass = passwords.get('become_pass', '')
self.prompt = ''
self.success_key = ''
# a file descriptor to be used during locking operations
self.connection_lockfd = connection_lockfd
# set options before play to allow play to override them
if options:
self.set_options(options)
if play:
self.set_play(play)
def set_play(self, play):
'''
Configures this connection information instance with data from
the play class.
'''
if play.connection:
self.connection = play.connection
if play.remote_user:
self.remote_user = play.remote_user
if play.port:
self.port = int(play.port)
if play.become is not None:
self.become = play.become
if play.become_method:
self.become_method = play.become_method
if play.become_user:
self.become_user = play.become_user
if play.force_handlers is not None:
self.force_handlers = play.force_handlers
def set_options_from_plugin(self, plugin):
        # generic options derived from the connection plugin; temporary for backwards compat - eventually we should not set play_context properties here
# get options for plugins
options = C.config.get_configuration_definitions(get_plugin_class(plugin), plugin._load_name)
for option in options:
if option:
flag = options[option].get('name')
if flag:
setattr(self, flag, self.connection.get_option(flag))
        # TODO: made irrelevant by the above
# get ssh options
# for flag in ('ssh_common_args', 'docker_extra_args', 'sftp_extra_args', 'scp_extra_args', 'ssh_extra_args'):
# setattr(self, flag, getattr(options, flag, ''))
def set_options(self, options):
'''
Configures this connection information instance with data from
options specified by the user on the command line. These have a
lower precedence than those set on the play or host.
'''
# privilege escalation
self.become = options.become
self.become_method = options.become_method
self.become_user = options.become_user
self.check_mode = boolean(options.check, strict=False)
self.diff = boolean(options.diff, strict=False)
# general flags (should we move out?)
# should only be 'non plugin' flags
for flag in OPTION_FLAGS:
attribute = getattr(options, flag, False)
if attribute:
setattr(self, flag, attribute)
if hasattr(options, 'timeout') and options.timeout:
self.timeout = int(options.timeout)
# get the tag info from options. We check to see if the options have
# the attribute, as it is not always added via the CLI
if hasattr(options, 'tags'):
self.only_tags.update(options.tags)
if len(self.only_tags) == 0:
self.only_tags = set(['all'])
if hasattr(options, 'skip_tags'):
self.skip_tags.update(options.skip_tags)
def set_task_and_variable_override(self, task, variables, templar):
'''
Sets attributes from the task if they are set, which will override
those from the play.
:arg task: the task object with the parameters that were set on it
:arg variables: variables from inventory
:arg templar: templar instance if templating variables is needed
'''
new_info = self.copy()
# loop through a subset of attributes on the task object and set
# connection fields based on their values
for attr in TASK_ATTRIBUTE_OVERRIDES:
if hasattr(task, attr):
attr_val = getattr(task, attr)
if attr_val is not None:
setattr(new_info, attr, attr_val)
# next, use the MAGIC_VARIABLE_MAPPING dictionary to update this
# connection info object with 'magic' variables from the variable list.
# If the value 'ansible_delegated_vars' is in the variables, it means
# we have a delegated-to host, so we check there first before looking
# at the variables in general
if task.delegate_to is not None:
# In the case of a loop, the delegated_to host may have been
# templated based on the loop variable, so we try and locate
# the host name in the delegated variable dictionary here
delegated_host_name = templar.template(task.delegate_to)
delegated_vars = variables.get('ansible_delegated_vars', dict()).get(delegated_host_name, dict())
delegated_transport = C.DEFAULT_TRANSPORT
for transport_var in MAGIC_VARIABLE_MAPPING.get('connection'):
if transport_var in delegated_vars:
delegated_transport = delegated_vars[transport_var]
break
# make sure this delegated_to host has something set for its remote
# address, otherwise we default to connecting to it by name. This
# may happen when users put an IP entry into their inventory, or if
# they rely on DNS for a non-inventory hostname
            for address_var in ('ansible_%s_host' % delegated_transport,) + MAGIC_VARIABLE_MAPPING.get('remote_addr'):
if address_var in delegated_vars:
break
else:
display.debug("no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution" % delegated_host_name)
delegated_vars['ansible_host'] = delegated_host_name
# reset the port back to the default if none was specified, to prevent
# the delegated host from inheriting the original host's setting
            for port_var in ('ansible_%s_port' % delegated_transport,) + MAGIC_VARIABLE_MAPPING.get('port'):
if port_var in delegated_vars:
break
else:
if delegated_transport == 'winrm':
delegated_vars['ansible_port'] = 5986
else:
delegated_vars['ansible_port'] = C.DEFAULT_REMOTE_PORT
# and likewise for the remote user
            for user_var in ('ansible_%s_user' % delegated_transport,) + MAGIC_VARIABLE_MAPPING.get('remote_user'):
if user_var in delegated_vars and delegated_vars[user_var]:
break
else:
delegated_vars['ansible_user'] = task.remote_user or self.remote_user
else:
delegated_vars = dict()
# setup shell
for exe_var in MAGIC_VARIABLE_MAPPING.get('executable'):
if exe_var in variables:
setattr(new_info, 'executable', variables.get(exe_var))
attrs_considered = []
for (attr, variable_names) in iteritems(MAGIC_VARIABLE_MAPPING):
for variable_name in variable_names:
if attr in attrs_considered:
continue
                # for delegated tasks, ONLY use the delegated host's vars; avoid the delegating host's vars
if task.delegate_to is not None:
if isinstance(delegated_vars, dict) and variable_name in delegated_vars:
setattr(new_info, attr, delegated_vars[variable_name])
attrs_considered.append(attr)
elif variable_name in variables:
setattr(new_info, attr, variables[variable_name])
attrs_considered.append(attr)
# no else, as no other vars should be considered
# become legacy updates -- from commandline
if not new_info.become_pass:
if new_info.become_method == 'sudo' and new_info.sudo_pass:
new_info.become_pass = new_info.sudo_pass
elif new_info.become_method == 'su' and new_info.su_pass:
new_info.become_pass = new_info.su_pass
# become legacy updates -- from inventory file (inventory overrides
# commandline)
for become_pass_name in MAGIC_VARIABLE_MAPPING.get('become_pass'):
if become_pass_name in variables:
break
else: # This is a for-else
if new_info.become_method == 'sudo':
for sudo_pass_name in MAGIC_VARIABLE_MAPPING.get('sudo_pass'):
if sudo_pass_name in variables:
setattr(new_info, 'become_pass', variables[sudo_pass_name])
break
elif new_info.become_method == 'su':
for su_pass_name in MAGIC_VARIABLE_MAPPING.get('su_pass'):
if su_pass_name in variables:
setattr(new_info, 'become_pass', variables[su_pass_name])
break
# make sure we get port defaults if needed
if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
new_info.port = int(C.DEFAULT_REMOTE_PORT)
# special overrides for the connection setting
if len(delegated_vars) > 0:
# in the event that we were using local before make sure to reset the
# connection type to the default transport for the delegated-to host,
# if not otherwise specified
for connection_type in MAGIC_VARIABLE_MAPPING.get('connection'):
if connection_type in delegated_vars:
break
else:
remote_addr_local = new_info.remote_addr in C.LOCALHOST
inv_hostname_local = delegated_vars.get('inventory_hostname') in C.LOCALHOST
if remote_addr_local and inv_hostname_local:
setattr(new_info, 'connection', 'local')
elif getattr(new_info, 'connection', None) == 'local' and (not remote_addr_local or not inv_hostname_local):
setattr(new_info, 'connection', C.DEFAULT_TRANSPORT)
# if the final connection type is local, reset the remote_user value to that of the currently logged in user
# this ensures any become settings are obeyed correctly
# we store original in 'connection_user' for use of network/other modules that fallback to it as login user
if new_info.connection == 'local':
if not new_info.connection_user:
new_info.connection_user = new_info.remote_user
new_info.remote_user = pwd.getpwuid(os.getuid()).pw_name
        # set no_log to default if it was not previously set
if new_info.no_log is None:
new_info.no_log = C.DEFAULT_NO_LOG
if task.always_run:
display.deprecated("always_run is deprecated. Use check_mode = no instead.", version="2.4", removed=False)
new_info.check_mode = False
# check_mode replaces always_run, overwrite always_run if both are given
if task.check_mode is not None:
new_info.check_mode = task.check_mode
if task.diff is not None:
new_info.diff = task.diff
return new_info
def make_become_cmd(self, cmd, executable=None):
""" helper function to create privilege escalation commands """
prompt = None
success_key = None
self.prompt = None
if self.become:
if not executable:
executable = self.executable
becomecmd = None
randbits = ''.join(random.choice(string.ascii_lowercase) for x in range(32))
success_key = 'BECOME-SUCCESS-%s' % randbits
success_cmd = shlex_quote('echo %s; %s' % (success_key, cmd))
if executable:
command = '%s -c %s' % (executable, success_cmd)
else:
command = success_cmd
# set executable to use for the privilege escalation method, with various overrides
exe = self.become_exe or getattr(self, '%s_exe' % self.become_method, self.become_method)
# set flags to use for the privilege escalation method, with various overrides
flags = self.become_flags or getattr(self, '%s_flags' % self.become_method, '')
if self.become_method == 'sudo':
# If we have a password, we run sudo with a randomly-generated
# prompt set using -p. Otherwise we run it with default -n, which makes
# it fail if it would have prompted for a password.
# Cannot rely on -n as it can be removed from defaults, which should be
# done for older versions of sudo that do not support the option.
#
# Passing a quoted compound command to sudo (or sudo -s)
# directly doesn't work, so we shellquote it with shlex_quote()
# and pass the quoted string to the user's shell.
# force quick error if password is required but not supplied, should prevent sudo hangs.
if self.become_pass:
prompt = '[sudo via ansible, key=%s] password: ' % randbits
becomecmd = '%s %s -p "%s" -u %s %s' % (exe, flags.replace('-n', ''), prompt, self.become_user, command)
else:
becomecmd = '%s %s -u %s %s' % (exe, flags, self.become_user, command)
elif self.become_method == 'su':
                # pass a code ref to examine the prompt, as a simple string comparison isn't good enough with su
def detect_su_prompt(b_data):
b_password_string = b"|".join([b'(\w+\'s )?' + x for x in b_SU_PROMPT_LOCALIZATIONS])
# Colon or unicode fullwidth colon
b_password_string = b_password_string + to_bytes(u' ?(:|:) ?')
b_SU_PROMPT_LOCALIZATIONS_RE = re.compile(b_password_string, flags=re.IGNORECASE)
return bool(b_SU_PROMPT_LOCALIZATIONS_RE.match(b_data))
prompt = detect_su_prompt
becomecmd = '%s %s %s -c %s' % (exe, flags, self.become_user, shlex_quote(command))
elif self.become_method == 'pbrun':
prompt = 'Password:'
becomecmd = '%s %s -u %s %s' % (exe, flags, self.become_user, success_cmd)
elif self.become_method == 'ksu':
def detect_ksu_prompt(b_data):
return re.match(b"Kerberos password for .*@.*:", b_data)
prompt = detect_ksu_prompt
becomecmd = '%s %s %s -e %s' % (exe, self.become_user, flags, command)
elif self.become_method == 'pfexec':
                # No user as it uses its own exec_attr to figure it out
becomecmd = '%s %s "%s"' % (exe, flags, success_cmd)
elif self.become_method == 'runas':
# become is handled inside the WinRM connection plugin
display.warning("The Windows 'runas' become method is experimental, and may change significantly in future Ansible releases.")
if not self.become_user:
raise AnsibleError(("The 'runas' become method requires a username "
"(specify with the '--become-user' CLI arg, the 'become_user' keyword, or the 'ansible_become_user' variable)"))
becomecmd = cmd
elif self.become_method == 'doas':
prompt = 'doas (%s@' % self.remote_user
exe = self.become_exe or 'doas'
if not self.become_pass:
flags += ' -n '
if self.become_user:
flags += ' -u %s ' % self.become_user
# FIXME: make shell independent
becomecmd = '%s %s echo %s && %s %s env ANSIBLE=true %s' % (exe, flags, success_key, exe, flags, cmd)
elif self.become_method == 'dzdo':
exe = self.become_exe or 'dzdo'
if self.become_pass:
prompt = '[dzdo via ansible, key=%s] password: ' % randbits
becomecmd = '%s -p %s -u %s %s' % (exe, shlex_quote(prompt), self.become_user, command)
else:
becomecmd = '%s -u %s %s' % (exe, self.become_user, command)
elif self.become_method == 'pmrun':
exe = self.become_exe or 'pmrun'
prompt = 'Enter UPM user password:'
becomecmd = '%s %s %s' % (exe, flags, shlex_quote(command))
else:
raise AnsibleError("Privilege escalation method not found: %s" % self.become_method)
if self.become_pass:
self.prompt = prompt
self.success_key = success_key
return becomecmd
return cmd
def update_vars(self, variables):
'''
Adds 'magic' variables relating to connections to the variable dictionary provided.
In case users need to access from the play, this is a legacy from runner.
'''
for prop, var_list in MAGIC_VARIABLE_MAPPING.items():
try:
if 'become' in prop:
continue
var_val = getattr(self, prop)
for var_opt in var_list:
if var_opt not in variables and var_val is not None:
variables[var_opt] = var_val
except AttributeError:
continue
def _get_attr_connection(self):
''' connections are special, this takes care of responding correctly '''
conn_type = None
if self._attributes['connection'] == 'smart':
conn_type = 'ssh'
if sys.platform.startswith('darwin') and self.password:
# due to a current bug in sshpass on OSX, which can trigger
# a kernel panic even for non-privileged users, we revert to
# paramiko on that OS when a SSH password is specified
conn_type = "paramiko"
else:
# see if SSH can support ControlPersist if not use paramiko
if not check_for_controlpersist(self.ssh_executable):
conn_type = "paramiko"
# if someone did `connection: persistent`, default it to using a persistent paramiko connection to avoid problems
elif self._attributes['connection'] == 'persistent':
conn_type = 'paramiko'
if conn_type:
self.connection = conn_type
return self._attributes['connection']
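# Editor's sketch (not part of the original module): wrapping a command with
# make_become_cmd(). All attribute values below are illustrative assumptions.
def _example_make_become_cmd():
    play_context = PlayContext()
    play_context.become = True
    play_context.become_method = 'sudo'
    play_context.become_user = 'root'
    play_context.become_pass = 'secret'
    # returns something like:
    #   sudo -H -S -p "[sudo via ansible, key=...] password: " -u root /bin/sh -c '...'
    return play_context.make_become_cmd('/usr/bin/id', executable='/bin/sh')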
|
from odoo import api, fields, models, _
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
task_id = fields.Many2one('project.task', 'Task', copy=False)
def _is_procurement_task(self):
return self.product_id.type == 'service' and self.product_id.track_service == 'task'
@api.multi
def _assign(self):
self.ensure_one()
res = super(ProcurementOrder, self)._assign()
if not res:
# if there isn't any specific procurement.rule defined for the product, we may want to create a task
return self._is_procurement_task()
return res
@api.multi
def _run(self):
self.ensure_one()
if self._is_procurement_task() and not self.task_id:
# If the SO was confirmed, cancelled, set to draft then confirmed, avoid creating a new
# task.
if self.sale_line_id:
existing_task = self.env['project.task'].search(
[('sale_line_id', '=', self.sale_line_id.id)]
)
if existing_task:
return existing_task
# create a task for the procurement
return self._create_service_task()
return super(ProcurementOrder, self)._run()
def _convert_qty_company_hours(self):
company_time_uom_id = self.env.user.company_id.project_time_mode_id
if self.product_uom.id != company_time_uom_id.id and self.product_uom.category_id.id == company_time_uom_id.category_id.id:
planned_hours = self.product_uom._compute_quantity(self.product_qty, company_time_uom_id)
else:
planned_hours = self.product_qty
return planned_hours
def _get_project(self):
Project = self.env['project.project']
project = self.product_id.with_context(force_company=self.company_id.id).project_id
if not project and self.sale_line_id:
# find the project corresponding to the analytic account of the sales order
account = self.sale_line_id.order_id.project_id
if not account:
self.sale_line_id.order_id._create_analytic_account()
account = self.sale_line_id.order_id.project_id
project = Project.search([('analytic_account_id', '=', account.id)], limit=1)
if not project:
project_id = account.project_create({'name': account.name, 'use_tasks': True})
project = Project.browse(project_id)
return project
def _create_service_task(self):
project = self._get_project()
planned_hours = self._convert_qty_company_hours()
task = self.env['project.task'].create({
'name': '%s:%s' % (self.origin or '', self.product_id.name),
'date_deadline': self.date_planned,
'planned_hours': planned_hours,
'remaining_hours': planned_hours,
'partner_id': self.sale_line_id.order_id.partner_id.id or self.partner_dest_id.id,
'user_id': self.env.uid,
'procurement_id': self.id,
'description': self.name + '\n',
'project_id': project.id,
'company_id': self.company_id.id,
})
self.write({'task_id': task.id})
msg_body = _("Task Created (%s): <a href=# data-oe-model=project.task data-oe-id=%d>%s</a>") % (self.product_id.name, task.id, task.name)
self.message_post(body=msg_body)
if self.sale_line_id.order_id:
self.sale_line_id.order_id.message_post(body=msg_body)
task_msg = _("This task has been created from: <a href=# data-oe-model=sale.order data-oe-id=%d>%s</a> (%s)") % (self.sale_line_id.order_id.id, self.sale_line_id.order_id.name, self.product_id.name)
task.message_post(body=task_msg)
return task
|
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\f")
buf.write("f\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\3\2\3\2\3\2\3\3\3\3")
buf.write("\3\3\3\3\3\3\3\3\5\3!\n\3\3\4\3\4\3\5\3\5\3\6\3\6\3\6")
buf.write("\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3")
buf.write("\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6")
buf.write("\3\6\5\6F\n\6\3\7\3\7\3\b\3\b\6\bL\n\b\r\b\16\bM\5\bP")
buf.write("\n\b\3\t\3\t\5\tT\n\t\3\t\6\tW\n\t\r\t\16\tX\3\n\3\n\3")
buf.write("\n\6\n^\n\n\r\n\16\n_\3\13\6\13c\n\13\r\13\16\13d\2\2")
buf.write("\f\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\3\2\5")
buf.write("\4\2BBaa\6\2\"\"BBaa}\177\5\2\62;C\\c|\2v\2\3\3\2\2\2")
buf.write("\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r")
buf.write("\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3")
buf.write("\2\2\2\3\27\3\2\2\2\5 \3\2\2\2\7\"\3\2\2\2\t$\3\2\2\2")
buf.write("\13E\3\2\2\2\rG\3\2\2\2\17O\3\2\2\2\21Q\3\2\2\2\23Z\3")
buf.write("\2\2\2\25b\3\2\2\2\27\30\7}\2\2\30\31\7~\2\2\31\4\3\2")
buf.write("\2\2\32\33\7}\2\2\33!\7-\2\2\34\35\7}\2\2\35!\7,\2\2\36")
buf.write("\37\7}\2\2\37!\7A\2\2 \32\3\2\2\2 \34\3\2\2\2 \36\3\2")
buf.write("\2\2!\6\3\2\2\2\"#\7}\2\2#\b\3\2\2\2$%\7\177\2\2%\n\3")
buf.write("\2\2\2&\'\7\'\2\2\'F\7}\2\2()\7\'\2\2)F\7\177\2\2*+\7")
buf.write("\'\2\2+F\7~\2\2,-\7b\2\2-.\7\'\2\2.F\7}\2\2/\60\7B\2\2")
buf.write("\60\61\7\'\2\2\61F\7}\2\2\62\63\7\'\2\2\63\64\7~\2\2\64")
buf.write("F\7/\2\2\65\66\7\'\2\2\66\67\7~\2\2\678\7/\2\28F\7@\2")
buf.write("\29:\7\'\2\2:;\7~\2\2;F\7~\2\2<=\7\'\2\2=>\7~\2\2>?\7")
buf.write("~\2\2?F\7~\2\2@A\7\'\2\2AB\7~\2\2BC\7~\2\2CD\7~\2\2DF")
buf.write("\7~\2\2E&\3\2\2\2E(\3\2\2\2E*\3\2\2\2E,\3\2\2\2E/\3\2")
buf.write("\2\2E\62\3\2\2\2E\65\3\2\2\2E9\3\2\2\2E<\3\2\2\2E@\3\2")
buf.write("\2\2F\f\3\2\2\2GH\7~\2\2H\16\3\2\2\2IP\t\2\2\2JL\n\3\2")
buf.write("\2KJ\3\2\2\2LM\3\2\2\2MK\3\2\2\2MN\3\2\2\2NP\3\2\2\2O")
buf.write("I\3\2\2\2OK\3\2\2\2P\20\3\2\2\2QV\7B\2\2RT\7a\2\2SR\3")
buf.write("\2\2\2ST\3\2\2\2TU\3\2\2\2UW\t\4\2\2VS\3\2\2\2WX\3\2\2")
buf.write("\2XV\3\2\2\2XY\3\2\2\2Y\22\3\2\2\2Z[\7a\2\2[]\7a\2\2\\")
buf.write("^\t\4\2\2]\\\3\2\2\2^_\3\2\2\2_]\3\2\2\2_`\3\2\2\2`\24")
buf.write("\3\2\2\2ac\7\"\2\2ba\3\2\2\2cd\3\2\2\2db\3\2\2\2de\3\2")
buf.write("\2\2e\26\3\2\2\2\13\2 EMOSX_d\2")
return buf.getvalue()
class TacticNotationsLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
LALT = 1
LGROUP = 2
LBRACE = 3
RBRACE = 4
ESCAPED = 5
PIPE = 6
ATOM = 7
ID = 8
SUB = 9
WHITESPACE = 10
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'{|'", "'{'", "'}'", "'|'" ]
symbolicNames = [ "<INVALID>",
"LALT", "LGROUP", "LBRACE", "RBRACE", "ESCAPED", "PIPE", "ATOM",
"ID", "SUB", "WHITESPACE" ]
ruleNames = [ "LALT", "LGROUP", "LBRACE", "RBRACE", "ESCAPED", "PIPE",
"ATOM", "ID", "SUB", "WHITESPACE" ]
grammarFileName = "TacticNotations.g"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7.2")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
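# Editor's sketch (not part of the generated file): tokenizing a string with
# this lexer. InputStream and CommonTokenStream come from the antlr4 runtime
# imported above via the star import.
def _example_tokenize(text):
    lexer = TacticNotationsLexer(InputStream(text))
    stream = CommonTokenStream(lexer)
    stream.fill()
    return stream.tokens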
|
def load_test_data(load_onto=None):
from openstack_dashboard.test.test_data import ceilometer_data
from openstack_dashboard.test.test_data import cinder_data
from openstack_dashboard.test.test_data import exceptions
from openstack_dashboard.test.test_data import glance_data
from openstack_dashboard.test.test_data import heat_data
from openstack_dashboard.test.test_data import keystone_data
from openstack_dashboard.test.test_data import neutron_data
from openstack_dashboard.test.test_data import nova_data
from openstack_dashboard.test.test_data import swift_data
from openstack_dashboard.test.test_data import trove_data
# The order of these loaders matters, some depend on others.
loaders = (
exceptions.data,
keystone_data.data,
glance_data.data,
nova_data.data,
cinder_data.data,
neutron_data.data,
swift_data.data,
heat_data.data,
ceilometer_data.data,
trove_data.data,
)
if load_onto:
for data_func in loaders:
data_func(load_onto)
return load_onto
else:
return TestData(*loaders)
class TestData(object):
"""Holder object for test data. Any functions passed to the init method
will be called with the ``TestData`` object as their only argument. They
can then load data onto the object as desired.
The idea is to use the instantiated object like this::
>>> import glance_data
>>> TEST = TestData(glance_data.data)
>>> TEST.images.list()
[<Image: visible_image>, <Image: invisible_image>]
>>> TEST.images.first()
<Image: visible_image>
You can load as little or as much data as you like as long as the loaders
don't conflict with each other.
See the
:class:`~openstack_dashboard.test.test_data.utils.TestDataContainer`
class for a list of available methods.
"""
def __init__(self, *args):
for data_func in args:
data_func(self)
class TestDataContainer(object):
"""A container for test data objects.
The behavior of this class is meant to mimic a "manager" class, which
has convenient shortcuts for common actions like "list", "filter", "get",
and "add".
"""
def __init__(self):
self._objects = []
def add(self, *args):
"""Add a new object to this container.
Generally this method should only be used during data loading, since
adding data during a test can affect the results of other tests.
"""
for obj in args:
if obj not in self._objects:
self._objects.append(obj)
def list(self):
"""Returns a list of all objects in this container."""
return self._objects
def filter(self, filtered=None, **kwargs):
"""Returns objects in this container whose attributes match the given
keyword arguments.
"""
if filtered is None:
filtered = self._objects
try:
key, value = kwargs.popitem()
except KeyError:
# We're out of filters, return
return filtered
def get_match(obj):
return hasattr(obj, key) and getattr(obj, key) == value
return self.filter(filtered=filter(get_match, filtered), **kwargs)
def get(self, **kwargs):
"""Returns the single object in this container whose attributes match
the given keyword arguments. An error will be raised if the arguments
provided don't return exactly one match.
"""
matches = self.filter(**kwargs)
if not matches:
raise Exception("No matches found.")
elif len(matches) > 1:
raise Exception("Multiple matches found.")
else:
return matches.pop()
def first(self):
"""Returns the first object from this container."""
return self._objects[0]
def count(self):
return len(self._objects)
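# Editor's sketch (not part of the original module): exercising
# TestDataContainer's add/filter/get helpers with a throwaway object type.
def _example_container_usage():
    class FakeObject(object):
        def __init__(self, name):
            self.name = name
    container = TestDataContainer()
    container.add(FakeObject('alpha'), FakeObject('beta'))
    assert container.count() == 2
    assert container.get(name='alpha').name == 'alpha'
    assert container.first().name == 'alpha'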
|
from nose.tools import * # noqa: F403
from tests.base import AdminTestCase
from osf_tests.factories import NodeFactory, UserFactory
from osf.utils.permissions import ADMIN
from admin.nodes.serializers import serialize_simple_user_and_node_permissions, serialize_node
class TestNodeSerializers(AdminTestCase):
def test_serialize_node(self):
node = NodeFactory()
info = serialize_node(node)
assert_is_instance(info, dict)
assert_equal(info['parent'], node.parent_id)
assert_equal(info['title'], node.title)
assert_equal(info['children'], [])
assert_equal(info['id'], node._id)
assert_equal(info['public'], node.is_public)
assert_equal(len(info['contributors']), 1)
assert_false(info['deleted'])
def test_serialize_deleted(self):
node = NodeFactory()
info = serialize_node(node)
assert_false(info['deleted'])
node.is_deleted = True
info = serialize_node(node)
assert_true(info['deleted'])
node.is_deleted = False
info = serialize_node(node)
assert_false(info['deleted'])
def test_serialize_simple_user(self):
user = UserFactory()
node = NodeFactory(creator=user)
info = serialize_simple_user_and_node_permissions(node, user)
assert_is_instance(info, dict)
assert_equal(info['id'], user._id)
assert_equal(info['name'], user.fullname)
assert_equal(info['permission'], ADMIN)
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0301_fix_unread_messages_in_deactivated_streams"),
]
operations = [
# We do Stream lookups case-insensitively with respect to the name, but we were missing
# the appropriate (realm_id, upper(name::text)) unique index to enforce uniqueness
# on database level.
migrations.RunSQL(
"""
CREATE UNIQUE INDEX zerver_stream_realm_id_name_uniq ON zerver_stream (realm_id, upper(name::text));
"""
),
migrations.AlterUniqueTogether(
name="stream",
unique_together=set(),
),
]
|
"""
SQLAlchemy models.
"""
import six
from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper
from solum.openstack.common.db.sqlalchemy import session as sa
from solum.openstack.common import timeutils
class ModelBase(object):
"""Base class for models."""
__table_initialized__ = False
def save(self, session=None):
"""Save this object."""
if not session:
session = sa.get_session()
        # NOTE(boris-42): This part of the code should look like:
# session.add(self)
# session.flush()
# But there is a bug in sqlalchemy and eventlet that
# raises NoneType exception if there is no running
# transaction and rollback is called. As long as
# sqlalchemy has this bug we have to create transaction
# explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def get(self, key, default=None):
return getattr(self, key, default)
@property
def _extra_keys(self):
"""Specifies custom fields
Subclasses can override this property to return a list
of custom fields that should be included in their dict
representation.
For reference check tests/db/sqlalchemy/test_models.py
"""
return []
def __iter__(self):
columns = dict(object_mapper(self).columns).keys()
# NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name'
# property for an Instance.
columns.extend(self._extra_keys)
self._i = iter(columns)
return self
def next(self):
n = six.advance_iterator(self._i)
return n, getattr(self, n)
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in six.iteritems(values):
setattr(self, k, v)
def iteritems(self):
"""Make the model object behave like a dict.
Includes attributes from joins.
"""
local = dict(self)
joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
if not k[0] == '_'])
local.update(joined)
return six.iteritems(local)
class TimestampMixin(object):
created_at = Column(DateTime, default=lambda: timeutils.utcnow())
updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())
class SoftDeleteMixin(object):
deleted_at = Column(DateTime)
deleted = Column(Integer, default=0)
def soft_delete(self, session=None):
"""Mark this object as deleted."""
self.deleted = self.id
self.deleted_at = timeutils.utcnow()
self.save(session=session)
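# Editor's sketch (not part of the original module): ModelBase gives mapped
# models dict-style access; a bare subclass is enough to demonstrate it.
def _example_dict_style_access():
    class Demo(ModelBase):
        pass
    demo = Demo()
    demo['status'] = 'ok'            # __setitem__ -> setattr
    assert demo.get('status') == 'ok'
    demo.update({'status': 'done'})  # dict-like update()
    assert demo['status'] == 'done'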
|
""" msgfmt tool """
__revision__ = "src/engine/SCons/Tool/msgfmt.py 2014/03/02 14:18:15 garyo"
from SCons.Builder import BuilderBase
class _MOFileBuilder(BuilderBase):
""" The builder class for `MO` files.
    The reason for this builder to exist, and its purpose, is quite similar
    to that of `_POFileBuilder`. This time, we extend the list of sources, not
    targets, and call `BuilderBase._execute()` only once (as we assume a
    single target here).
"""
def _execute(self, env, target, source, *args, **kw):
        # Here we add support for the 'LINGUAS_FILE' keyword. An emitter is not
        # suitable in this case, as it is called too late (after multiple
        # sources are handled by the single_source builder).
import SCons.Util
from SCons.Tool.GettextCommon import _read_linguas_from_files
linguas_files = None
if env.has_key('LINGUAS_FILE') and env['LINGUAS_FILE'] is not None:
linguas_files = env['LINGUAS_FILE']
# This should prevent from endless recursion.
env['LINGUAS_FILE'] = None
# We read only languages. Suffixes shall be added automatically.
linguas = _read_linguas_from_files(env, linguas_files)
if SCons.Util.is_List(source):
source.extend(linguas)
elif source is not None:
source = [source] + linguas
else:
source = linguas
result = BuilderBase._execute(self,env,target,source,*args, **kw)
if linguas_files is not None:
env['LINGUAS_FILE'] = linguas_files
return result
def _create_mo_file_builder(env, **kw):
""" Create builder object for `MOFiles` builder """
import SCons.Action
    # FIXME: Which factory should we use for source? Ours or theirs?
kw['action'] = SCons.Action.Action('$MSGFMTCOM','$MSGFMTCOMSTR')
kw['suffix'] = '$MOSUFFIX'
kw['src_suffix'] = '$POSUFFIX'
kw['src_builder'] = '_POUpdateBuilder'
kw['single_source'] = True
return _MOFileBuilder(**kw)
def generate(env,**kw):
""" Generate `msgfmt` tool """
import SCons.Util
from SCons.Tool.GettextCommon import _detect_msgfmt
try:
env['MSGFMT'] = _detect_msgfmt(env)
except:
env['MSGFMT'] = 'msgfmt'
env.SetDefault(
MSGFMTFLAGS = [ SCons.Util.CLVar('-c') ],
MSGFMTCOM = '$MSGFMT $MSGFMTFLAGS -o $TARGET $SOURCE',
MSGFMTCOMSTR = '',
MOSUFFIX = ['.mo'],
POSUFFIX = ['.po']
)
env.Append( BUILDERS = { 'MOFiles' : _create_mo_file_builder(env) } )
def exists(env):
""" Check if the tool exists """
from SCons.Tool.GettextCommon import _msgfmt_exists
try:
return _msgfmt_exists(env)
except:
return False
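# Editor's note (not part of the original tool): typical SConstruct usage
# sketch; the tool list and file names below are illustrative assumptions.
#
#   env = Environment(tools=['default', 'msgfmt'])
#   env.MOFiles(['pl', 'de'])   # compiles pl.po and de.po into pl.mo and de.mo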
|
import sys
import inspect
from pylons import config
import logging
import zkpylons.lib.helpers as h
from pylons import request, response, session, tmpl_context as c
from zkpylons.lib.helpers import redirect_to
from pylons.util import class_name_from_module_name
from zkpylons.model import meta
from pylons.controllers.util import abort
from zkpylons.lib.base import BaseController, render
from zkpylons.model import URLHash
log = logging.getLogger(__name__)
class SecretHashController(BaseController):
def lookup(self, hash):
c.hash = URLHash.find_by_hash(hash)
if c.hash is None:
abort(404, "Sorry, Invalid Hash.")
return self.transfer(url=c.hash.url)
# as per http://www.mail-archive.com/pylons-discuss@googlegroups.com/msg06643.html
def transfer(controller = None, action = None, url = None, **kwargs):
"""usage:
1. result = transfer(url = "/someurl/someaction")
2. result = transfer(controller = "/controller1/sub_controller2",
action = "test") # kwargs will pass to action.
"""
if (url != None):
route_map = config['routes.map']
match_route= route_map.match(url)
if (match_route == None):
raise(Exception("no route matched url '%s'" % url))
# if
controller = match_route["controller"].replace("/", ".")
action = match_route["action"]
del(match_route["controller"])
del(match_route["action"])
kwargs.update(match_route)
else:
if (controller == None):
route_map = config['routes.map']
match_route = route_map.match("/")
if (match_route == None):
raise(Exception("no route matched url '%s'" % url))
# if
controller = match_route["controller"].replace("/", ".")
if (action == None):
action = match_route["action"]
# if
del(match_route["controller"])
del(match_route["action"])
kwargs.update(match_route)
else:
controller = controller.replace("/", ".")
if (action == None):
action = "index"
# if
# if
# if
full_module_name = config['pylons.package'] + '.controllers.' + controller
__traceback_hide__ = 'before_and_this'
try:
__import__(full_module_name)
except ImportError, e:
raise(NotImplementedError("'%s' not found: %s" % (controller, e)))
# try
module_name = controller.split('.')[-1]
class_name = class_name_from_module_name(module_name) + 'Controller'
controller_class = getattr(sys.modules[full_module_name], class_name)
controller_inst = controller_class()
if (hasattr(controller_inst, action)):
action_method = getattr(controller_inst, action, None)
#if (not isinstance(action_method, types.MethodType)):
# raise(NotImplementedError("action '%s' not found in '%s'" % (action, controller)))
# if
if (hasattr(controller_inst, "__before__")):
before_method = getattr(controller_inst, "__before__", None)
#if (isinstance(before_method, types.MethodType)):
# before_method(action)
# if
# if
        action_arg_names, action_varargs, action_varkw, action_defaults = inspect.getargspec(action_method)
        del(action_arg_names[0])  # drop 'self'
call_kargs = {}
for k, v in kwargs.iteritems():
            if (k in action_arg_names):
call_kargs[k] = v
# if
# for
result = action_method(**call_kargs)
if (hasattr(controller_inst, "__after__")):
after_method = getattr(controller_inst, "__after__", None)
#if (isinstance(after_method, types.MethodType)):
# after_method(action)
# if
# if
return(result)
else:
raise(NotImplementedError("action '%s' not found in '%s'" % (action, controller)))
# if
# def
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.mock.loader import DictDataLoader
from mock import MagicMock
from ansible.template import Templar
from ansible import errors
from ansible.playbook import conditional
class TestConditional(unittest.TestCase):
def setUp(self):
self.loader = DictDataLoader({})
self.cond = conditional.Conditional(loader=self.loader)
self.templar = Templar(loader=self.loader, variables={})
def _eval_con(self, when=None, variables=None):
when = when or []
variables = variables or {}
self.cond.when = when
ret = self.cond.evaluate_conditional(self.templar, variables)
return ret
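    # Example: a playbook conditional such as `when: my_var is defined` reaches
    # evaluate_conditional() as a list entry, i.e. self.cond.when is set to
    # [u"my_var is defined"], which is the shape the tests below build by hand.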
def test_false(self):
when = [u"False"]
ret = self._eval_con(when, {})
self.assertFalse(ret)
def test_true(self):
when = [u"True"]
ret = self._eval_con(when, {})
self.assertTrue(ret)
def test_true_boolean(self):
self.cond.when = [True]
m = MagicMock()
ret = self.cond.evaluate_conditional(m, {})
self.assertTrue(ret)
self.assertFalse(m.is_template.called)
def test_false_boolean(self):
self.cond.when = [False]
m = MagicMock()
ret = self.cond.evaluate_conditional(m, {})
self.assertFalse(ret)
self.assertFalse(m.is_template.called)
def test_undefined(self):
when = [u"{{ some_undefined_thing }}"]
self.assertRaisesRegex(errors.AnsibleError, "The conditional check '{{ some_undefined_thing }}' failed",
self._eval_con, when, {})
def test_defined(self):
variables = {'some_defined_thing': True}
when = [u"{{ some_defined_thing }}"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_dict_defined_values(self):
variables = {'dict_value': 1,
'some_defined_dict': {'key1': 'value1',
'key2': '{{ dict_value }}'}}
when = [u"some_defined_dict"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_dict_defined_values_is_defined(self):
variables = {'dict_value': 1,
'some_defined_dict': {'key1': 'value1',
'key2': '{{ dict_value }}'}}
when = [u"some_defined_dict.key1 is defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_dict_defined_multiple_values_is_defined(self):
variables = {'dict_value': 1,
'some_defined_dict': {'key1': 'value1',
'key2': '{{ dict_value }}'}}
when = [u"some_defined_dict.key1 is defined",
u"some_defined_dict.key2 is not undefined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_nested_hostvars_undefined_values(self):
variables = {'dict_value': 1,
'hostvars': {'host1': {'key1': 'value1',
'key2': '{{ dict_value }}'},
'host2': '{{ dict_value }}',
'host3': '{{ undefined_dict_value }}',
# no host4
},
'some_dict': {'some_dict_key1': '{{ hostvars["host3"] }}'}
}
when = [u"some_dict.some_dict_key1 == hostvars['host3']"]
# self._eval_con(when, variables)
self.assertRaisesRegex(errors.AnsibleError,
r"The conditional check 'some_dict.some_dict_key1 == hostvars\['host3'\]' failed",
# "The conditional check 'some_dict.some_dict_key1 == hostvars['host3']' failed",
# "The conditional check 'some_dict.some_dict_key1 == hostvars['host3']' failed.",
self._eval_con,
when, variables)
def test_dict_undefined_values_bare(self):
variables = {'dict_value': 1,
'some_defined_dict_with_undefined_values': {'key1': 'value1',
'key2': '{{ dict_value }}',
'key3': '{{ undefined_dict_value }}'
}}
# raises an exception when a non-string conditional is passed to extract_defined_undefined()
when = [u"some_defined_dict_with_undefined_values"]
self.assertRaisesRegex(errors.AnsibleError,
"The conditional check 'some_defined_dict_with_undefined_values' failed.",
self._eval_con,
when, variables)
def test_dict_undefined_values_is_defined(self):
variables = {'dict_value': 1,
'some_defined_dict_with_undefined_values': {'key1': 'value1',
'key2': '{{ dict_value }}',
'key3': '{{ undefined_dict_value }}'
}}
when = [u"some_defined_dict_with_undefined_values is defined"]
self.assertRaisesRegex(errors.AnsibleError,
"The conditional check 'some_defined_dict_with_undefined_values is defined' failed.",
self._eval_con,
when, variables)
def test_is_defined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_undefined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is undefined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_undefined_and_defined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is undefined", u"some_defined_thing is defined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_undefined_and_defined_reversed(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is defined", u"some_defined_thing is undefined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_not_undefined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is not undefined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_not_defined(self):
variables = {'some_defined_thing': True}
when = [u"some_undefined_thing is not defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_hostvars_quotes_is_defined(self):
variables = {'hostvars': {'some_host': {}},
'compare_targets_single': "hostvars['some_host']",
'compare_targets_double': 'hostvars["some_host"]',
'compare_targets': {'double': '{{ compare_targets_double }}',
'single': "{{ compare_targets_single }}"},
}
when = [u"hostvars['some_host'] is defined",
u'hostvars["some_host"] is defined',
u"{{ compare_targets.double }} is defined",
u"{{ compare_targets.single }} is defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_hostvars_quotes_is_defined_but_is_not_defined(self):
variables = {'hostvars': {'some_host': {}},
'compare_targets_single': "hostvars['some_host']",
'compare_targets_double': 'hostvars["some_host"]',
'compare_targets': {'double': '{{ compare_targets_double }}',
'single': "{{ compare_targets_single }}"},
}
when = [u"hostvars['some_host'] is defined",
u'hostvars["some_host"] is defined',
u"{{ compare_targets.triple }} is defined",
u"{{ compare_targets.quadruple }} is defined"]
self.assertRaisesRegex(errors.AnsibleError,
"The conditional check '{{ compare_targets.triple }} is defined' failed",
self._eval_con,
when, variables)
def test_is_hostvars_host_is_defined(self):
variables = {'hostvars': {'some_host': {}, }}
when = [u"hostvars['some_host'] is defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_hostvars_host_undefined_is_defined(self):
variables = {'hostvars': {'some_host': {}, }}
when = [u"hostvars['some_undefined_host'] is defined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_hostvars_host_undefined_is_undefined(self):
variables = {'hostvars': {'some_host': {}, }}
when = [u"hostvars['some_undefined_host'] is undefined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_hostvars_host_undefined_is_not_defined(self):
variables = {'hostvars': {'some_host': {}, }}
when = [u"hostvars['some_undefined_host'] is not defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: template
version_added: historical
options:
follow:
description:
- Determine whether symbolic links should be followed.
- When set to C(yes) symbolic links will be followed, if they exist.
- When set to C(no) symbolic links will not be followed.
- Previous to Ansible 2.4, this was hardcoded as C(yes).
type: bool
default: no
version_added: '2.4'
notes:
- You can use the M(copy) module with the C(content:) option if you prefer the template inline,
as part of the playbook.
- For Windows you can use M(win_template) which uses '\\r\\n' as C(newline_sequence) by default.
seealso:
- module: copy
- module: win_copy
- module: win_template
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- backup
- files
- template_common
- validate
'''
EXAMPLES = r'''
- name: Template a file to /etc/files.conf
template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: '0644'
- name: Template a file, using symbolic modes (equivalent to 0644)
template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: u=rw,g=r,o=r
- name: Copy a version of named.conf that is dependent on the OS; setype obtained by doing ls -Z /etc/named.conf on the original file
template:
    src: named.conf_{{ ansible_os_family }}.j2
dest: /etc/named.conf
group: named
setype: named_conf_t
    mode: '0640'
- name: Create a DOS-style text file from a template
template:
src: config.ini.j2
dest: /share/windows/config.ini
newline_sequence: '\r\n'
- name: Copy a new sudoers file into place, after passing validation with visudo
template:
src: /mine/sudoers
dest: /etc/sudoers
validate: /usr/sbin/visudo -cf %s
- name: Update sshd configuration safely, avoid locking yourself out
template:
src: etc/ssh/sshd_config.j2
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: '0600'
validate: /usr/sbin/sshd -t -f %s
backup: yes
'''
|
from __future__ import unicode_literals
from django import template
from temba.channels.views import get_channel_icon
register = template.Library()
@register.filter
def channel_icon(channel):
return get_channel_icon(channel.channel_type)
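# Usage sketch (hypothetical template; the {% load %} name depends on how this
# tag library module is named in the app):
#
#   {% load channel_tags %}
#   <i class="{{ channel|channel_icon }}"></i>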
|
from Acspy.Nc.CommonNC import CommonNC
from Acspy.Nc.Supplier import Supplier
import datacapEx
from datacapEx import ExecBlockProcessedEvent, DataCapturerId, ExecBlockStartedEvent, ScanStartedEvent
import asdmEX
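# Smoke-test script: publish a representative set of data-capture events
# (exec-block processed/started/ended and scan-started) on the 'pyTest-NC'
# notification channel, then destroy the channel.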
s = Supplier('pyTest-NC')
name = 'DATACAP1'
s.publishEvent(name)
sessionId = asdmEX.IDLEntityRef('SessionId','X1','SID','1.0')
sb = asdmEX.IDLEntityRef('SB1','X1','SB1','1.0')
dcId = DataCapturerId(name, 'arrayId', sessionId, sb)
execBlockId = asdmEX.IDLEntityRef('ExecBlockId','X1','SB1','1.0')
d = ExecBlockProcessedEvent( dcId, 'statu', execBlockId, 0)
s.publishEvent(d)
execId = asdmEX.IDLEntityRef('4','3','2', '1')
execBlockId = asdmEX.IDLEntityRef('1','2','3','4')
sse = ScanStartedEvent(execId, "something", 4, [datacapEx.LAST, datacapEx.LAST], 0)
s.publishEvent(sse)
execId = "23"
execBlockEntityRef = asdmEX.IDLEntityRef(execId,"X00000000","0","0")
sbId = asdmEX.IDLEntityRef(execId,"X00000000","0","0")
arrayId = "1"
time = 100
startExecBlock = datacapEx.ExecBlockStartedEvent(execBlockEntityRef, sbId, sessionId, arrayId, time)
s.publishEvent(startExecBlock)
endExecBlock = datacapEx.ExecBlockEndedEvent(execBlockEntityRef, sbId, sessionId, arrayId, datacapEx.SUCCESS, time + 10)
s.publishEvent(endExecBlock)
print "All structures successfully sent!!"
s.destroyNotificationChannel()
|
from mock import *
from .gp_unittest import *
from gppylib.programs.gppkg import GpPkgProgram
import sys
class GpPkgProgramTestCase(GpTestCase):
def setUp(self):
self.mock_cmd = Mock()
self.mock_gppkg = Mock()
self.mock_uninstall_package = Mock()
self.apply_patches([
patch('gppylib.operations.package.logger', return_value=Mock(spec=['log', 'info', 'debug', 'error'])),
patch('gppylib.programs.gppkg.Command', return_value=self.mock_cmd),
patch('gppylib.programs.gppkg.Gppkg', return_value=self.mock_gppkg),
patch('gppylib.programs.gppkg.UninstallPackage', return_value=self.mock_uninstall_package),
patch('os.listdir')
])
self.mock_logger = self.get_mock_from_apply_patch('logger')
self.mock_listdir = self.get_mock_from_apply_patch('listdir')
def test__remove_raises_when_gppkg_was_not_installed(self):
sys.argv = ["gppkg", "--remove", "sample"]
get_result_mock = Mock()
get_result_mock.stdout.strip.return_value = "RPM version 4.8.0"
self.mock_listdir.return_value = ['another.gppkg']
self.mock_cmd.get_results.return_value = get_result_mock
parser = GpPkgProgram.create_parser()
options, args = parser.parse_args()
with self.assertRaisesRegex(Exception, "Package sample has not been installed"):
self.subject = GpPkgProgram(options, args)
self.subject.run()
def test__remove_succeeds_when_gppkg_had_been_installed(self):
sys.argv = ["gppkg", "--remove", "sample"]
get_result_mock = Mock()
get_result_mock.stdout.strip.return_value = "RPM version 4.8.0"
self.mock_cmd.get_results.return_value = get_result_mock
self.mock_listdir.return_value = ['sample.gppkg', 'another.gppkg', 'sample2.gppkg']
self.mock_gppkg.from_package_path.return_value = []
self.mock_uninstall_package.run.return_value = None
parser = GpPkgProgram.create_parser()
options, args = parser.parse_args()
self.subject = GpPkgProgram(options, args)
self.subject.run()
self.mock_listdir.assert_called()
self.mock_uninstall_package.run.assert_called_once()
def test__input_matches_multiple_packages(self):
sys.argv = ["gppkg", "--remove", "sampl"]
get_result_mock = Mock()
get_result_mock.stdout.strip.return_value = "RPM version 4.8.0"
self.mock_cmd.get_results.return_value = get_result_mock
self.mock_listdir.return_value = ['sample.gppkg', 'sample2.gppkg', 'another.gppkg']
self.mock_gppkg.from_package_path.return_value = []
self.mock_uninstall_package.run.return_value = None
parser = GpPkgProgram.create_parser()
options, args = parser.parse_args()
self.subject = GpPkgProgram(options, args)
with self.assertRaisesRegex(Exception, "Remove request 'sampl' too broad. "
"Multiple packages match remove request: \( sample.gppkg, sample2.gppkg \)."):
self.subject.run()
self.assertFalse(self.mock_uninstall_package.run.called)
def test__input_exact_match_when_wildcard_would_have_more(self):
sys.argv = ["gppkg", "--remove", "sample"]
get_result_mock = Mock()
get_result_mock.stdout.strip.return_value = "RPM version 4.8.0"
self.mock_cmd.get_results.return_value = get_result_mock
self.mock_listdir.return_value = ['sample.gppkg', 'sample2.gppkg', 'another.gppkg']
self.mock_gppkg.from_package_path.return_value = []
self.mock_uninstall_package.run.return_value = None
parser = GpPkgProgram.create_parser()
options, args = parser.parse_args()
self.subject = GpPkgProgram(options, args)
self.subject.run()
self.mock_listdir.assert_called()
self.mock_uninstall_package.run.assert_called_once()
|
from datetime import date
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_date04.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {'xl/charts/chart1.xml': ['<c:formatCode']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'line'})
date_format = workbook.add_format({'num_format': 14})
chart.axis_ids = [51761152, 51762688]
worksheet.set_column('A:A', 12)
dates = [date(2013, 1, 1),
date(2013, 1, 2),
date(2013, 1, 3),
date(2013, 1, 4),
date(2013, 1, 5),
date(2013, 1, 6),
date(2013, 1, 7),
date(2013, 1, 8),
date(2013, 1, 9),
date(2013, 1, 10)]
values = [10, 30, 20, 40, 20, 60, 50, 40, 30, 30]
worksheet.write_column('A1', dates, date_format)
worksheet.write_column('B1', values)
chart.add_series({
'categories': '=Sheet1!$A$1:$A$10',
'values': '=Sheet1!$B$1:$B$10',
})
chart.set_x_axis({
'date_axis': True,
'minor_unit': 1,
'major_unit': 1,
'minor_unit_type': 'months',
'major_unit_type': 'years',
'num_format': 'dd/mm/yyyy',
'num_format_linked': True,
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
""" DIRECT Nine DoF Manipulation Panel """
from direct.showbase.DirectObject import DirectObject
from direct.directtools.DirectGlobals import *
from direct.tkwidgets.AppShell import AppShell
from direct.tkwidgets.Dial import AngleDial
from direct.tkwidgets.Floater import Floater
from Tkinter import Button, Menubutton, Menu, StringVar
from pandac.PandaModules import *
import Tkinter, Pmw
"""
TODO:
Task to monitor pose
"""
class Placer(AppShell):
# Override class variables here
appname = 'Placer Panel'
frameWidth = 625
frameHeight = 215
usecommandarea = 0
usestatusarea = 0
def __init__(self, parent = None, **kw):
INITOPT = Pmw.INITOPT
optiondefs = (
('title', self.appname, None),
('nodePath', SEditor.camera, None),
)
self.defineoptions(kw, optiondefs)
# Call superclass initialization function
AppShell.__init__(self)
self.initialiseoptions(Placer)
# Accept the message from sceneEditor to update the information about the target nodePath
self.accept('placerUpdate', self.updatePlacer)
def appInit(self):
# Initialize state
self.tempCS = SEditor.group.attachNewNode('placerTempCS')
self.orbitFromCS = SEditor.group.attachNewNode(
'placerOrbitFromCS')
self.orbitToCS = SEditor.group.attachNewNode('placerOrbitToCS')
self.refCS = self.tempCS
# Dictionary keeping track of all node paths manipulated so far
self.nodePathDict = {}
self.nodePathDict['camera'] = SEditor.camera
self.nodePathDict['widget'] = SEditor.widget
self.nodePathNames = ['camera', 'widget', 'selected']
self.refNodePathDict = {}
self.refNodePathDict['parent'] = self['nodePath'].getParent()
self.refNodePathDict['render'] = render
self.refNodePathDict['camera'] = SEditor.camera
self.refNodePathDict['widget'] = SEditor.widget
self.refNodePathNames = ['parent', 'self', 'render',
'camera', 'widget', 'selected']
# Initial state
self.initPos = Vec3(0)
self.initHpr = Vec3(0)
self.initScale = Vec3(1)
self.deltaHpr = Vec3(0)
# Offset for orbital mode
self.posOffset = Vec3(0)
# Set up event hooks
self.undoEvents = [('DIRECT_undo', self.undoHook),
('DIRECT_pushUndo', self.pushUndoHook),
('DIRECT_undoListEmpty', self.undoListEmptyHook),
('DIRECT_redo', self.redoHook),
('DIRECT_pushRedo', self.pushRedoHook),
('DIRECT_redoListEmpty', self.redoListEmptyHook)]
for event, method in self.undoEvents:
self.accept(event, method)
# Init movement mode
self.movementMode = 'Relative To:'
def createInterface(self):
# The interior of the toplevel panel
interior = self.interior()
interior['relief'] = Tkinter.FLAT
# Add placer commands to menubar
self.menuBar.addmenu('Placer', 'Placer Panel Operations')
self.menuBar.addmenuitem('Placer', 'command',
'Zero Node Path',
label = 'Zero All',
command = self.zeroAll)
self.menuBar.addmenuitem('Placer', 'command',
'Reset Node Path',
label = 'Reset All',
command = self.resetAll)
self.menuBar.addmenuitem('Placer', 'command',
'Print Node Path Info',
label = 'Print Info',
command = self.printNodePathInfo)
self.menuBar.addmenuitem(
'Placer', 'command',
            'Toggle widget visibility',
label = 'Toggle Widget Vis',
command = SEditor.toggleWidgetVis)
self.menuBar.addmenuitem(
'Placer', 'command',
'Toggle widget manipulation mode',
label = 'Toggle Widget Mode',
command = SEditor.manipulationControl.toggleObjectHandlesMode)
# Get a handle to the menu frame
menuFrame = self.menuFrame
self.nodePathMenu = Pmw.ComboBox(
menuFrame, labelpos = Tkinter.W, label_text = 'Node Path:',
entry_width = 20,
selectioncommand = self.selectNodePathNamed,
scrolledlist_items = self.nodePathNames)
self.nodePathMenu.selectitem('selected')
self.nodePathMenuEntry = (
self.nodePathMenu.component('entryfield_entry'))
self.nodePathMenuBG = (
self.nodePathMenuEntry.configure('background')[3])
self.nodePathMenu.pack(side = 'left', fill = 'x', expand = 1)
self.bind(self.nodePathMenu, 'Select node path to manipulate')
modeMenu = Pmw.OptionMenu(menuFrame,
items = ('Relative To:',
'Orbit:'),
initialitem = 'Relative To:',
command = self.setMovementMode,
menubutton_width = 8)
modeMenu.pack(side = 'left', expand = 0)
self.bind(modeMenu, 'Select manipulation mode')
self.refNodePathMenu = Pmw.ComboBox(
menuFrame, entry_width = 16,
selectioncommand = self.selectRefNodePathNamed,
scrolledlist_items = self.refNodePathNames)
self.refNodePathMenu.selectitem('parent')
self.refNodePathMenuEntry = (
self.refNodePathMenu.component('entryfield_entry'))
self.refNodePathMenu.pack(side = 'left', fill = 'x', expand = 1)
self.bind(self.refNodePathMenu, 'Select relative node path')
self.undoButton = Button(menuFrame, text = 'Undo',
command = SEditor.undo)
if SEditor.undoList:
self.undoButton['state'] = 'normal'
else:
self.undoButton['state'] = 'disabled'
self.undoButton.pack(side = 'left', expand = 0)
self.bind(self.undoButton, 'Undo last operation')
self.redoButton = Button(menuFrame, text = 'Redo',
command = SEditor.redo)
if SEditor.redoList:
self.redoButton['state'] = 'normal'
else:
self.redoButton['state'] = 'disabled'
self.redoButton.pack(side = 'left', expand = 0)
self.bind(self.redoButton, 'Redo last operation')
# Create and pack the Pos Controls
posGroup = Pmw.Group(interior,
tag_pyclass = Menubutton,
tag_text = 'Position',
tag_font=('MSSansSerif', 14),
tag_activebackground = '#909090',
ring_relief = Tkinter.RIDGE)
posMenubutton = posGroup.component('tag')
self.bind(posMenubutton, 'Position menu operations')
posMenu = Menu(posMenubutton, tearoff = 0)
posMenu.add_command(label = 'Set to zero', command = self.zeroPos)
posMenu.add_command(label = 'Reset initial',
command = self.resetPos)
posMenubutton['menu'] = posMenu
posGroup.pack(side='left', fill = 'both', expand = 1)
posInterior = posGroup.interior()
# Create the dials
self.posX = self.createcomponent('posX', (), None,
Floater, (posInterior,),
text = 'X', relief = Tkinter.FLAT,
value = 0.0,
label_foreground = 'Red')
self.posX['commandData'] = ['x']
self.posX['preCallback'] = self.xformStart
self.posX['postCallback'] = self.xformStop
self.posX['callbackData'] = ['x']
self.posX.pack(expand=1,fill='both')
self.posY = self.createcomponent('posY', (), None,
Floater, (posInterior,),
text = 'Y', relief = Tkinter.FLAT,
value = 0.0,
label_foreground = '#00A000')
self.posY['commandData'] = ['y']
self.posY['preCallback'] = self.xformStart
self.posY['postCallback'] = self.xformStop
self.posY['callbackData'] = ['y']
self.posY.pack(expand=1,fill='both')
self.posZ = self.createcomponent('posZ', (), None,
Floater, (posInterior,),
text = 'Z', relief = Tkinter.FLAT,
value = 0.0,
label_foreground = 'Blue')
self.posZ['commandData'] = ['z']
self.posZ['preCallback'] = self.xformStart
self.posZ['postCallback'] = self.xformStop
self.posZ['callbackData'] = ['z']
self.posZ.pack(expand=1,fill='both')
# Create and pack the Hpr Controls
hprGroup = Pmw.Group(interior,
tag_pyclass = Menubutton,
tag_text = 'Orientation',
tag_font=('MSSansSerif', 14),
tag_activebackground = '#909090',
ring_relief = Tkinter.RIDGE)
hprMenubutton = hprGroup.component('tag')
self.bind(hprMenubutton, 'Orientation menu operations')
hprMenu = Menu(hprMenubutton, tearoff = 0)
hprMenu.add_command(label = 'Set to zero', command = self.zeroHpr)
hprMenu.add_command(label = 'Reset initial', command = self.resetHpr)
hprMenubutton['menu'] = hprMenu
hprGroup.pack(side='left',fill = 'both', expand = 1)
hprInterior = hprGroup.interior()
# Create the dials
self.hprH = self.createcomponent('hprH', (), None,
AngleDial, (hprInterior,),
style = 'mini',
text = 'H', value = 0.0,
relief = Tkinter.FLAT,
label_foreground = 'blue')
self.hprH['commandData'] = ['h']
self.hprH['preCallback'] = self.xformStart
self.hprH['postCallback'] = self.xformStop
self.hprH['callbackData'] = ['h']
self.hprH.pack(expand=1,fill='both')
self.hprP = self.createcomponent('hprP', (), None,
AngleDial, (hprInterior,),
style = 'mini',
text = 'P', value = 0.0,
relief = Tkinter.FLAT,
label_foreground = 'red')
self.hprP['commandData'] = ['p']
self.hprP['preCallback'] = self.xformStart
self.hprP['postCallback'] = self.xformStop
self.hprP['callbackData'] = ['p']
self.hprP.pack(expand=1,fill='both')
self.hprR = self.createcomponent('hprR', (), None,
AngleDial, (hprInterior,),
style = 'mini',
text = 'R', value = 0.0,
relief = Tkinter.FLAT,
label_foreground = '#00A000')
self.hprR['commandData'] = ['r']
self.hprR['preCallback'] = self.xformStart
self.hprR['postCallback'] = self.xformStop
self.hprR['callbackData'] = ['r']
self.hprR.pack(expand=1,fill='both')
# Create and pack the Scale Controls
# The available scaling modes
self.scalingMode = StringVar()
self.scalingMode.set('Scale Uniform')
# The scaling widgets
scaleGroup = Pmw.Group(interior,
tag_text = 'Scale Uniform',
tag_pyclass = Menubutton,
tag_font=('MSSansSerif', 14),
tag_activebackground = '#909090',
ring_relief = Tkinter.RIDGE)
self.scaleMenubutton = scaleGroup.component('tag')
self.bind(self.scaleMenubutton, 'Scale menu operations')
self.scaleMenubutton['textvariable'] = self.scalingMode
# Scaling menu
scaleMenu = Menu(self.scaleMenubutton, tearoff = 0)
scaleMenu.add_command(label = 'Set to unity',
command = self.unitScale)
scaleMenu.add_command(label = 'Reset initial',
command = self.resetScale)
scaleMenu.add_radiobutton(label = 'Scale Free',
variable = self.scalingMode)
scaleMenu.add_radiobutton(label = 'Scale Uniform',
variable = self.scalingMode)
scaleMenu.add_radiobutton(label = 'Scale Proportional',
variable = self.scalingMode)
self.scaleMenubutton['menu'] = scaleMenu
# Pack group widgets
scaleGroup.pack(side='left',fill = 'both', expand = 1)
scaleInterior = scaleGroup.interior()
# Create the dials
self.scaleX = self.createcomponent('scaleX', (), None,
Floater, (scaleInterior,),
text = 'X Scale',
relief = Tkinter.FLAT,
min = 0.0001, value = 1.0,
resetValue = 1.0,
label_foreground = 'Red')
self.scaleX['commandData'] = ['sx']
self.scaleX['callbackData'] = ['sx']
self.scaleX['preCallback'] = self.xformStart
self.scaleX['postCallback'] = self.xformStop
self.scaleX.pack(expand=1,fill='both')
self.scaleY = self.createcomponent('scaleY', (), None,
Floater, (scaleInterior,),
text = 'Y Scale',
relief = Tkinter.FLAT,
min = 0.0001, value = 1.0,
resetValue = 1.0,
label_foreground = '#00A000')
self.scaleY['commandData'] = ['sy']
self.scaleY['callbackData'] = ['sy']
self.scaleY['preCallback'] = self.xformStart
self.scaleY['postCallback'] = self.xformStop
self.scaleY.pack(expand=1,fill='both')
self.scaleZ = self.createcomponent('scaleZ', (), None,
Floater, (scaleInterior,),
text = 'Z Scale',
relief = Tkinter.FLAT,
min = 0.0001, value = 1.0,
resetValue = 1.0,
label_foreground = 'Blue')
self.scaleZ['commandData'] = ['sz']
self.scaleZ['callbackData'] = ['sz']
self.scaleZ['preCallback'] = self.xformStart
self.scaleZ['postCallback'] = self.xformStop
self.scaleZ.pack(expand=1,fill='both')
# Make sure appropriate labels are showing
self.setMovementMode('Relative To:')
        # Set up placer for initial node path
self.selectNodePathNamed('init')
self.selectRefNodePathNamed('parent')
        # Update placer to reflect initial state
self.updatePlacer()
# Now that you're done setting up, attach commands
self.posX['command'] = self.xform
self.posY['command'] = self.xform
self.posZ['command'] = self.xform
self.hprH['command'] = self.xform
self.hprP['command'] = self.xform
self.hprR['command'] = self.xform
self.scaleX['command'] = self.xform
self.scaleY['command'] = self.xform
self.scaleZ['command'] = self.xform
### WIDGET OPERATIONS ###
def setMovementMode(self, movementMode):
# Set prefix
namePrefix = ''
self.movementMode = movementMode
if (movementMode == 'Relative To:'):
namePrefix = 'Relative '
elif (movementMode == 'Orbit:'):
namePrefix = 'Orbit '
# Update pos widgets
self.posX['text'] = namePrefix + 'X'
self.posY['text'] = namePrefix + 'Y'
self.posZ['text'] = namePrefix + 'Z'
# Update hpr widgets
if (movementMode == 'Orbit:'):
namePrefix = 'Orbit delta '
self.hprH['text'] = namePrefix + 'H'
self.hprP['text'] = namePrefix + 'P'
self.hprR['text'] = namePrefix + 'R'
# Update temp cs and initialize widgets
self.updatePlacer()
def setScalingMode(self):
if self['nodePath']:
scale = self['nodePath'].getScale()
if ((scale[0] != scale[1]) or
(scale[0] != scale[2]) or
(scale[1] != scale[2])):
self.scalingMode.set('Scale Free')
def selectNodePathNamed(self, name):
nodePath = None
if name == 'init':
nodePath = self['nodePath']
# Add Combo box entry for the initial node path
self.addNodePath(nodePath)
elif name == 'selected':
nodePath = SEditor.selected.last
# Add Combo box entry for this selected object
self.addNodePath(nodePath)
else:
nodePath = self.nodePathDict.get(name, None)
if (nodePath == None):
# See if this evaluates into a node path
try:
nodePath = eval(name)
if isinstance(nodePath, NodePath):
self.addNodePath(nodePath)
else:
# Good eval but not a node path, give up
nodePath = None
except:
# Bogus eval
nodePath = None
# Clear bogus entry from listbox
listbox = self.nodePathMenu.component('scrolledlist')
listbox.setlist(self.nodePathNames)
else:
if name == 'widget':
# Record relationship between selected nodes and widget
SEditor.selected.getWrtAll()
# Update active node path
self.setActiveNodePath(nodePath)
def setActiveNodePath(self, nodePath):
self['nodePath'] = nodePath
if self['nodePath']:
self.nodePathMenuEntry.configure(
background = self.nodePathMenuBG)
# Check to see if node path and ref node path are the same
if ((self.refCS != None) and
(self.refCS.id() == self['nodePath'].id())):
# Yes they are, use temp CS as ref
# This calls updatePlacer
self.setReferenceNodePath(self.tempCS)
# update listbox accordingly
self.refNodePathMenu.selectitem('parent')
else:
# Record initial value and initialize the widgets
self.updatePlacer()
# Record initial position
self.updateResetValues(self['nodePath'])
# Set scaling mode based on node path's current scale
self.setScalingMode()
else:
# Flash entry
self.nodePathMenuEntry.configure(background = 'Pink')
def selectRefNodePathNamed(self, name):
nodePath = None
if name == 'self':
nodePath = self.tempCS
elif name == 'selected':
nodePath = SEditor.selected.last
# Add Combo box entry for this selected object
self.addRefNodePath(nodePath)
elif name == 'parent':
nodePath = self['nodePath'].getParent()
else:
nodePath = self.refNodePathDict.get(name, None)
if (nodePath == None):
# See if this evaluates into a node path
try:
nodePath = eval(name)
if isinstance(nodePath, NodePath):
self.addRefNodePath(nodePath)
else:
# Good eval but not a node path, give up
nodePath = None
except:
# Bogus eval
nodePath = None
# Clear bogus entry from listbox
listbox = self.refNodePathMenu.component('scrolledlist')
listbox.setlist(self.refNodePathNames)
# Check to see if node path and ref node path are the same
if (nodePath != None) and (nodePath.id() == self['nodePath'].id()):
# Yes they are, use temp CS and update listbox accordingly
nodePath = self.tempCS
self.refNodePathMenu.selectitem('parent')
# Update ref node path
self.setReferenceNodePath(nodePath)
def setReferenceNodePath(self, nodePath):
self.refCS = nodePath
if self.refCS:
self.refNodePathMenuEntry.configure(
background = self.nodePathMenuBG)
# Update placer to reflect new state
self.updatePlacer()
else:
# Flash entry
self.refNodePathMenuEntry.configure(background = 'Pink')
def addNodePath(self, nodePath):
self.addNodePathToDict(nodePath, self.nodePathNames,
self.nodePathMenu, self.nodePathDict)
def addRefNodePath(self, nodePath):
self.addNodePathToDict(nodePath, self.refNodePathNames,
self.refNodePathMenu, self.refNodePathDict)
def addNodePathToDict(self, nodePath, names, menu, dict):
if not nodePath:
return
# Get node path's name
name = nodePath.getName()
if name in ['parent', 'render', 'camera']:
dictName = name
else:
# Generate a unique name for the dict
dictName = name + '-' + `nodePath.id()`
if not dict.has_key(dictName):
# Update combo box to include new item
names.append(dictName)
listbox = menu.component('scrolledlist')
listbox.setlist(names)
# Add new item to dictionary
dict[dictName] = nodePath
menu.selectitem(dictName)
def updatePlacer(self):
pos = Vec3(0)
hpr = Vec3(0)
scale = Vec3(1)
np = self['nodePath']
if (np != None) and isinstance(np, NodePath):
# Update temp CS
self.updateAuxiliaryCoordinateSystems()
# Update widgets
if self.movementMode == 'Orbit:':
pos.assign(self.posOffset)
hpr.assign(ZERO_VEC)
scale.assign(np.getScale())
elif self.refCS:
pos.assign(np.getPos(self.refCS))
hpr.assign(np.getHpr(self.refCS))
scale.assign(np.getScale())
self.updatePosWidgets(pos)
self.updateHprWidgets(hpr)
self.updateScaleWidgets(scale)
def updateAuxiliaryCoordinateSystems(self):
# Temp CS
self.tempCS.setPosHpr(self['nodePath'], 0,0,0,0,0,0)
# Orbit CS
# At reference
self.orbitFromCS.setPos(self.refCS, 0,0,0)
# But aligned with target
self.orbitFromCS.setHpr(self['nodePath'], 0,0,0)
# Also update to CS
self.orbitToCS.setPosHpr(self.orbitFromCS, 0,0,0,0,0,0)
# Get offset from origin
self.posOffset.assign(self['nodePath'].getPos(self.orbitFromCS))
### NODE PATH TRANSFORMATION OPERATIONS ###
def xform(self, value, axis):
if axis in ['sx', 'sy', 'sz']:
self.xformScale(value,axis)
elif self.movementMode == 'Relative To:':
self.xformRelative(value, axis)
elif self.movementMode == 'Orbit:':
self.xformOrbit(value, axis)
if self.nodePathMenu.get() == 'widget':
if SEditor.manipulationControl.fSetCoa:
# Update coa based on current widget position
SEditor.selected.last.mCoa2Dnp.assign(
SEditor.widget.getMat(SEditor.selected.last))
else:
# Move the objects with the widget
SEditor.selected.moveWrtWidgetAll()
def xformStart(self, data):
# Record undo point
self.pushUndo()
# If moving widget kill follow task and update wrts
if self.nodePathMenu.get() == 'widget':
taskMgr.remove('followSelectedNodePath')
# Record relationship between selected nodes and widget
SEditor.selected.getWrtAll()
# Record initial state
self.deltaHpr = self['nodePath'].getHpr(self.refCS)
# Update placer to reflect new state
self.updatePlacer()
def xformStop(self, data):
# Throw event to signal manipulation done
# Send nodepath as a list
messenger.send('DIRECT_manipulateObjectCleanup', [[self['nodePath']]])
# Update placer to reflect new state
self.updatePlacer()
# If moving widget restart follow task
if self.nodePathMenu.get() == 'widget':
# Restart followSelectedNodePath task
SEditor.manipulationControl.spawnFollowSelectedNodePathTask()
def xformRelative(self, value, axis):
nodePath = self['nodePath']
if (nodePath != None) and (self.refCS != None):
if axis == 'x':
nodePath.setX(self.refCS, value)
elif axis == 'y':
nodePath.setY(self.refCS, value)
elif axis == 'z':
nodePath.setZ(self.refCS, value)
else:
if axis == 'h':
self.deltaHpr.setX(value)
elif axis == 'p':
self.deltaHpr.setY(value)
elif axis == 'r':
self.deltaHpr.setZ(value)
# Put node path at new hpr
nodePath.setHpr(self.refCS, self.deltaHpr)
def xformOrbit(self, value, axis):
nodePath = self['nodePath']
if ((nodePath != None) and (self.refCS != None) and
(self.orbitFromCS != None) and (self.orbitToCS != None)):
if axis == 'x':
self.posOffset.setX(value)
elif axis == 'y':
self.posOffset.setY(value)
elif axis == 'z':
self.posOffset.setZ(value)
elif axis == 'h':
self.orbitToCS.setH(self.orbitFromCS, value)
elif axis == 'p':
self.orbitToCS.setP(self.orbitFromCS, value)
elif axis == 'r':
self.orbitToCS.setR(self.orbitFromCS, value)
nodePath.setPosHpr(self.orbitToCS, self.posOffset, ZERO_VEC)
def xformScale(self, value, axis):
if self['nodePath']:
mode = self.scalingMode.get()
scale = self['nodePath'].getScale()
if mode == 'Scale Free':
if axis == 'sx':
scale.setX(value)
elif axis == 'sy':
scale.setY(value)
elif axis == 'sz':
scale.setZ(value)
elif mode == 'Scale Uniform':
scale.set(value,value,value)
elif mode == 'Scale Proportional':
if axis == 'sx':
sf = value/scale[0]
elif axis == 'sy':
sf = value/scale[1]
elif axis == 'sz':
sf = value/scale[2]
scale = scale * sf
self['nodePath'].setScale(scale)
def updatePosWidgets(self, pos):
self.posX.set(pos[0])
self.posY.set(pos[1])
self.posZ.set(pos[2])
def updateHprWidgets(self, hpr):
self.hprH.set(hpr[0])
self.hprP.set(hpr[1])
self.hprR.set(hpr[2])
def updateScaleWidgets(self, scale):
self.scaleX.set(scale[0])
self.scaleY.set(scale[1])
self.scaleZ.set(scale[2])
def zeroAll(self):
self.xformStart(None)
self.updatePosWidgets(ZERO_VEC)
self.updateHprWidgets(ZERO_VEC)
self.updateScaleWidgets(UNIT_VEC)
self.xformStop(None)
def zeroPos(self):
self.xformStart(None)
self.updatePosWidgets(ZERO_VEC)
self.xformStop(None)
def zeroHpr(self):
self.xformStart(None)
self.updateHprWidgets(ZERO_VEC)
self.xformStop(None)
def unitScale(self):
self.xformStart(None)
self.updateScaleWidgets(UNIT_VEC)
self.xformStop(None)
def updateResetValues(self, nodePath):
self.initPos.assign(nodePath.getPos())
self.posX['resetValue'] = self.initPos[0]
self.posY['resetValue'] = self.initPos[1]
self.posZ['resetValue'] = self.initPos[2]
self.initHpr.assign(nodePath.getHpr())
self.hprH['resetValue'] = self.initHpr[0]
self.hprP['resetValue'] = self.initHpr[1]
self.hprR['resetValue'] = self.initHpr[2]
self.initScale.assign(nodePath.getScale())
self.scaleX['resetValue'] = self.initScale[0]
self.scaleY['resetValue'] = self.initScale[1]
self.scaleZ['resetValue'] = self.initScale[2]
def resetAll(self):
if self['nodePath']:
self.xformStart(None)
self['nodePath'].setPosHprScale(
self.initPos, self.initHpr, self.initScale)
self.xformStop(None)
def resetPos(self):
if self['nodePath']:
self.xformStart(None)
self['nodePath'].setPos(self.initPos)
self.xformStop(None)
def resetHpr(self):
if self['nodePath']:
self.xformStart(None)
self['nodePath'].setHpr(self.initHpr)
self.xformStop(None)
def resetScale(self):
if self['nodePath']:
self.xformStart(None)
self['nodePath'].setScale(self.initScale)
self.xformStop(None)
def pushUndo(self, fResetRedo = 1):
SEditor.pushUndo([self['nodePath']])
def undoHook(self, nodePathList = []):
# Reflect new changes
self.updatePlacer()
def pushUndoHook(self):
# Make sure button is reactivated
self.undoButton.configure(state = 'normal')
def undoListEmptyHook(self):
# Make sure button is deactivated
self.undoButton.configure(state = 'disabled')
def pushRedo(self):
SEditor.pushRedo([self['nodePath']])
def redoHook(self, nodePathList = []):
# Reflect new changes
self.updatePlacer()
def pushRedoHook(self):
# Make sure button is reactivated
self.redoButton.configure(state = 'normal')
def redoListEmptyHook(self):
# Make sure button is deactivated
self.redoButton.configure(state = 'disabled')
def printNodePathInfo(self):
np = self['nodePath']
if np:
name = np.getName()
pos = np.getPos()
hpr = np.getHpr()
scale = np.getScale()
posString = '%.2f, %.2f, %.2f' % (pos[0], pos[1], pos[2])
hprString = '%.2f, %.2f, %.2f' % (hpr[0], hpr[1], hpr[2])
scaleString = '%.2f, %.2f, %.2f' % (scale[0], scale[1], scale[2])
print 'NodePath: %s' % name
print 'Pos: %s' % posString
print 'Hpr: %s' % hprString
print 'Scale: %s' % scaleString
print ('%s.setPosHprScale(%s, %s, %s)' %
(name, posString, hprString, scaleString))
def onDestroy(self, event):
# Remove hooks
for event, method in self.undoEvents:
self.ignore(event)
self.tempCS.removeNode()
self.orbitFromCS.removeNode()
self.orbitToCS.removeNode()
        # Send out a message to let sceneEditor know that the placer panel has
        # been closed. Also, stop accepting the update message from sceneEditor.
messenger.send('Placer_close')
self.ignore('placerUpdate')
def place(nodePath):
return Placer(nodePath = nodePath)
if __name__ == '__main__':
root = Pmw.initialise()
widget = Placer()
|
'''tzinfo timezone information for America/Inuvik.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Inuvik(DstTzInfo):
'''America/Inuvik timezone definition. See datetime.tzinfo for details'''
zone = 'America/Inuvik'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,4,14,10,0,0),
d(1918,10,27,9,0,0),
d(1919,5,25,10,0,0),
d(1919,11,1,7,0,0),
d(1942,2,9,10,0,0),
d(1945,8,14,23,0,0),
d(1945,9,30,9,0,0),
d(1965,4,25,8,0,0),
d(1965,10,31,8,0,0),
d(1979,4,29,10,0,0),
d(1980,4,27,9,0,0),
d(1980,10,26,8,0,0),
d(1981,4,26,9,0,0),
d(1981,10,25,8,0,0),
d(1982,4,25,9,0,0),
d(1982,10,31,8,0,0),
d(1983,4,24,9,0,0),
d(1983,10,30,8,0,0),
d(1984,4,29,9,0,0),
d(1984,10,28,8,0,0),
d(1985,4,28,9,0,0),
d(1985,10,27,8,0,0),
d(1986,4,27,9,0,0),
d(1986,10,26,8,0,0),
d(1987,4,5,9,0,0),
d(1987,10,25,8,0,0),
d(1988,4,3,9,0,0),
d(1988,10,30,8,0,0),
d(1989,4,2,9,0,0),
d(1989,10,29,8,0,0),
d(1990,4,1,9,0,0),
d(1990,10,28,8,0,0),
d(1991,4,7,9,0,0),
d(1991,10,27,8,0,0),
d(1992,4,5,9,0,0),
d(1992,10,25,8,0,0),
d(1993,4,4,9,0,0),
d(1993,10,31,8,0,0),
d(1994,4,3,9,0,0),
d(1994,10,30,8,0,0),
d(1995,4,2,9,0,0),
d(1995,10,29,8,0,0),
d(1996,4,7,9,0,0),
d(1996,10,27,8,0,0),
d(1997,4,6,9,0,0),
d(1997,10,26,8,0,0),
d(1998,4,5,9,0,0),
d(1998,10,25,8,0,0),
d(1999,4,4,9,0,0),
d(1999,10,31,8,0,0),
d(2000,4,2,9,0,0),
d(2000,10,29,8,0,0),
d(2001,4,1,9,0,0),
d(2001,10,28,8,0,0),
d(2002,4,7,9,0,0),
d(2002,10,27,8,0,0),
d(2003,4,6,9,0,0),
d(2003,10,26,8,0,0),
d(2004,4,4,9,0,0),
d(2004,10,31,8,0,0),
d(2005,4,3,9,0,0),
d(2005,10,30,8,0,0),
d(2006,4,2,9,0,0),
d(2006,10,29,8,0,0),
d(2007,3,11,9,0,0),
d(2007,11,4,8,0,0),
d(2008,3,9,9,0,0),
d(2008,11,2,8,0,0),
d(2009,3,8,9,0,0),
d(2009,11,1,8,0,0),
d(2010,3,14,9,0,0),
d(2010,11,7,8,0,0),
d(2011,3,13,9,0,0),
d(2011,11,6,8,0,0),
d(2012,3,11,9,0,0),
d(2012,11,4,8,0,0),
d(2013,3,10,9,0,0),
d(2013,11,3,8,0,0),
d(2014,3,9,9,0,0),
d(2014,11,2,8,0,0),
d(2015,3,8,9,0,0),
d(2015,11,1,8,0,0),
d(2016,3,13,9,0,0),
d(2016,11,6,8,0,0),
d(2017,3,12,9,0,0),
d(2017,11,5,8,0,0),
d(2018,3,11,9,0,0),
d(2018,11,4,8,0,0),
d(2019,3,10,9,0,0),
d(2019,11,3,8,0,0),
d(2020,3,8,9,0,0),
d(2020,11,1,8,0,0),
d(2021,3,14,9,0,0),
d(2021,11,7,8,0,0),
d(2022,3,13,9,0,0),
d(2022,11,6,8,0,0),
d(2023,3,12,9,0,0),
d(2023,11,5,8,0,0),
d(2024,3,10,9,0,0),
d(2024,11,3,8,0,0),
d(2025,3,9,9,0,0),
d(2025,11,2,8,0,0),
d(2026,3,8,9,0,0),
d(2026,11,1,8,0,0),
d(2027,3,14,9,0,0),
d(2027,11,7,8,0,0),
d(2028,3,12,9,0,0),
d(2028,11,5,8,0,0),
d(2029,3,11,9,0,0),
d(2029,11,4,8,0,0),
d(2030,3,10,9,0,0),
d(2030,11,3,8,0,0),
d(2031,3,9,9,0,0),
d(2031,11,2,8,0,0),
d(2032,3,14,9,0,0),
d(2032,11,7,8,0,0),
d(2033,3,13,9,0,0),
d(2033,11,6,8,0,0),
d(2034,3,12,9,0,0),
d(2034,11,5,8,0,0),
d(2035,3,11,9,0,0),
d(2035,11,4,8,0,0),
d(2036,3,9,9,0,0),
d(2036,11,2,8,0,0),
d(2037,3,8,9,0,0),
d(2037,11,1,8,0,0),
]
_transition_info = [
i(-28800,0,'PST'),
i(-25200,3600,'PDT'),
i(-28800,0,'PST'),
i(-25200,3600,'PDT'),
i(-28800,0,'PST'),
i(-25200,3600,'PWT'),
i(-25200,3600,'PPT'),
i(-28800,0,'PST'),
i(-21600,7200,'PDDT'),
i(-28800,0,'PST'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
]
Inuvik = Inuvik()
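# Usage sketch: this generated module is normally reached through pytz rather
# than imported directly, e.g.:
#
#   import datetime
#   import pytz
#   tz = pytz.timezone('America/Inuvik')
#   tz.localize(datetime.datetime(2014, 7, 1, 12, 0))  # MDT, UTC-06:00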
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.cloudengine import ce_is_is_instance
from units.modules.network.cloudengine.ce_module import TestCloudEngineModule, load_fixture
from units.modules.utils import set_module_args
class TestCloudEngineLacpModule(TestCloudEngineModule):
module = ce_is_is_instance
def setUp(self):
super(TestCloudEngineLacpModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.cloudengine.ce_is_is_instance.get_nc_config')
self.get_nc_config = self.mock_get_config.start()
self.mock_set_config = patch('ansible.modules.network.cloudengine.ce_is_is_instance.set_nc_config')
self.set_nc_config = self.mock_set_config.start()
self.set_nc_config.return_value = None
def tearDown(self):
super(TestCloudEngineLacpModule, self).tearDown()
self.mock_set_config.stop()
self.mock_get_config.stop()
def test_isis_instance_present(self):
xml_existing = load_fixture('ce_is_is_instance', 'before.txt')
xml_end_state = load_fixture('ce_is_is_instance', 'after.txt')
update = ['isis 100', 'vpn-instance __public__']
self.get_nc_config.side_effect = (xml_existing, xml_end_state)
config = dict(
instance_id=100,
vpn_name='__public__',
state='present')
set_module_args(config)
result = self.execute_module(changed=True)
        self.assertEqual(sorted(result['updates']), sorted(update))
    def test_isis_instance_absent(self):
xml_existing = load_fixture('ce_is_is_instance', 'after.txt')
xml_end_state = load_fixture('ce_is_is_instance', 'before.txt')
update = ['undo isis 100']
self.get_nc_config.side_effect = (xml_existing, xml_end_state)
config = dict(
instance_id=100,
vpn_name='__public__',
state='absent')
set_module_args(config)
result = self.execute_module(changed=True)
        self.assertEqual(sorted(result['updates']), sorted(update))
|
from . import ir_model
from . import trgm_index
|
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class project_issue(osv.osv):
_inherit = 'project.issue'
_columns = {
'project_issue_solution_id': fields.many2one('project.issue.solution', 'Linked Solution'),
'issue_description': fields.html('Issue Description'),
'solution_description': fields.html('Solution Description'),
}
|
"""Unique element dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import contrib_op_loader # pylint: disable=unused-import
from tensorflow.contrib.data.python.ops import gen_dataset_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
def unique():
"""Creates a `Dataset` from another `Dataset`, discarding duplicates.
Use this transformation to produce a dataset that contains one instance of
each unique element in the input. For example:
```python
dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])
# Using `unique()` will drop the duplicate elements.
dataset = dataset.apply(tf.contrib.data.unique()) # ==> { 1, 37, 2 }
```
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
"""
def _apply_fn(dataset):
return _UniqueDataset(dataset)
return _apply_fn
class _UniqueDataset(dataset_ops.Dataset):
"""A `Dataset` contains the unique elements from its input."""
def __init__(self, input_dataset):
"""See `unique()` for details."""
super(_UniqueDataset, self).__init__()
self._input_dataset = input_dataset
if input_dataset.output_types not in (dtypes.int32, dtypes.int64,
dtypes.string):
raise TypeError(
"`tf.contrib.data.unique()` only supports inputs with a single "
"`tf.int32`, `tf.int64`, or `tf.string` component.")
def _as_variant_tensor(self):
return gen_dataset_ops.unique_dataset(
self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
**dataset_ops.flat_structure(self))
@property
def output_classes(self):
return self._input_dataset.output_classes
@property
def output_shapes(self):
return self._input_dataset.output_shapes
@property
def output_types(self):
return self._input_dataset.output_types
|
from sqlalchemy import Float
from sqlalchemy import MetaData
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
meter = Table('meter', meta, autoload=True)
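    # Float(53) asks for a 53-bit mantissa, i.e. DOUBLE PRECISION on most
    # backends, widening counter_volume from single to double precision.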
meter.c.counter_volume.alter(type=Float(53))
|
from ..broker import Broker
class DiscoverySettingBroker(Broker):
controller = "discovery_settings"
def index(self, **kwargs):
"""Lists the available discovery settings. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery setting.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery setting.
:type id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
            :param UnitID: The internal NetMRI identifier of the collector assigned to the discovery setting.
:type UnitID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param UnitID: The internal NetMRI identifier of the collector assigned to the discovery setting.
:type UnitID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD).
:type range_type: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD).
:type range_type: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, created_by, updated_by, ping_sweep_ind, smart_ping_sweep_ind, start_blackout_schedule, blackout_duration, virtual_network_id, start_port_control_blackout_schedule, port_control_blackout_duration, cidr_count.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DiscoverySetting. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, created_by, updated_by, ping_sweep_ind, smart_ping_sweep_ind, start_blackout_schedule, blackout_duration, virtual_network_id, start_port_control_blackout_schedule, port_control_blackout_duration, cidr_count. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_settings: An array of the DiscoverySetting objects that match the specified input criteria.
:rtype discovery_settings: Array of DiscoverySetting
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
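    # Usage sketch (hypothetical connection values; the broker is normally
    # obtained from an InfobloxNetMRI client rather than constructed directly):
    #
    #   client = InfobloxNetMRI(host="netmri", username="admin", password="pw")
    #   broker = client.get_broker('DiscoverySetting')
    #   settings = broker.index(range_type=['CIDR'], limit=100)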
def search(self, **kwargs):
"""Lists the available discovery settings matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD).
:type range_type: Array of String
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier of the collector assigned to the discovery setting.
:type UnitID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. This record will always appear in the returned page, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, created_by, updated_by, ping_sweep_ind, smart_ping_sweep_ind, start_blackout_schedule, blackout_duration, virtual_network_id, start_port_control_blackout_schedule, port_control_blackout_duration, cidr_count.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DiscoverySetting. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, created_by, updated_by, ping_sweep_ind, smart_ping_sweep_ind, start_blackout_schedule, blackout_duration, virtual_network_id, start_port_control_blackout_schedule, port_control_blackout_duration, cidr_count. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against discovery settings, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: range_value, range_type, ping_sweep_ind (1, 0), discovery_status (INCLUDE, EXCLUDE, IGNORE), VirtualNetworkName.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_settings: An array of the DiscoverySetting objects that match the specified input criteria.
:rtype discovery_settings: Array of DiscoverySetting
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
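# Search sketch (hedged, reusing the `broker` from the index example):
# wrapping the query value in '/' switches from containment matching to a
# regular expression search, per the docstring above.
#
#   cidrs = broker.search(range_type=["CIDR"], query="/^10\./", limit=50)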
def find(self, **kwargs):
"""Lists the available discovery settings matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: UnitID, blackout_duration, cidr_count, created_at, created_by, discovery_status, id, ping_sweep_ind, port_control_blackout_duration, range_end, range_end_numeric, range_mask, range_start, range_start_numeric, range_type, range_value, smart_ping_sweep_ind, start_blackout_schedule, start_port_control_blackout_schedule, updated_at, updated_by, virtual_network_id.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_UnitID: The operator to apply to the field UnitID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. UnitID: The internal NetMRI identifier of the collector assigned to the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_UnitID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_UnitID: If op_UnitID is specified, the field named in this input will be compared to the value in UnitID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_UnitID must be specified if op_UnitID is specified.
:type val_f_UnitID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_UnitID: If op_UnitID is specified, this value will be compared to the value in UnitID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_UnitID must be specified if op_UnitID is specified.
:type val_c_UnitID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_blackout_duration: The operator to apply to the field blackout_duration. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. blackout_duration: The blackout duration in minutes. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_blackout_duration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_blackout_duration: If op_blackout_duration is specified, the field named in this input will be compared to the value in blackout_duration using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_blackout_duration must be specified if op_blackout_duration is specified.
:type val_f_blackout_duration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_blackout_duration: If op_blackout_duration is specified, this value will be compared to the value in blackout_duration using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_blackout_duration must be specified if op_blackout_duration is specified.
:type val_c_blackout_duration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cidr_count: The operator to apply to the field cidr_count. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cidr_count: Number of CIDRs in discovery range. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cidr_count: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cidr_count: If op_cidr_count is specified, the field named in this input will be compared to the value in cidr_count using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cidr_count must be specified if op_cidr_count is specified.
:type val_f_cidr_count: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cidr_count: If op_cidr_count is specified, this value will be compared to the value in cidr_count using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cidr_count must be specified if op_cidr_count is specified.
:type val_c_cidr_count: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_at: The operator to apply to the field created_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_at: The date and time the discovery setting was created. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_at: If op_created_at is specified, the field named in this input will be compared to the value in created_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_at must be specified if op_created_at is specified.
:type val_f_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_at: If op_created_at is specified, this value will be compared to the value in created_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_by: The operator to apply to the field created_by. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_by: The user that created the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_by: If op_created_by is specified, the field named in this input will be compared to the value in created_by using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_by must be specified if op_created_by is specified.
:type val_f_created_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_by: If op_created_by is specified, this value will be compared to the value in created_by using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_by must be specified if op_created_by is specified.
:type val_c_created_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_discovery_status: The operator to apply to the field discovery_status. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. discovery_status: The discovery mode of the discovery setting (INCLUDE, EXCLUDE, IGNORE). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_discovery_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_discovery_status: If op_discovery_status is specified, the field named in this input will be compared to the value in discovery_status using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_discovery_status must be specified if op_discovery_status is specified.
:type val_f_discovery_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_discovery_status: If op_discovery_status is specified, this value will be compared to the value in discovery_status using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_discovery_status must be specified if op_discovery_status is specified.
:type val_c_discovery_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ping_sweep_ind: The operator to apply to the field ping_sweep_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ping_sweep_ind: A flag indicating if ping sweeps are used on the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ping_sweep_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ping_sweep_ind: If op_ping_sweep_ind is specified, the field named in this input will be compared to the value in ping_sweep_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ping_sweep_ind must be specified if op_ping_sweep_ind is specified.
:type val_f_ping_sweep_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ping_sweep_ind: If op_ping_sweep_ind is specified, this value will be compared to the value in ping_sweep_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ping_sweep_ind must be specified if op_ping_sweep_ind is specified.
:type val_c_ping_sweep_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_port_control_blackout_duration: The operator to apply to the field port_control_blackout_duration. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. port_control_blackout_duration: Port Control Blackout duration in minutes. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_port_control_blackout_duration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_port_control_blackout_duration: If op_port_control_blackout_duration is specified, the field named in this input will be compared to the value in port_control_blackout_duration using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_port_control_blackout_duration must be specified if op_port_control_blackout_duration is specified.
:type val_f_port_control_blackout_duration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_port_control_blackout_duration: If op_port_control_blackout_duration is specified, this value will be compared to the value in port_control_blackout_duration using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_port_control_blackout_duration must be specified if op_port_control_blackout_duration is specified.
:type val_c_port_control_blackout_duration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_range_end: The operator to apply to the field range_end. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. range_end: The ending IP address for the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_range_end: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_range_end: If op_range_end is specified, the field named in this input will be compared to the value in range_end using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_range_end must be specified if op_range_end is specified.
:type val_f_range_end: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_range_end: If op_range_end is specified, this value will be compared to the value in range_end using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_range_end must be specified if op_range_end is specified.
:type val_c_range_end: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_range_end_numeric: The operator to apply to the field range_end_numeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. range_end_numeric: The ending IP address numeric value for the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_range_end_numeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_range_end_numeric: If op_range_end_numeric is specified, the field named in this input will be compared to the value in range_end_numeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_range_end_numeric must be specified if op_range_end_numeric is specified.
:type val_f_range_end_numeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_range_end_numeric: If op_range_end_numeric is specified, this value will be compared to the value in range_end_numeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_range_end_numeric must be specified if op_range_end_numeric is specified.
:type val_c_range_end_numeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_range_mask: The operator to apply to the field range_mask. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. range_mask: The CIDR mask for the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_range_mask: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_range_mask: If op_range_mask is specified, the field named in this input will be compared to the value in range_mask using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_range_mask must be specified if op_range_mask is specified.
:type val_f_range_mask: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_range_mask: If op_range_mask is specified, this value will be compared to the value in range_mask using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_range_mask must be specified if op_range_mask is specified.
:type val_c_range_mask: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_range_start: The operator to apply to the field range_start. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. range_start: The starting IP address for the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_range_start: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_range_start: If op_range_start is specified, the field named in this input will be compared to the value in range_start using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_range_start must be specified if op_range_start is specified.
:type val_f_range_start: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_range_start: If op_range_start is specified, this value will be compared to the value in range_start using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_range_start must be specified if op_range_start is specified.
:type val_c_range_start: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_range_start_numeric: The operator to apply to the field range_start_numeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. range_start_numeric: The starting IP address numeric value for the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_range_start_numeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_range_start_numeric: If op_range_start_numeric is specified, the field named in this input will be compared to the value in range_start_numeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_range_start_numeric must be specified if op_range_start_numeric is specified.
:type val_f_range_start_numeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_range_start_numeric: If op_range_start_numeric is specified, this value will be compared to the value in range_start_numeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_range_start_numeric must be specified if op_range_start_numeric is specified.
:type val_c_range_start_numeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_range_type: The operator to apply to the field range_type. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_range_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_range_type: If op_range_type is specified, the field named in this input will be compared to the value in range_type using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_range_type must be specified if op_range_type is specified.
:type val_f_range_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_range_type: If op_range_type is specified, this value will be compared to the value in range_type using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_range_type must be specified if op_range_type is specified.
:type val_c_range_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_range_value: The operator to apply to the field range_value. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. range_value: The discovery setting value. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_range_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_range_value: If op_range_value is specified, the field named in this input will be compared to the value in range_value using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_range_value must be specified if op_range_value is specified.
:type val_f_range_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_range_value: If op_range_value is specified, this value will be compared to the value in range_value using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_range_value must be specified if op_range_value is specified.
:type val_c_range_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_smart_ping_sweep_ind: The operator to apply to the field smart_ping_sweep_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. smart_ping_sweep_ind: A flag indicating if smart ping sweep should be used on the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_smart_ping_sweep_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_smart_ping_sweep_ind: If op_smart_ping_sweep_ind is specified, the field named in this input will be compared to the value in smart_ping_sweep_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_smart_ping_sweep_ind must be specified if op_smart_ping_sweep_ind is specified.
:type val_f_smart_ping_sweep_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_smart_ping_sweep_ind: If op_smart_ping_sweep_ind is specified, this value will be compared to the value in smart_ping_sweep_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_smart_ping_sweep_ind must be specified if op_smart_ping_sweep_ind is specified.
:type val_c_smart_ping_sweep_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_start_blackout_schedule: The operator to apply to the field start_blackout_schedule. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. start_blackout_schedule: The blackout start time in cron format. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_start_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_start_blackout_schedule: If op_start_blackout_schedule is specified, the field named in this input will be compared to the value in start_blackout_schedule using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_start_blackout_schedule must be specified if op_start_blackout_schedule is specified.
:type val_f_start_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_start_blackout_schedule: If op_start_blackout_schedule is specified, this value will be compared to the value in start_blackout_schedule using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_start_blackout_schedule must be specified if op_start_blackout_schedule is specified.
:type val_c_start_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_start_port_control_blackout_schedule: The operator to apply to the field start_port_control_blackout_schedule. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. start_port_control_blackout_schedule: Port Control Blackout schedule in CRON format. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_start_port_control_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_start_port_control_blackout_schedule: If op_start_port_control_blackout_schedule is specified, the field named in this input will be compared to the value in start_port_control_blackout_schedule using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_start_port_control_blackout_schedule must be specified if op_start_port_control_blackout_schedule is specified.
:type val_f_start_port_control_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_start_port_control_blackout_schedule: If op_start_port_control_blackout_schedule is specified, this value will be compared to the value in start_port_control_blackout_schedule using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_start_port_control_blackout_schedule must be specified if op_start_port_control_blackout_schedule is specified.
:type val_c_start_port_control_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_at: The operator to apply to the field updated_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_at: The date and time the discovery setting was updated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_at: If op_updated_at is specified, the field named in this input will be compared to the value in updated_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_at must be specified if op_updated_at is specified.
:type val_f_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_at: If op_updated_at is specified, this value will be compared to the value in updated_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_at must be specified if op_updated_at is specified.
:type val_c_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_by: The operator to apply to the field updated_by. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_by: The user that last updated the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_by: If op_updated_by is specified, the field named in this input will be compared to the value in updated_by using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_by must be specified if op_updated_by is specified.
:type val_f_updated_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_by: If op_updated_by is specified, this value will be compared to the value in updated_by using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_by must be specified if op_updated_by is specified.
:type val_c_updated_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_virtual_network_id: The operator to apply to the field virtual_network_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. virtual_network_id: A Virtual Network identifier assigned to the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_virtual_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_virtual_network_id: If op_virtual_network_id is specified, the field named in this input will be compared to the value in virtual_network_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_virtual_network_id must be specified if op_virtual_network_id is specified.
:type val_f_virtual_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_virtual_network_id: If op_virtual_network_id is specified, this value will be compared to the value in virtual_network_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_virtual_network_id must be specified if op_virtual_network_id is specified.
:type val_c_virtual_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. This record will always appear in the returned page, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, created_by, updated_by, ping_sweep_ind, smart_ping_sweep_ind, start_blackout_schedule, blackout_duration, virtual_network_id, start_port_control_blackout_schedule, port_control_blackout_duration, cidr_count.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DiscoverySetting. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, created_by, updated_by, ping_sweep_ind, smart_ping_sweep_ind, start_blackout_schedule, blackout_duration, virtual_network_id, start_port_control_blackout_schedule, port_control_blackout_duration, cidr_count. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_settings: An array of the DiscoverySetting objects that match the specified input criteria.
:rtype discovery_settings: Array of DiscoverySetting
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
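# Find sketch (hedged): each filter is an op_<field> operator plus either a
# val_c_<field> constant or a val_f_<field> field name; for `between`, a
# comma-delimited string with an even number of values is treated as an Array.
#
#   rows = broker.find(op_range_mask="between", val_c_range_mask="16,24",
#                      op_discovery_status="=",
#                      val_c_discovery_status="INCLUDE")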
def show(self, **kwargs):
"""Shows the details for the specified discovery setting.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery setting.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_setting: The discovery setting identified by the specified id.
:rtype discovery_setting: DiscoverySetting
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def create(self, **kwargs):
"""Creates a new discovery setting.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param range_value: The discovery setting value.
:type range_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD).
:type range_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param discovery_status: The discovery mode of the discovery setting (INCLUDE, EXCLUDE, IGNORE).
:type discovery_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param UnitID: The internal NetMRI identifier of the collector assigned to the discovery setting.
:type UnitID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param ping_sweep_ind: A flag indicating if ping sweeps are used on the discovery setting.
:type ping_sweep_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param virtual_network_id: A Virtual Network identifier assigned to the discovery setting.
:type virtual_network_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:``
:param start_blackout_schedule: The blackout start time in cron format.
:type start_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param blackout_duration: The blackout duration in minutes.
:type blackout_duration: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:``
:param start_port_control_blackout_schedule: Port Control Blackout schedule in CRON format.
:type start_port_control_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param port_control_blackout_duration: Port Control Blackout duration in minutes.
:type port_control_blackout_duration: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the newly created discovery setting.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the newly created discovery setting.
:rtype model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return uri: A URI that may be used to retrieve the newly created discovery setting.
:rtype uri: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_setting: The newly created discovery setting.
:rtype discovery_setting: DiscoverySetting
"""
return self.api_request(self._get_method_fullname("create"), kwargs)
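# Create sketch (hedged): range_value, range_type and discovery_status are
# the required inputs per the docstring above; the values are illustrative.
#
#   created = broker.create(range_value="10.0.0.0/24", range_type="CIDR",
#                           discovery_status="INCLUDE", ping_sweep_ind=True)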
def update(self, **kwargs):
"""Updates an existing discovery setting.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery setting.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param range_value: The discovery setting value. If omitted, this field will not be updated.
:type range_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD). If omitted, this field will not be updated.
:type range_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param discovery_status: The discovery mode of the discovery setting (INCLUDE, EXCLUDE, IGNORE). If omitted, this field will not be updated.
:type discovery_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier of the collector assigned to the discovery setting. If omitted, this field will not be updated.
:type UnitID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ping_sweep_ind: A flag indicating if ping sweeps are used on the discovery setting. If omitted, this field will not be updated.
:type ping_sweep_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param virtual_network_id: A Virtual Network identifier assigned to the discovery setting. If omitted, this field will not be updated.
:type virtual_network_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param start_blackout_schedule: The blackout start time in cron format. If omitted, this field will not be updated.
:type start_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param blackout_duration: The blackout duration in minutes. If omitted, this field will not be updated.
:type blackout_duration: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param start_port_control_blackout_schedule: Port Control Blackout schedule in CRON format. If omitted, this field will not be updated.
:type start_port_control_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param port_control_blackout_duration: Port Control Blackout duration in minutes. If omitted, this field will not be updated.
:type port_control_blackout_duration: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the updated discovery setting.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the updated discovery setting.
:rtype model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return uri: A URI that may be used to retrieve the updated discovery setting.
:rtype uri: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_setting: The updated discovery setting.
:rtype discovery_setting: DiscoverySetting
"""
return self.api_request(self._get_method_fullname("update"), kwargs)
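# Update sketch (hedged; the id is a placeholder): only the fields passed
# are changed, omitted fields keep their current values.
#
#   broker.update(id=42, discovery_status="EXCLUDE")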
def destroy(self, **kwargs):
"""Deletes the specified discovery setting from NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery setting.
:type id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("destroy"), kwargs)
def destroy_many(self, **kwargs):
"""Remove several discovery settings
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param ids: The IDs array of the discovery settings to delete. When sending form encoded use ids[].
:type ids: Array
**Outputs**
"""
return self.api_request(self._get_method_fullname("destroy_many"), kwargs)
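# Bulk delete sketch (hedged): ids is an array; when sending form-encoded
# requests the parameter is named ids[].
#
#   broker.destroy_many(ids=[42, 43, 44])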
def import_settings(self, **kwargs):
"""Imports a list of discovery settings into the database
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param file: The contents of the CSV file with the list of discovery settings to be imported.
:type file: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param import_type: The type of discovery settings to import. Valid values are: range, static, seed.
:type import_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: The UnitID of the collector.
:type UnitID: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("import_settings"), kwargs)
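# Import sketch (hedged): `file` carries the CSV contents themselves, not a
# path; the column layout below is an assumption for illustration only.
#
#   csv_data = "range_value,discovery_status\n10.1.0.0/24,INCLUDE\n"
#   broker.import_settings(file=csv_data, import_type="range")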
def seed_information(self, **kwargs):
"""Returns the following information: if at least one seed exists, if at least one seed has been discovered, if any IPv6 range is missing a seed
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("seed_information"), kwargs)
def seed_status(self, **kwargs):
"""List of all Device Seeds and the entire Discovery Status for each one.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` range_start_numeric
:param sort: The data field to use for sorting the output. Default is range_start_numeric.
:type sort: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: The UnitID of the collector.
:type UnitID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against attribute "range_value" and "VirtualNetworkName". Any DiscoverySetting objects with the passed value contained within one or more of those attributes will be returned.
:type query: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param start: The record number to return in the selected page of data. This record will always appear in the returned page, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19.
:type limit: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("seed_status"), kwargs)
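# Seed status sketch (hedged): filter by collector and page through the
# results; `query` matches only range_value and VirtualNetworkName.
#
#   status = broker.seed_status(UnitID=1, query="10.0",
#                               sort="range_start_numeric", dir="asc")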
|
import os
from optparse import OptionParser
from jinja2 import Template
HEADER = '!!AUTO-GENERATED!! Edit bin/crontab/crontab.tpl instead.'
with open(os.path.join(os.path.dirname(__file__), 'crontab.tpl')) as tpl:
    TEMPLATE = tpl.read()
def main():
parser = OptionParser()
parser.add_option('-w', '--webapp',
help='Location of web app (required)')
parser.add_option('-u', '--user',
help=('Prefix cron with this user. '
'Only define for cron.d style crontabs.'))
parser.add_option('-p', '--python', default='/usr/bin/python2.7',
help='Python interpreter to use.')
(opts, args) = parser.parse_args()
if not opts.webapp:
parser.error('-w must be defined')
ctx = {'django': 'cd %s; %s manage.py' % (opts.webapp, opts.python)}
ctx['cron'] = '%s cron' % ctx['django']
if opts.user:
for k, v in ctx.iteritems():
ctx[k] = '%s %s' % (opts.user, v)
# Needs to stay below the opts.user injection.
ctx['python'] = opts.python
ctx['header'] = HEADER
print Template(TEMPLATE).render(**ctx)
if __name__ == '__main__':
main()
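# Usage sketch (hedged; the script filename and paths are assumptions):
#
#   python bin/crontab/gen_crontab.py -w /srv/myapp -u www-data > /etc/cron.d/myapp
#
# This renders crontab.tpl with {{ header }}, {{ django }}, {{ cron }} and
# {{ python }} filled in; -u prefixes each command with the user, as cron.d
# style crontabs require.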
|
"""
Script to visualize the model coordination environments
"""
from __future__ import division, unicode_literals
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import SEPARATION_PLANE
from pymatgen.analysis.chemenv.utils.scripts_utils import visualize
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import Plane
import numpy as np
if __name__ == '__main__':
print('+-------------------------------------------------------+\n'
'| Development script of the ChemEnv utility of pymatgen |\n'
'| Visualization of the model coordination environments |\n'
'+-------------------------------------------------------+\n')
allcg = AllCoordinationGeometries()
vis = None
while True:
cg_symbol = raw_input('Enter symbol of the geometry you want to see, "l" to see the list '
'of existing geometries or "q" to quit : ')
if cg_symbol == 'q':
break
if cg_symbol == 'l':
print(allcg.pretty_print(maxcn=13, additional_info={'nb_hints': True}))
continue
try:
cg = allcg[cg_symbol]
except LookupError:
print('Wrong geometry, try again ...')
continue
print(cg.name)
for ipoint, point in enumerate(cg.points):
print('Point #{:d} : {} {} {}'.format(ipoint, repr(point[0]), repr(point[1]), repr(point[2])))
print('Algorithms used :')
for ialgo, algo in enumerate(cg.algorithms):
print('Algorithm #{:d} :'.format(ialgo))
print(algo)
print('')
# Visualize the separation plane of a given algorithm
sepplane = False
if any([algo.algorithm_type == SEPARATION_PLANE for algo in cg.algorithms]):
test = raw_input('Enter index of the algorithm for which you want to visualize the plane : ')
if test != '':
try:
ialgo = int(test)
algo = cg.algorithms[ialgo]
sepplane = True
except (ValueError, IndexError):
    print('Unable to determine the algorithm/separation_plane you want '
          'to visualize for this geometry. Continuing without it ...')
myfactor = 3.0
if vis is None:
vis = visualize(cg=cg, zoom=1.0, myfactor=myfactor)
else:
vis = visualize(cg=cg, vis=vis, myfactor=myfactor)
cg_points = [myfactor*np.array(pp) for pp in cg.points]
cg_central_site = myfactor*np.array(cg.central_site)
if sepplane:
pts = [cg_points[ii] for ii in algo.plane_points]
if algo.minimum_number_of_points == 2:
pts.append(cg_central_site)
centre = cg_central_site
else:
centre = np.sum(pts, axis=0) / len(pts)
factor = 1.5
target_dist = max([np.dot(pp-centre, pp-centre) for pp in cg_points])
current_dist = np.dot(pts[0] - centre, pts[0] - centre)
factor = factor * target_dist / current_dist
plane = Plane.from_npoints(points=pts)
p1 = centre + factor * (pts[0] - centre)
perp = factor * np.cross(pts[0] - centre, plane.normal_vector)
p2 = centre + perp
p3 = centre - factor * (pts[0] - centre)
p4 = centre - perp
vis.add_faces([[p1, p2, p3, p4]], [1.0, 0.0, 0.0], opacity=0.5)
target_radius = 0.25
radius = 1.5 * target_radius
if algo.minimum_number_of_points == 2:
vis.add_partial_sphere(coords=cg_central_site, radius=radius,
color=[1.0, 0.0, 0.0], start=0, end=360,
opacity=0.5)
for pp in pts:
vis.add_partial_sphere(coords=pp, radius=radius,
color=[1.0, 0.0, 0.0], start=0, end=360,
opacity=0.5)
ps1 = [cg_points[ii] for ii in algo.point_groups[0]]
ps2 = [cg_points[ii] for ii in algo.point_groups[1]]
for pp in ps1:
vis.add_partial_sphere(coords=pp, radius=radius,
color=[0.0, 1.0, 0.0], start=0, end=360,
opacity=0.5)
for pp in ps2:
vis.add_partial_sphere(coords=pp, radius=radius,
color=[0.0, 0.0, 1.0], start=0, end=360,
opacity=0.5)
vis.show()
|
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
try:
import sqlite3
except ImportError:
pass
import logging
from lib.core.convert import utf8encode
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapMissingDependence
from plugins.generic.connector import Connector as GenericConnector
class Connector(GenericConnector):
"""
Homepage: http://pysqlite.googlecode.com/ and http://packages.ubuntu.com/quantal/python-sqlite
User guide: http://docs.python.org/release/2.5/lib/module-sqlite3.html
API: http://docs.python.org/library/sqlite3.html
Debian package: python-sqlite (SQLite 2), python-pysqlite3 (SQLite 3)
License: MIT
Possible connectors: http://wiki.python.org/moin/SQLite
"""
def __init__(self):
GenericConnector.__init__(self)
self.__sqlite = sqlite3
def connect(self):
self.initConnection()
self.checkFileDb()
try:
self.connector = self.__sqlite.connect(database=self.db, check_same_thread=False, timeout=conf.timeout)
cursor = self.connector.cursor()
cursor.execute("SELECT * FROM sqlite_master")
cursor.close()
except (self.__sqlite.DatabaseError, self.__sqlite.OperationalError), msg:
warnMsg = "unable to connect using SQLite 3 library, trying with SQLite 2"
logger.warn(warnMsg)
try:
try:
import sqlite
except ImportError:
errMsg = "sqlmap requires 'python-sqlite' third-party library "
errMsg += "in order to directly connect to the database '%s'" % self.db
raise SqlmapMissingDependence(errMsg)
self.__sqlite = sqlite
self.connector = self.__sqlite.connect(database=self.db, check_same_thread=False, timeout=conf.timeout)
except (self.__sqlite.DatabaseError, self.__sqlite.OperationalError), msg:
raise SqlmapConnectionException(msg[0])
self.initCursor()
self.printConnected()
def fetchall(self):
try:
return self.cursor.fetchall()
except self.__sqlite.OperationalError, msg:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % msg[0])
return None
def execute(self, query):
try:
self.cursor.execute(utf8encode(query))
except self.__sqlite.OperationalError, msg:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % msg[0])
except self.__sqlite.DatabaseError, msg:
raise SqlmapConnectionException(msg[0])
self.connector.commit()
def select(self, query):
self.execute(query)
return self.fetchall()
|
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = u'''
---
module: bzr
author: "André Paramés (@andreparames)"
version_added: "1.1"
short_description: Deploy software (or files) from bzr branches
description:
- Manage I(bzr) branches to deploy files or software.
options:
name:
required: true
aliases: [ 'parent' ]
description:
- SSH or HTTP protocol address of the parent branch.
dest:
required: true
description:
- Absolute path of where the branch should be cloned to.
version:
required: false
default: "head"
description:
- What version of the branch to clone. This can be the
bzr revno or revid.
force:
required: false
default: "no"
choices: [ 'yes', 'no' ]
description:
- If C(yes), any modified files in the working
tree will be discarded. Before 1.9 the default
value was "yes".
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to bzr executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
'''
EXAMPLES = '''
- bzr:
name: 'bzr+ssh://foosball.example.org/path/to/branch'
dest: /srv/checkout
version: 22
'''
import os
import re
class Bzr(object):
def __init__(self, module, parent, dest, version, bzr_path):
self.module = module
self.parent = parent
self.dest = dest
self.version = version
self.bzr_path = bzr_path
def _command(self, args_list, cwd=None, **kwargs):
(rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
return (rc, out, err)
def get_version(self):
'''samples the version of the bzr branch'''
cmd = "%s revno" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
revno = stdout.strip()
return revno
def clone(self):
'''makes a new bzr branch if it does not already exist'''
dest_dirname = os.path.dirname(self.dest)
        try:
            os.makedirs(dest_dirname)
        except OSError:
            # the destination's parent directory may already exist
            pass
if self.version.lower() != 'head':
args_list = ["branch", "-r", self.version, self.parent, self.dest]
else:
args_list = ["branch", self.parent, self.dest]
return self._command(args_list, check_rc=True, cwd=dest_dirname)
def has_local_mods(self):
cmd = "%s status -S" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
lines = stdout.splitlines()
lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines)
return len(lines) > 0
def reset(self, force):
'''
Resets the index and working tree to head.
Discards any changes to tracked files in the working
tree since that commit.
'''
if not force and self.has_local_mods():
self.module.fail_json(msg="Local modifications exist in branch (force=no).")
return self._command(["revert"], check_rc=True, cwd=self.dest)
def fetch(self):
'''updates branch from remote sources'''
if self.version.lower() != 'head':
(rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
else:
(rc, out, err) = self._command(["pull"], cwd=self.dest)
if rc != 0:
self.module.fail_json(msg="Failed to pull")
return (rc, out, err)
def switch_version(self):
'''once pulled, switch to a particular revno or revid'''
if self.version.lower() != 'head':
args_list = ["revert", "-r", self.version]
else:
args_list = ["revert"]
return self._command(args_list, check_rc=True, cwd=self.dest)
def main():
module = AnsibleModule(
argument_spec = dict(
dest=dict(required=True, type='path'),
name=dict(required=True, aliases=['parent']),
version=dict(default='head'),
force=dict(default='no', type='bool'),
executable=dict(default=None),
)
)
dest = module.params['dest']
parent = module.params['name']
version = module.params['version']
force = module.params['force']
bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
rc, out, err, status = (0, None, None, None)
bzr = Bzr(module, parent, dest, version, bzr_path)
# if there is no bzr configuration, do a branch operation
# else pull and switch the version
before = None
local_mods = False
if not os.path.exists(bzrconfig):
(rc, out, err) = bzr.clone()
else:
# else do a pull
local_mods = bzr.has_local_mods()
before = bzr.get_version()
(rc, out, err) = bzr.reset(force)
if rc != 0:
module.fail_json(msg=err)
(rc, out, err) = bzr.fetch()
if rc != 0:
module.fail_json(msg=err)
# switch to version specified regardless of whether
# we cloned or pulled
(rc, out, err) = bzr.switch_version()
# determine if we changed anything
after = bzr.get_version()
changed = False
if before != after or local_mods:
changed = True
module.exit_json(changed=changed, before=before, after=after)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
import Quartz
from AppKit import NSEvent, NSScreen
from .base import PyMouseMeta, PyMouseEventMeta
pressID = [None, Quartz.kCGEventLeftMouseDown,
Quartz.kCGEventRightMouseDown, Quartz.kCGEventOtherMouseDown]
releaseID = [None, Quartz.kCGEventLeftMouseUp,
Quartz.kCGEventRightMouseUp, Quartz.kCGEventOtherMouseUp]
class PyMouse(PyMouseMeta):
def press(self, x, y, button=1):
event = Quartz.CGEventCreateMouseEvent(None,
pressID[button],
(x, y),
button - 1)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def release(self, x, y, button=1):
event = Quartz.CGEventCreateMouseEvent(None,
releaseID[button],
(x, y),
button - 1)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def move(self, x, y):
move = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventMouseMoved, (x, y), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, move)
def drag(self, x, y):
drag = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventLeftMouseDragged, (x, y), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, drag)
def position(self):
loc = NSEvent.mouseLocation()
return loc.x, Quartz.CGDisplayPixelsHigh(0) - loc.y
def screen_size(self):
return NSScreen.mainScreen().frame().size.width, NSScreen.mainScreen().frame().size.height
def scroll(self, vertical=None, horizontal=None, depth=None):
#Local submethod for generating Mac scroll events in one axis at a time
def scroll_event(y_move=0, x_move=0, z_move=0, n=1):
for _ in range(abs(n)):
scrollWheelEvent = Quartz.CGEventCreateScrollWheelEvent(
None, # No source
Quartz.kCGScrollEventUnitLine, # Unit of measurement is lines
3, # Number of wheels(dimensions)
y_move,
x_move,
z_move)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, scrollWheelEvent)
#Execute vertical then horizontal then depth scrolling events
if vertical is not None:
vertical = int(vertical)
if vertical == 0: # Do nothing with 0 distance
pass
elif vertical > 0: # Scroll up if positive
scroll_event(y_move=1, n=vertical)
else: # Scroll down if negative
scroll_event(y_move=-1, n=abs(vertical))
if horizontal is not None:
horizontal = int(horizontal)
if horizontal == 0: # Do nothing with 0 distance
pass
elif horizontal > 0: # Scroll right if positive
scroll_event(x_move=1, n=horizontal)
else: # Scroll left if negative
scroll_event(x_move=-1, n=abs(horizontal))
if depth is not None:
depth = int(depth)
if depth == 0: # Do nothing with 0 distance
pass
            elif depth > 0:  # Scroll "out" if positive
scroll_event(z_move=1, n=depth)
else: # Scroll "in" if negative
scroll_event(z_move=-1, n=abs(depth))
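    # Usage sketch (illustration only): PyMouse().scroll(vertical=-5) posts five
    # one-line scroll-down events, and scroll(horizontal=3) scrolls right by three lines.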
class PyMouseEvent(PyMouseEventMeta):
def run(self):
tap = Quartz.CGEventTapCreate(
Quartz.kCGSessionEventTap,
Quartz.kCGHeadInsertEventTap,
Quartz.kCGEventTapOptionDefault,
Quartz.CGEventMaskBit(Quartz.kCGEventMouseMoved) |
Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseUp) |
Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseUp) |
Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseUp),
self.handler,
None)
loopsource = Quartz.CFMachPortCreateRunLoopSource(None, tap, 0)
loop = Quartz.CFRunLoopGetCurrent()
Quartz.CFRunLoopAddSource(loop, loopsource, Quartz.kCFRunLoopDefaultMode)
Quartz.CGEventTapEnable(tap, True)
while self.state:
Quartz.CFRunLoopRunInMode(Quartz.kCFRunLoopDefaultMode, 5, False)
def handler(self, proxy, type, event, refcon):
(x, y) = Quartz.CGEventGetLocation(event)
if type in pressID:
self.click(x, y, pressID.index(type), True)
elif type in releaseID:
self.click(x, y, releaseID.index(type), False)
else:
self.move(x, y)
if self.capture:
Quartz.CGEventSetType(event, Quartz.kCGEventNull)
return event
|
"""Gradients for operators defined in linalg_ops.py.
Useful reference for derivative formulas is
An extended collection of matrix derivative results for forward and reverse
mode algorithmic differentiation by Mike Giles:
http://eprints.maths.ox.ac.uk/1079/1/NA-08-01.pdf
A detailed derivation of formulas for backpropagating through spectral layers
(SVD and Eig) by Ionescu, Vantzos & Sminchisescu:
https://arxiv.org/pdf/1509.07838v4.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as _linalg
@ops.RegisterGradient("MatrixInverse")
def _MatrixInverseGrad(op, grad):
"""Gradient for MatrixInverse."""
ainv = op.outputs[0]
return -math_ops.matmul(
ainv, math_ops.matmul(grad, ainv, adjoint_b=True), adjoint_a=True)
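# Sanity-check sketch (not part of the library): for real matrices the rule
# above reduces to dL/dA = -inv(A)^T G inv(A)^T when L = sum(G * inv(A)), with
# G the incoming gradient. The hypothetical helper below, never called here,
# verifies that against central finite differences using plain NumPy.
def _matrix_inverse_grad_fd_check_sketch():
  import numpy as np  # local import; illustration only
  rng = np.random.RandomState(0)
  a = rng.randn(3, 3) + 3.0 * np.eye(3)  # diagonal shift keeps a invertible
  g = rng.randn(3, 3)  # stand-in for the incoming gradient w.r.t. inv(a)
  ainv = np.linalg.inv(a)
  analytic = -ainv.T.dot(g).dot(ainv.T)
  numeric = np.zeros_like(a)
  eps = 1e-6
  for i in range(3):
    for j in range(3):
      ap, am = a.copy(), a.copy()
      ap[i, j] += eps
      am[i, j] -= eps
      numeric[i, j] = (np.sum(g * np.linalg.inv(ap)) -
                       np.sum(g * np.linalg.inv(am))) / (2.0 * eps)
  assert np.allclose(analytic, numeric, atol=1e-5)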
@ops.RegisterGradient("MatrixDeterminant")
def _MatrixDeterminantGrad(op, grad):
"""Gradient for MatrixDeterminant."""
a = op.inputs[0]
c = op.outputs[0]
a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True)
multipliers = array_ops.reshape(grad * c,
array_ops.concat([array_ops.shape(c), [1, 1]],
0))
return multipliers * a_adj_inv
@ops.RegisterGradient("Cholesky")
def _CholeskyGrad(op, grad):
"""Gradient for Cholesky."""
# Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}
l = op.outputs[0]
num_rows = array_ops.shape(l)[-1]
batch_shape = array_ops.shape(l)[:-2]
l_inverse = linalg_ops.matrix_triangular_solve(l,
linalg_ops.eye(
num_rows,
batch_shape=batch_shape,
dtype=l.dtype))
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
grad_a = math_ops.matmul(
math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
grad_a += _linalg.adjoint(grad_a)
return grad_a * 0.5
@ops.RegisterGradient("Qr")
def _QrGrad(op, dq, dr):
"""Gradient for Qr."""
q, r = op.outputs
if q.dtype.is_complex:
raise NotImplementedError("QrGrad not implemented for dtype: %s" % q.dtype)
if (r.shape.ndims is None or r.shape.as_list()[-2] is None or
r.shape.as_list()[-1] is None):
raise NotImplementedError("QrGrad not implemented with dynamic shapes.")
if r.shape[-2].value != r.shape[-1].value:
raise NotImplementedError("QrGrad not implemented when ncols > nrows "
"or full_matrices is true and ncols != nrows.")
qdq = math_ops.matmul(q, dq, adjoint_a=True)
qdq_ = qdq - _linalg.adjoint(qdq)
rdr = math_ops.matmul(r, dr, adjoint_b=True)
rdr_ = rdr - _linalg.adjoint(rdr)
tril = array_ops.matrix_band_part(qdq_ + rdr_, -1, 0)
def _TriangularSolve(x, r):
"""Equiv to matmul(x, adjoint(matrix_inverse(r))) if r is upper-tri."""
return _linalg.adjoint(
linalg_ops.matrix_triangular_solve(
r, _linalg.adjoint(x), lower=False, adjoint=False))
grad_a = math_ops.matmul(q, dr + _TriangularSolve(tril, r))
grad_b = _TriangularSolve(dq - math_ops.matmul(q, qdq), r)
return grad_a + grad_b
@ops.RegisterGradient("MatrixSolve")
def _MatrixSolveGrad(op, grad):
"""Gradient for MatrixSolve."""
a = op.inputs[0]
adjoint_a = op.get_attr("adjoint")
c = op.outputs[0]
grad_b = linalg_ops.matrix_solve(a, grad, adjoint=not adjoint_a)
if adjoint_a:
grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)
else:
grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)
return (grad_a, grad_b)
@ops.RegisterGradient("MatrixSolveLs")
def _MatrixSolveLsGrad(op, grad):
"""Gradients for MatrixSolveLs."""
# TODO(rmlarsen): The implementation could be more efficient:
# a) Output the Cholesky factorization from forward op instead of
# recomputing it here.
# b) Implement a symmetric rank-k update op instead of computing
# x*z + transpose(x*z). This pattern occurs other places in TensorFlow.
def _Overdetermined(op, grad):
"""Gradients for the overdetermined case of MatrixSolveLs.
This is the backprop for the solution to the normal equations of the first
kind:
X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B
which solve the least squares problem
min ||A * X - B||_F^2 + lambda ||X||_F^2.
"""
a = op.inputs[0]
b = op.inputs[1]
x = op.outputs[0]
l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
# pylint: disable=protected-access
chol = linalg_ops._RegularizedGramianCholesky(
a, l2_regularizer=l2_regularizer, first_kind=True)
# pylint: enable=protected-access
# Temporary z = (A^T * A + lambda * I)^{-1} * grad.
z = linalg_ops.cholesky_solve(chol, grad)
xzt = math_ops.matmul(x, z, adjoint_b=True)
zx_sym = xzt + array_ops.matrix_transpose(xzt)
grad_a = -math_ops.matmul(a, zx_sym) + math_ops.matmul(b, z, adjoint_b=True)
grad_b = math_ops.matmul(a, z)
return (grad_a, grad_b, None)
def _Underdetermined(op, grad):
"""Gradients for the underdetermined case of MatrixSolveLs.
This is the backprop for the solution to the normal equations of the second
kind:
X = F(A, B) = A * (A*A^T + lambda*I)^{-1} * B
that (for lambda=0) solve the least squares problem
min ||X||_F subject to A*X = B.
"""
a = op.inputs[0]
b = op.inputs[1]
l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
# pylint: disable=protected-access
chol = linalg_ops._RegularizedGramianCholesky(
a, l2_regularizer=l2_regularizer, first_kind=False)
# pylint: enable=protected-access
grad_b = linalg_ops.cholesky_solve(chol, math_ops.matmul(a, grad))
# Temporary tmp = (A * A^T + lambda * I)^{-1} * B.
tmp = linalg_ops.cholesky_solve(chol, b)
a1 = math_ops.matmul(tmp, a, adjoint_a=True)
a1 = -math_ops.matmul(grad_b, a1)
a2 = grad - math_ops.matmul(a, grad_b, adjoint_a=True)
a2 = math_ops.matmul(tmp, a2, adjoint_b=True)
grad_a = a1 + a2
return (grad_a, grad_b, None)
fast = op.get_attr("fast")
if fast is False:
raise ValueError("Gradient not defined for fast=False")
matrix_shape = op.inputs[0].get_shape()[-2:]
if matrix_shape.is_fully_defined():
if matrix_shape[-2] >= matrix_shape[-1]:
return _Overdetermined(op, grad)
else:
return _Underdetermined(op, grad)
else:
# We have to defer determining the shape to runtime and use
# conditional execution of the appropriate graph.
matrix_shape = array_ops.shape(op.inputs[0])[-2:]
return control_flow_ops.cond(matrix_shape[-2] >= matrix_shape[-1],
lambda: _Overdetermined(op, grad),
lambda: _Underdetermined(op, grad))
@ops.RegisterGradient("MatrixTriangularSolve")
def _MatrixTriangularSolveGrad(op, grad):
"""Gradient for MatrixTriangularSolve."""
a = op.inputs[0]
adjoint_a = op.get_attr("adjoint")
lower_a = op.get_attr("lower")
c = op.outputs[0]
grad_b = linalg_ops.matrix_triangular_solve(
a, grad, lower=lower_a, adjoint=not adjoint_a)
if adjoint_a:
grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)
else:
grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)
if lower_a:
grad_a = array_ops.matrix_band_part(grad_a, -1, 0)
else:
grad_a = array_ops.matrix_band_part(grad_a, 0, -1)
return (grad_a, grad_b)
@ops.RegisterGradient("SelfAdjointEigV2")
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
"""Gradient for SelfAdjointEigV2."""
e = op.outputs[0]
compute_v = op.get_attr("compute_v")
# a = op.inputs[0], which satisfies
  # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,:,i]
with ops.control_dependencies([grad_e, grad_v]):
if compute_v:
v = op.outputs[1]
# Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
# Notice that because of the term involving f, the gradient becomes
# infinite (or NaN in practice) when eigenvalues are not unique.
# Mathematically this should not be surprising, since for (k-fold)
# degenerate eigenvalues, the corresponding eigenvectors are only defined
# up to arbitrary rotation in a (k-dimensional) subspace.
f = array_ops.matrix_set_diag(
math_ops.reciprocal(
array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
array_ops.zeros_like(e))
grad_a = math_ops.matmul(
v,
math_ops.matmul(
array_ops.matrix_diag(grad_e) +
f * math_ops.matmul(v, grad_v, adjoint_a=True),
v,
adjoint_b=True))
else:
_, v = linalg_ops.self_adjoint_eig(op.inputs[0])
grad_a = math_ops.matmul(v,
math_ops.matmul(
array_ops.matrix_diag(grad_e),
v,
adjoint_b=True))
# The forward op only depends on the lower triangular part of a, so here we
# symmetrize and take the lower triangle
grad_a = array_ops.matrix_band_part(grad_a + _linalg.adjoint(grad_a), -1, 0)
grad_a = array_ops.matrix_set_diag(grad_a,
0.5 * array_ops.matrix_diag_part(grad_a))
return grad_a
@ops.RegisterGradient("Svd")
def _SvdGrad(op, grad_s, grad_u, grad_v):
"""Gradient for the singular value decomposition."""
# The derivation for the compute_uv=False case, and most of
# the derivation for the full_matrices=True case, are in
# Giles' paper (see reference at top of file). A derivation for
# the full_matrices=False case is available at
# https://j-towns.github.io/papers/svd-derivative.pdf
a = op.inputs[0]
a_shape = a.get_shape().with_rank_at_least(2)
grad_s_mat = array_ops.matrix_diag(grad_s)
if not op.get_attr("compute_uv"):
s, u, v = linalg_ops.svd(a, compute_uv=True)
grad_a = math_ops.matmul(u, math_ops.matmul(grad_s_mat, v, adjoint_b=True))
grad_a.set_shape(a_shape)
return grad_a
full_matrices = op.get_attr("full_matrices")
# TODO(rmlarsen): Make this work with complex types.
if a.dtype.is_complex:
raise NotImplementedError(
"SVD gradient is not implemented for complex types and "
"compute_uv=True.")
grad_u_shape = grad_u.get_shape().with_rank_at_least(2)
grad_v_shape = grad_v.get_shape().with_rank_at_least(2)
m = a_shape[-2].merge_with(grad_u_shape[-2])
n = a_shape[-1].merge_with(grad_v_shape[-2])
batch_shape = a_shape[:-2].merge_with(grad_u_shape[:-2]).merge_with(
grad_v_shape[:-2])
a_shape = batch_shape.concatenate([m, n])
m = a_shape[-2].value
n = a_shape[-1].value
# TODO(rmlarsen): Make this work with placeholders.
if m is None or n is None:
raise NotImplementedError(
"SVD gradient has not been implemented for input with unknown "
"inner matrix shape.")
s = op.outputs[0]
u = op.outputs[1]
v = op.outputs[2]
use_adjoint = False
if m > n:
# Compute the gradient for A^H = V * S^T * U^H, and (implicitly) take the
# Hermitian transpose of the gradient at the end.
use_adjoint = True
m, n = n, m
u, v = v, u
grad_u, grad_v = grad_v, grad_u
with ops.control_dependencies([grad_s, grad_u, grad_v]):
if full_matrices and abs(m - n) > 1:
raise NotImplementedError(
"svd gradient is not implemented for abs(m - n) > 1 "
"when full_matrices is True")
s_mat = array_ops.matrix_diag(s)
s2 = math_ops.square(s)
# NOTICE: Because of the term involving f, the gradient becomes
# infinite (or NaN in practice) when singular values are not unique.
# Mathematically this should not be surprising, since for (k-fold)
# degenerate singular values, the corresponding singular vectors are
    # only defined up to rotation within a (k-dimensional) subspace. In practice, this can
# lead to numerical instability when singular values are close but not
# exactly equal.
f = array_ops.matrix_set_diag(
math_ops.reciprocal(
array_ops.expand_dims(s2, -2) - array_ops.expand_dims(s2, -1)),
array_ops.zeros_like(s))
s_inv_mat = array_ops.matrix_diag(math_ops.reciprocal(s))
v1 = v[..., :, :m]
grad_v1 = grad_v[..., :, :m]
u_gu = math_ops.matmul(u, grad_u, adjoint_a=True)
v_gv = math_ops.matmul(v1, grad_v1, adjoint_a=True)
f_u = f * u_gu
f_v = f * v_gv
term1_nouv = (
grad_s_mat + math_ops.matmul(f_u + _linalg.adjoint(f_u), s_mat) +
math_ops.matmul(s_mat, f_v + _linalg.adjoint(f_v)))
term1 = math_ops.matmul(u, math_ops.matmul(term1_nouv, v1, adjoint_b=True))
if m == n:
grad_a_before_transpose = term1
else:
gv1t = array_ops.matrix_transpose(grad_v1)
gv1t_v1 = math_ops.matmul(gv1t, v1)
term2_nous = gv1t - math_ops.matmul(gv1t_v1, v1, adjoint_b=True)
if full_matrices:
v2 = v[..., :, m:n]
grad_v2 = grad_v[..., :, m:n]
v1t_gv2 = math_ops.matmul(v1, grad_v2, adjoint_a=True)
term2_nous -= math_ops.matmul(v1t_gv2, v2, adjoint_b=True)
u_s_inv = math_ops.matmul(u, s_inv_mat)
term2 = math_ops.matmul(u_s_inv, term2_nous)
grad_a_before_transpose = term1 + term2
if use_adjoint:
grad_a = array_ops.matrix_transpose(grad_a_before_transpose)
else:
grad_a = grad_a_before_transpose
grad_a.set_shape(a_shape)
return grad_a
|
"""Constants for the Aftership integration."""
from __future__ import annotations
from datetime import timedelta
from typing import Final
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
DOMAIN: Final = "aftership"
ATTRIBUTION: Final = "Information provided by AfterShip"
ATTR_TRACKINGS: Final = "trackings"
BASE: Final = "https://track.aftership.com/"
CONF_SLUG: Final = "slug"
CONF_TITLE: Final = "title"
CONF_TRACKING_NUMBER: Final = "tracking_number"
DEFAULT_NAME: Final = "aftership"
UPDATE_TOPIC: Final = f"{DOMAIN}_update"
ICON: Final = "mdi:package-variant-closed"
MIN_TIME_BETWEEN_UPDATES: Final = timedelta(minutes=15)
SERVICE_ADD_TRACKING: Final = "add_tracking"
SERVICE_REMOVE_TRACKING: Final = "remove_tracking"
ADD_TRACKING_SERVICE_SCHEMA: Final = vol.Schema(
{
vol.Required(CONF_TRACKING_NUMBER): cv.string,
vol.Optional(CONF_TITLE): cv.string,
vol.Optional(CONF_SLUG): cv.string,
}
)
REMOVE_TRACKING_SERVICE_SCHEMA: Final = vol.Schema(
{vol.Required(CONF_SLUG): cv.string, vol.Required(CONF_TRACKING_NUMBER): cv.string}
)
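# Minimal usage sketch (not part of the integration): voluptuous Schema objects
# are callable and either return the validated payload or raise vol.Invalid.
# The tracking number and title below are made-up example values.
_EXAMPLE_VALIDATED_CALL: Final = ADD_TRACKING_SERVICE_SCHEMA(
    {CONF_TRACKING_NUMBER: "RA123456789CN", CONF_TITLE: "New shoes"}
)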
|
"""Describe group states."""
from homeassistant.components.group import GroupIntegrationRegistry
from homeassistant.const import STATE_OK, STATE_PROBLEM
from homeassistant.core import HomeAssistant, callback
@callback
def async_describe_on_off_states(
hass: HomeAssistant, registry: GroupIntegrationRegistry
) -> None:
"""Describe group on off states."""
registry.on_off_states({STATE_PROBLEM}, STATE_OK)
|
import operator
import unittest
import numpy
import six
from cupy import testing
@testing.gpu
class TestArrayElementwiseOp(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_scalar_op(self, op, xp, dtype, swap=False):
a = testing.shaped_arange((2, 3), xp, dtype)
if swap:
return op(dtype(2), a)
else:
return op(a, dtype(2))
def test_add_scalar(self):
self.check_array_scalar_op(operator.add)
def test_radd_scalar(self):
self.check_array_scalar_op(operator.add, swap=True)
def test_iadd_scalar(self):
self.check_array_scalar_op(operator.iadd)
def test_sub_scalar(self):
self.check_array_scalar_op(operator.sub)
def test_rsub_scalar(self):
self.check_array_scalar_op(operator.sub, swap=True)
def test_isub_scalar(self):
self.check_array_scalar_op(operator.isub)
def test_mul_scalar(self):
self.check_array_scalar_op(operator.mul)
def test_rmul_scalar(self):
self.check_array_scalar_op(operator.mul, swap=True)
def test_imul_scalar(self):
self.check_array_scalar_op(operator.imul)
def test_truediv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.truediv)
def test_rtruediv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.truediv, swap=True)
def test_itruediv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.itruediv)
def test_div_scalar(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.div)
def test_rdiv_scalar(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.div, swap=True)
def test_idiv_scalar(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.idiv)
def test_floordiv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.floordiv)
def test_rfloordiv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.floordiv, swap=True)
def test_ifloordiv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.ifloordiv)
def test_pow_scalar(self):
self.check_array_scalar_op(operator.pow)
def test_rpow_scalar(self):
self.check_array_scalar_op(operator.pow, swap=True)
def test_ipow_scalar(self):
self.check_array_scalar_op(operator.ipow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_array_op(self, op, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
b = testing.shaped_reverse_arange((2, 3), xp, dtype)
return op(a, b)
    def test_add_array(self):
        self.check_array_array_op(operator.add)
    def test_iadd_array(self):
        self.check_array_array_op(operator.iadd)
    def test_sub_array(self):
        self.check_array_array_op(operator.sub)
    def test_isub_array(self):
        self.check_array_array_op(operator.isub)
    def test_mul_array(self):
        self.check_array_array_op(operator.mul)
    def test_imul_array(self):
        self.check_array_array_op(operator.imul)
    def test_truediv_array(self):
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.truediv)
    def test_itruediv_array(self):
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.itruediv)
    def test_div_array(self):
        if six.PY3:
            return
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.div)
    def test_idiv_array(self):
        if six.PY3:
            return
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.idiv)
    def test_floordiv_array(self):
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.floordiv)
    def test_ifloordiv_array(self):
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.ifloordiv)
    def test_pow_array(self):
        self.check_array_array_op(operator.pow)
    def test_ipow_array(self):
        self.check_array_array_op(operator.ipow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_broadcasted_op(self, op, xp, dtype):
        a = testing.shaped_arange((2, 3), xp, dtype)
        b = testing.shaped_arange((2, 1), xp, dtype)
return op(a, b)
def test_broadcasted_add(self):
self.check_array_broadcasted_op(operator.add)
def test_broadcasted_iadd(self):
self.check_array_broadcasted_op(operator.iadd)
def test_broadcasted_sub(self):
self.check_array_broadcasted_op(operator.sub)
def test_broadcasted_isub(self):
self.check_array_broadcasted_op(operator.isub)
def test_broadcasted_mul(self):
self.check_array_broadcasted_op(operator.mul)
def test_broadcasted_imul(self):
self.check_array_broadcasted_op(operator.imul)
def test_broadcasted_truediv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.truediv)
def test_broadcasted_itruediv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.itruediv)
def test_broadcasted_div(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.div)
def test_broadcasted_idiv(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.idiv)
def test_broadcasted_floordiv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.floordiv)
def test_broadcasted_ifloordiv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.ifloordiv)
def test_broadcasted_pow(self):
self.check_array_broadcasted_op(operator.pow)
def test_broadcasted_ipow(self):
self.check_array_broadcasted_op(operator.ipow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_doubly_broadcasted_op(self, op, xp, dtype):
a = testing.shaped_arange((2, 1, 3), xp, dtype)
b = testing.shaped_arange((3, 1), xp, dtype)
return op(a, b)
def test_doubly_broadcasted_add(self):
self.check_array_doubly_broadcasted_op(operator.add)
def test_doubly_broadcasted_sub(self):
self.check_array_doubly_broadcasted_op(operator.sub)
def test_doubly_broadcasted_mul(self):
self.check_array_doubly_broadcasted_op(operator.mul)
def test_doubly_broadcasted_truediv(self):
numpy.seterr(divide='ignore', invalid='ignore')
self.check_array_doubly_broadcasted_op(operator.truediv)
def test_doubly_broadcasted_floordiv(self):
numpy.seterr(divide='ignore')
self.check_array_doubly_broadcasted_op(operator.floordiv)
def test_doubly_broadcasted_div(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_doubly_broadcasted_op(operator.div)
def test_doubly_broadcasted_pow(self):
self.check_array_doubly_broadcasted_op(operator.pow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_reversed_op(self, op, xp, dtype):
a = testing.shaped_arange((5,), xp, dtype)
return op(a, a[::-1])
def test_array_reversed_add(self):
self.check_array_reversed_op(operator.add)
def test_array_reversed_sub(self):
self.check_array_reversed_op(operator.sub)
def test_array_reversed_mul(self):
self.check_array_reversed_op(operator.mul)
|
from itertools import chain
from django.contrib.sites.models import Site
from django.core.urlresolvers import NoReverseMatch, reverse_lazy
from django.forms.widgets import Select, MultiWidget, TextInput
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from cms.forms.utils import get_site_choices, get_page_choices
from cms.models import Page, PageUser
from cms.templatetags.cms_admin import CMS_ADMIN_ICON_BASE
from cms.utils.compat.dj import force_unicode
class PageSelectWidget(MultiWidget):
"""A widget that allows selecting a page by first selecting a site and then
a page on that site in a two step process.
"""
def __init__(self, site_choices=None, page_choices=None, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
self.choices = []
super(PageSelectWidget, self).__init__((Select, Select, Select), attrs)
def decompress(self, value):
"""
receives a page_id in value and returns the site_id and page_id
of that page or the current site_id and None if no page_id is given.
"""
if value:
page = Page.objects.get(pk=value)
site = page.site
return [site.pk, page.pk, page.pk]
site = Site.objects.get_current()
        return [site.pk, None, None]
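        # (decompress example with hypothetical pks: decompress(42) -> [1, 42, 42])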
def _has_changed(self, initial, data):
# THIS IS A COPY OF django.forms.widgets.Widget._has_changed()
# (except for the first if statement)
"""
Return True if data differs from initial.
"""
# For purposes of seeing whether something has changed, None is
        # the same as an empty string; if the data or initial value we get
# is None, replace it w/ u''.
if data is None or (len(data)>=2 and data[1] in [None,'']):
data_value = u''
else:
data_value = data
if initial is None:
initial_value = u''
else:
initial_value = initial
if force_unicode(initial_value) != force_unicode(data_value):
return True
return False
def render(self, name, value, attrs=None):
# THIS IS A COPY OF django.forms.widgets.MultiWidget.render()
# (except for the last line)
# value is a list of values, each corresponding to a widget
# in self.widgets.
site_choices = get_site_choices()
page_choices = get_page_choices()
self.site_choices = site_choices
self.choices = page_choices
self.widgets = (Select(choices=site_choices ),
Select(choices=[('', '----')]),
Select(choices=self.choices, attrs={'style': "display:none;"} ),
)
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
output.append(r'''<script type="text/javascript">
(function($) {
var handleSiteChange = function(site_name, selected_id) {
$("#id_%(name)s_1 optgroup").remove();
var myOptions = $("#id_%(name)s_2 optgroup[label='" + site_name + "']").clone();
$("#id_%(name)s_1").append(myOptions);
$("#id_%(name)s_1").change();
};
var handlePageChange = function(page_id) {
if (page_id) {
$("#id_%(name)s_2 option").removeAttr('selected');
$("#id_%(name)s_2 option[value=" + page_id + "]").attr('selected','selected');
} else {
$("#id_%(name)s_2 option[value=]").attr('selected','selected');
};
};
$("#id_%(name)s_0").change(function(){
var site_label = $("#id_%(name)s_0").children(":selected").text();
handleSiteChange( site_label );
});
$("#id_%(name)s_1").change(function(){
var page_id = $(this).find('option:selected').val();
handlePageChange( page_id );
});
$(function(){
handleSiteChange( $("#id_%(name)s_0").children(":selected").text() );
$("#add_id_%(name)s").hide();
});
})(django.jQuery);
</script>''' % {'name': name})
return mark_safe(self.format_output(output))
def format_output(self, rendered_widgets):
return u' '.join(rendered_widgets)
class PageSmartLinkWidget(TextInput):
def __init__(self, attrs=None, ajax_view=None):
super(PageSmartLinkWidget, self).__init__(attrs)
self.ajax_url = self.get_ajax_url(ajax_view=ajax_view)
def get_ajax_url(self, ajax_view):
try:
return reverse_lazy(ajax_view)
except NoReverseMatch:
raise Exception(
                'You should provide an ajax_view argument to PageSmartLinkWidget that can be reversed to a URL'
)
def render(self, name=None, value=None, attrs=None):
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
output = [r'''<script type="text/javascript">
(function($){
$(function(){
$("#%(element_id)s").select2({
placeholder: "%(placeholder_text)s",
allowClear: true,
minimumInputLength: 3,
ajax: {
url: "%(ajax_url)s",
dataType: 'json',
data: function (term, page) {
return {
q: term, // search term
language_code: '%(language_code)s'
};
},
results: function (data, page) {
return {
more: false,
results: $.map(data, function(item, i){
return {
'id':item.redirect_url,
'text': item.title + ' (/' + item.path + ')'}
}
)
};
}
},
// Allow creation of new entries
createSearchChoice:function(term, data) { if ($(data).filter(function() { return this.text.localeCompare(term)===0; }).length===0) {return {id:term, text:term};} },
multiple: false,
initSelection : function (element, callback) {
                        var initialValue = element.val();
callback({id:initialValue, text: initialValue});
}
});
})
})(django.jQuery);
</script>''' % {
'element_id': id_,
'placeholder_text': final_attrs.get('placeholder_text', ''),
'language_code': self.language,
'ajax_url': force_unicode(self.ajax_url)
}]
output.append(super(PageSmartLinkWidget, self).render(name, value, attrs))
return mark_safe(u''.join(output))
class Media:
css = {
'all': ('cms/js/select2/select2.css',
'cms/js/select2/select2-bootstrap.css',)
}
js = (#'cms/js/libs/jquery.min.js',
'cms/js/select2/select2.js',)
class UserSelectAdminWidget(Select):
    """Special widget used in page permission inlines; we have to render
    an add user (plus) icon, but point it somewhere else - to the special user
    creation view, which is accessible only if the user has "add user" permissions.
    The current user should be assigned to the widget in the form constructor as a
    user attribute.
    """
def render(self, name, value, attrs=None, choices=()):
output = [super(UserSelectAdminWidget, self).render(name, value, attrs, choices)]
if hasattr(self, 'user') and (self.user.is_superuser or \
self.user.has_perm(PageUser._meta.app_label + '.' + PageUser._meta.get_add_permission())):
# append + icon
add_url = '../../../cms/pageuser/add/'
output.append(u'<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
(add_url, name))
output.append(u'<img src="%sicon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (CMS_ADMIN_ICON_BASE, _('Add Another')))
return mark_safe(u''.join(output))
class AppHookSelect(Select):
"""Special widget used for the App Hook selector in the Advanced Settings
of the Page Admin. It adds support for a data attribute per option and
includes supporting JS into the page.
"""
class Media:
js = ('cms/js/modules/cms.base.js', 'cms/js/modules/cms.app_hook_select.js', )
def __init__(self, attrs=None, choices=(), app_namespaces={}):
self.app_namespaces = app_namespaces
super(AppHookSelect, self).__init__(attrs, choices)
def render_option(self, selected_choices, option_value, option_label):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
if option_value in self.app_namespaces:
data_html = mark_safe(' data-namespace="%s"' % self.app_namespaces[option_value])
else:
data_html = ''
return '<option value="%s"%s%s>%s</option>' % (
option_value,
selected_html,
data_html,
force_text(option_label),
)
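    # Illustrative call with hypothetical values:
    #   render_option({'myapp'}, 'myapp', 'My App') with
    #   self.app_namespaces == {'myapp': 'myapp_ns'} returns
    #   '<option value="myapp" selected="selected" data-namespace="myapp_ns">My App</option>'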
def render_options(self, choices, selected_choices):
selected_choices = set(force_text(v) for v in selected_choices)
output = []
for option_value, option_label in chain(self.choices, choices):
output.append(self.render_option(selected_choices, option_value, option_label))
return '\n'.join(output)
|
"""
Serializer for user API
"""
from rest_framework import serializers
from rest_framework.reverse import reverse
from django.template import defaultfilters
from courseware.access import has_access
from student.models import CourseEnrollment, User
from certificates.models import certificate_status_for_student, CertificateStatuses
from xmodule.course_module import DEFAULT_START_DATE
class CourseOverviewField(serializers.RelatedField):
"""Custom field to wrap a CourseDescriptor object. Read-only."""
def to_representation(self, course_overview):
course_id = unicode(course_overview.id)
request = self.context.get('request', None)
if request:
video_outline_url = reverse(
'video-summary-list',
kwargs={'course_id': course_id},
request=request
)
course_updates_url = reverse(
'course-updates-list',
kwargs={'course_id': course_id},
request=request
)
course_handouts_url = reverse(
'course-handouts-list',
kwargs={'course_id': course_id},
request=request
)
discussion_url = reverse(
'discussion_course',
kwargs={'course_id': course_id},
request=request
) if course_overview.is_discussion_tab_enabled() else None
else:
video_outline_url = None
course_updates_url = None
course_handouts_url = None
discussion_url = None
if course_overview.advertised_start is not None:
start_type = "string"
start_display = course_overview.advertised_start
elif course_overview.start != DEFAULT_START_DATE:
start_type = "timestamp"
start_display = defaultfilters.date(course_overview.start, "DATE_FORMAT")
else:
start_type = "empty"
start_display = None
return {
"id": course_id,
"name": course_overview.display_name,
"number": course_overview.display_number_with_default,
"org": course_overview.display_org_with_default,
"start": course_overview.start,
"start_display": start_display,
"start_type": start_type,
"end": course_overview.end,
"course_image": course_overview.course_image_url,
"social_urls": {
"facebook": course_overview.facebook_url,
},
"latest_updates": {
"video": None
},
"video_outline": video_outline_url,
"course_updates": course_updates_url,
"course_handouts": course_handouts_url,
"discussion_url": discussion_url,
"subscription_id": course_overview.clean_id(padding_char='_'),
"courseware_access": has_access(request.user, 'load_mobile', course_overview).to_json() if request else None
}
class CourseEnrollmentSerializer(serializers.ModelSerializer):
"""
Serializes CourseEnrollment models
"""
course = CourseOverviewField(source="course_overview", read_only=True)
certificate = serializers.SerializerMethodField()
def get_certificate(self, model):
"""Returns the information about the user's certificate in the course."""
certificate_info = certificate_status_for_student(model.user, model.course_id)
if certificate_info['status'] == CertificateStatuses.downloadable:
return {
"url": certificate_info['download_url'],
}
else:
return {}
class Meta(object):
model = CourseEnrollment
fields = ('created', 'mode', 'is_active', 'course', 'certificate')
lookup_field = 'username'
class UserSerializer(serializers.HyperlinkedModelSerializer):
"""
Serializes User models
"""
name = serializers.ReadOnlyField(source='profile.name')
course_enrollments = serializers.HyperlinkedIdentityField(
view_name='courseenrollment-detail',
lookup_field='username'
)
class Meta(object):
model = User
fields = ('id', 'username', 'email', 'name', 'course_enrollments')
lookup_field = 'username'
|
"""Downloads the necessary NLTK corpora for TextBlob.
Usage: ::
$ python -m textblob.download_corpora
If you only intend to use TextBlob's default models, you can use the "lite"
option: ::
$ python -m textblob.download_corpora lite
"""
import sys
import nltk
MIN_CORPORA = [
'brown', # Required for FastNPExtractor
'punkt', # Required for WordTokenizer
'wordnet' # Required for lemmatization
]
ADDITIONAL_CORPORA = [
'conll2000', # Required for ConllExtractor
'maxent_treebank_pos_tagger', # Required for NLTKTagger
'movie_reviews', # Required for NaiveBayesAnalyzer
]
ALL_CORPORA = MIN_CORPORA + ADDITIONAL_CORPORA
def download_lite():
for each in MIN_CORPORA:
nltk.download(each)
def download_all():
for each in ALL_CORPORA:
nltk.download(each)
def main():
if 'lite' in sys.argv:
download_lite()
else:
download_all()
print("Finished.")
if __name__ == '__main__':
main()
|
from django.db import transaction
from denorm.db import base
class RandomBigInt(base.RandomBigInt):
def sql(self):
return '(9223372036854775806::INT8 * ((RANDOM()-0.5)*2.0) )::INT8'
class TriggerNestedSelect(base.TriggerNestedSelect):
def sql(self):
columns = self.columns
table = self.table
        where = " AND ".join(["%s = %s" % (k, v) for k, v in self.kwargs.iteritems()])
return 'SELECT DISTINCT %(columns)s FROM %(table)s WHERE %(where)s' % locals(), tuple()
class TriggerActionInsert(base.TriggerActionInsert):
def sql(self):
table = self.model._meta.db_table
columns = "(" + ",".join(self.columns) + ")"
params = []
if isinstance(self.values, TriggerNestedSelect):
sql, nested_params = self.values.sql()
values = "(" + sql + ")"
params.extend(nested_params)
else:
values = "VALUES(" + ",".join(self.values) + ")"
sql = (
'BEGIN\n'
'INSERT INTO %(table)s %(columns)s %(values)s;\n'
'EXCEPTION WHEN unique_violation THEN -- do nothing\n'
'END\n'
) % locals()
return sql, params
class TriggerActionUpdate(base.TriggerActionUpdate):
def sql(self):
table = self.model._meta.db_table
params = []
updates = ','.join(["%s=%s" % (k, v) for k, v in zip(self.columns, self.values)])
if isinstance(self.where, tuple):
where, where_params = self.where
else:
where, where_params = self.where, []
params.extend(where_params)
return 'UPDATE %(table)s SET %(updates)s WHERE %(where)s' % locals(), params
class Trigger(base.Trigger):
def name(self):
name = base.Trigger.name(self)
if self.content_type_field:
name += "_%s" % self.content_type
return name
def sql(self):
name = self.name()
params = []
action_set = set()
for a in self.actions:
sql, action_params = a.sql()
if sql:
action_set.add(sql)
params.extend(action_params)
actions = ";\n ".join(action_set) + ';'
table = self.db_table
time = self.time.upper()
event = self.event.upper()
content_type = self.content_type
ct_field = self.content_type_field
conditions = []
if event == "UPDATE":
for field, native_type in self.fields:
if native_type is None:
# If Django didn't know what this field type should be
# then compare it as text - Fixes a problem of trying to
# compare PostGIS geometry fields.
conditions.append("(OLD.%(f)s::%(t)s IS DISTINCT FROM NEW.%(f)s::%(t)s)" % {'f': field, 't': 'text'})
else:
conditions.append("( OLD.%(f)s IS DISTINCT FROM NEW.%(f)s )" % {'f': field})
conditions = ["(%s)" % "OR".join(conditions)]
if ct_field:
if event == "UPDATE":
conditions.append("(OLD.%(ctf)s=%(ct)s)OR(NEW.%(ctf)s=%(ct)s)" % {'ctf': ct_field, 'ct': content_type})
elif event == "INSERT":
conditions.append("(NEW.%s=%s)" % (ct_field, content_type))
elif event == "DELETE":
conditions.append("(OLD.%s=%s)" % (ct_field, content_type))
if not conditions:
cond = "TRUE"
else:
cond = "AND".join(conditions)
sql = """
CREATE OR REPLACE FUNCTION func_%(name)s()
RETURNS TRIGGER AS $$
BEGIN
IF %(cond)s THEN
%(actions)s
END IF;
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER %(name)s
%(time)s %(event)s ON %(table)s
FOR EACH ROW EXECUTE PROCEDURE func_%(name)s();
""" % locals()
return sql, params
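# Illustrative expansion sketch (all names hypothetical, not from the source):
# for a trigger named denorm_trigger_x firing AFTER UPDATE on "myapp_item" with
# a single update action, the template above renders roughly as:
#
#   CREATE OR REPLACE FUNCTION func_denorm_trigger_x()
#   RETURNS TRIGGER AS $$
#   BEGIN
#       IF (( OLD.title IS DISTINCT FROM NEW.title )) THEN
#           UPDATE myapp_summary SET dirty=true WHERE id = NEW.id;
#       END IF;
#       RETURN NULL;
#   END;
#   $$ LANGUAGE plpgsql;
#   CREATE TRIGGER denorm_trigger_x
#   AFTER UPDATE ON myapp_item
#   FOR EACH ROW EXECUTE PROCEDURE func_denorm_trigger_x();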
class TriggerSet(base.TriggerSet):
def drop(self):
cursor = self.cursor()
cursor.execute("SELECT pg_class.relname, pg_trigger.tgname FROM pg_trigger LEFT JOIN pg_class ON (pg_trigger.tgrelid = pg_class.oid) WHERE pg_trigger.tgname LIKE 'denorm_%%';")
for table_name, trigger_name in cursor.fetchall():
cursor.execute('DROP TRIGGER %s ON %s;' % (trigger_name, table_name))
transaction.commit_unless_managed(using=self.using)
def install(self):
cursor = self.cursor()
cursor.execute("SELECT lanname FROM pg_catalog.pg_language WHERE lanname ='plpgsql'")
if not cursor.fetchall():
cursor.execute('CREATE LANGUAGE plpgsql')
for name, trigger in self.triggers.iteritems():
sql, args = trigger.sql()
cursor.execute(sql, args)
transaction.commit_unless_managed(using=self.using)
|
"""BibFormat element - Prints brief HTML picture and links to resources
"""
__revision__ = "$Id$"
def format_element(bfo):
"""
Prints html image and link to photo resources.
"""
from invenio.config import CFG_SITE_URL, CFG_SITE_RECORD
resources = bfo.fields("8564_")
out = ""
for resource in resources:
if resource.get("x", "") == "icon":
out += '<a href="'+CFG_SITE_URL+'/'+ CFG_SITE_RECORD +'/'+bfo.control_field("001")+ \
'?ln='+ bfo.lang + '"><img src="' + resource.get("u", "").replace(" ","") \
+ '" alt="" border="0"/></a>'
return out
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.modules.cloud.amazon.aws_acm import pem_chain_split, chain_compare
from ansible.module_utils._text import to_bytes, to_text
from pprint import pprint
def test_chain_compare():
    # The functions we're testing take module as an argument,
    # just so they can call module.fail_json.
    # Use None for the unit tests, because they shouldn't fail,
    # and if they do, fail_json is not applicable anyway.
module = None
fixture_suffix = 'test/units/modules/cloud/amazon/fixtures/certs'
# Test chain split function on super simple (invalid) certs
expected = ['aaa', 'bbb', 'ccc']
for fname in ['simple-chain-a.cert', 'simple-chain-b.cert']:
path = fixture_suffix + '/' + fname
with open(path, 'r') as f:
pem = to_text(f.read())
actual = pem_chain_split(module, pem)
actual = [a.strip() for a in actual]
if actual != expected:
print("Expected:")
pprint(expected)
print("Actual:")
pprint(actual)
raise AssertionError("Failed to properly split %s" % fname)
# Now test real chains
    # chains with the same 'same_as' value should be considered equal
test_chains = [
{ # Original Cert chain
'path': fixture_suffix + '/chain-1.0.cert',
'same_as': 1,
'length': 3
},
{ # Same as 1.0, but longer PEM lines
'path': fixture_suffix + '/chain-1.1.cert',
'same_as': 1,
'length': 3
},
{ # Same as 1.0, but without the stuff before each --------
'path': fixture_suffix + '/chain-1.2.cert',
'same_as': 1,
'length': 3
},
{ # Same as 1.0, but in a different order, so should be considered different
'path': fixture_suffix + '/chain-1.3.cert',
'same_as': 2,
'length': 3
},
{ # Same as 1.0, but with last link missing
'path': fixture_suffix + '/chain-1.4.cert',
'same_as': 3,
'length': 2
},
{ # Completely different cert chain to all the others
'path': fixture_suffix + '/chain-4.cert',
'same_as': 4,
'length': 3
},
{ # Single cert
'path': fixture_suffix + '/a.pem',
'same_as': 5,
'length': 1
},
{ # a different, single cert
'path': fixture_suffix + '/b.pem',
'same_as': 6,
'length': 1
}
]
for chain in test_chains:
with open(chain['path'], 'r') as f:
chain['pem_text'] = to_text(f.read())
# Test to make sure our regex isn't too greedy
chain['split'] = pem_chain_split(module, chain['pem_text'])
if len(chain['split']) != chain['length']:
print("Cert before split")
print(chain['pem_text'])
print("Cert after split")
pprint(chain['split'])
print("path: %s" % chain['path'])
print("Expected chain length: %d" % chain['length'])
print("Actual chain length: %d" % len(chain['split']))
raise AssertionError("Chain %s was not split properly" % chain['path'])
for chain_a in test_chains:
for chain_b in test_chains:
expected = (chain_a['same_as'] == chain_b['same_as'])
# Now test the comparison function
actual = chain_compare(module, chain_a['pem_text'], chain_b['pem_text'])
if expected != actual:
print("Error, unexpected comparison result between \n%s\nand\n%s" % (chain_a['path'], chain_b['path']))
print("Expected %s got %s" % (str(expected), str(actual)))
assert(expected == actual)
|
'''
Simple monitoring script to collect per-process CPU percentage
and memory usage in bytes (vms, a.k.a. virt, and rss)
usage:
cron-send-cpu-mem-stats process_name openshift.whatever.zabbix.key
or
cron-send-cpu-mem-stats 'something parameter more params' openshift.something.parameter.more.params
The script will append .cpu and .mem.{vms|rss} to the end of the zabbix key name for the values
A future enhancement could be to support multiple instances, which would add the pid to the key,
but those would have to be dynamic items in zabbix
'''
import argparse
import psutil
from openshift_tools.monitoring.metric_sender import MetricSender
def parse_args():
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='CPU and Memory per process stats collector')
parser.add_argument('--debug', action='store_true', default=None, help='Debug?')
parser.add_argument('process_str', help='The process command line string to match')
parser.add_argument('zabbix_key_prefix', help='Prefix for the key that will be sent \
to zabbix with this data, will get a .cpu and .mem suffix')
return parser.parse_args()
def main():
""" Main function to run the check """
argz = parse_args()
proc_parts = argz.process_str.split()
zagg_data = {}
for proc in psutil.process_iter():
try:
if proc_parts[0] == proc.name():
proc.dict = proc.as_dict(['cmdline', 'memory_info'])
cmdline = proc.dict['cmdline']
if len(proc_parts) > 1 and len(cmdline) > 1:
part_count = len(proc_parts[1:])
                    # This check might be confusing (I know I will be confused in 2 weeks), so a
                    # quick explanation: if the process name matched above, check the remaining
                    # strings against the /proc/<pid>/cmdline contents; order shouldn't matter
                    # since all of them have to match
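                    # (hypothetical example: proc_parts == ['java', '-jar', 'app.jar'] matches a
                    # process named 'java' whose next two cmdline args include '-jar' and 'app.jar')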
if len(set(proc_parts[1:]).intersection(set(cmdline[1:1+part_count]))) != part_count:
continue
if argz.debug:
print cmdline
cpu_percent = '{0:.2f}'.format(proc.cpu_percent(interval=0.5))
mem_vms = '{0}'.format(getattr(proc.dict['memory_info'], 'vms'))
mem_rss = '{0}'.format(getattr(proc.dict['memory_info'], 'rss'))
zagg_data = {'{0}.cpu'.format(argz.zabbix_key_prefix) : cpu_percent,
'{0}.mem.vms'.format(argz.zabbix_key_prefix) : mem_vms,
'{0}.mem.rss'.format(argz.zabbix_key_prefix) : mem_rss}
except psutil.NoSuchProcess:
pass
if argz.debug:
try:
print 'Process ({0}) is using {1} CPU and {2} {3} memory'.format(argz.process_str,
cpu_percent,
mem_vms,
mem_rss)
print 'Zagg will receive: {0}'.format(zagg_data)
except NameError as ex:
print 'No values: {0}'.format(ex)
if zagg_data:
ms = MetricSender(debug=argz.debug)
ms.add_metric(zagg_data)
ms.send_metrics()
if __name__ == '__main__':
main()
|
def f(s):
s = s[::-1]
return s.swapcase()
result = f(f(f(f(f('abcdef'))))) # breakpoint
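# Not in the original: f composes string reversal with swapcase, and each of
# those is its own inverse (and they commute), so f(f(s)) == s. An odd number
# of applications therefore equals a single one: result == f('abcdef') == 'FEDCBA'.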
|
"""
Secrets framework provides means of getting connection objects from various sources, e.g. the following:
* Environment variables
* Metastore database
* AWS SSM Parameter store
"""
__all__ = ['BaseSecretsBackend', 'DEFAULT_SECRETS_SEARCH_PATH']
from airflow.secrets.base_secrets import BaseSecretsBackend
DEFAULT_SECRETS_SEARCH_PATH = [
"airflow.secrets.environment_variables.EnvironmentVariablesBackend",
"airflow.secrets.metastore.MetastoreBackend",
]
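# A minimal sketch, not part of the framework: a custom backend resolving
# connection URIs from an in-memory dict. The class and mapping below are
# hypothetical, and it assumes get_conn_uri() is the override point; returning
# None lets resolution fall through to the next backend in the search path.
class _ExampleDictSecretsBackend(BaseSecretsBackend):
    """Illustration only: serves conn URIs from a dict given at construction."""
    def __init__(self, uris=None):
        super(_ExampleDictSecretsBackend, self).__init__()
        self._uris = uris or {}
    def get_conn_uri(self, conn_id):
        # None signals "not found here"; callers then try the next backend
        return self._uris.get(conn_id)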
|
from buck import format_watchman_query_params, glob_internal, LazyBuildEnvPartial
from buck import subdir_glob, BuildFileContext
from pathlib import Path, PurePosixPath, PureWindowsPath
import os
import shutil
import tempfile
import unittest
class FakePathMixin(object):
def glob(self, pattern):
return self.glob_results.get(pattern)
def is_file(self):
return True
class FakePosixPath(FakePathMixin, PurePosixPath):
pass
class FakeWindowsPath(FakePathMixin, PureWindowsPath):
pass
def fake_path(fake_path_class, path, glob_results={}):
# Path does magic in __new__ with its args; it's hard to add more without
# changing that class. So we use a wrapper function to diddle with
# FakePath's members.
result = fake_path_class(path)
result.glob_results = {}
for pattern, paths in glob_results.iteritems():
result.glob_results[pattern] = [result / fake_path_class(p) for p in paths]
return result
class TestBuckPlatformBase(object):
def test_glob_includes_simple(self):
search_base = self.fake_path(
'foo',
glob_results={'*.java': ['A.java', 'B.java']})
self.assertGlobMatches(
['A.java', 'B.java'],
glob_internal(
includes=['*.java'],
excludes=[],
include_dotfiles=False,
search_base=search_base))
def test_glob_includes_sort(self):
search_base = self.fake_path(
'foo',
glob_results={'*.java': ['A.java', 'E.java', 'D.java', 'C.java', 'B.java']})
self.assertGlobMatches(
['A.java', 'B.java', 'C.java', 'D.java', 'E.java'],
glob_internal(
includes=['*.java'],
excludes=[],
include_dotfiles=False,
search_base=search_base))
def test_glob_includes_multi(self):
search_base = self.fake_path(
'foo',
glob_results={
'bar/*.java': ['bar/A.java', 'bar/B.java'],
'baz/*.java': ['baz/C.java', 'baz/D.java'],
})
self.assertGlobMatches(
['bar/A.java', 'bar/B.java', 'baz/C.java', 'baz/D.java'],
glob_internal(
includes=['bar/*.java', 'baz/*.java'],
excludes=[],
include_dotfiles=False,
search_base=search_base))
def test_glob_excludes_double_star(self):
search_base = self.fake_path(
'foo',
glob_results={
'**/*.java': ['A.java', 'B.java', 'Test.java'],
})
self.assertGlobMatches(
['A.java', 'B.java'],
glob_internal(
includes=['**/*.java'],
excludes=['**/*Test.java'],
include_dotfiles=False,
search_base=search_base))
def test_glob_excludes_multi(self):
search_base = self.fake_path(
'foo',
glob_results={
'bar/*.java': ['bar/A.java', 'bar/B.java'],
'baz/*.java': ['baz/C.java', 'baz/D.java'],
})
self.assertGlobMatches(
['bar/B.java', 'baz/D.java'],
glob_internal(
includes=['bar/*.java', 'baz/*.java'],
excludes=['*/[AC].java'],
include_dotfiles=False,
search_base=search_base))
def test_subdir_glob(self):
build_env = BuildFileContext(None, None, None, None, None, None, None, None)
search_base = self.fake_path(
'foo',
glob_results={
'lib/bar/*.h': ['lib/bar/A.h', 'lib/bar/B.h'],
'lib/baz/*.h': ['lib/baz/C.h', 'lib/baz/D.h'],
})
self.assertGlobMatches(
{
'bar/B.h': 'lib/bar/B.h',
'bar/A.h': 'lib/bar/A.h',
'baz/D.h': 'lib/baz/D.h',
'baz/C.h': 'lib/baz/C.h',
},
subdir_glob([
('lib', 'bar/*.h'),
('lib', 'baz/*.h')],
build_env=build_env,
search_base=search_base))
def test_subdir_glob_with_prefix(self):
build_env = BuildFileContext(None, None, None, None, None, None, None, None)
search_base = self.fake_path(
'foo',
glob_results={
'lib/bar/*.h': ['lib/bar/A.h', 'lib/bar/B.h'],
})
self.assertGlobMatches(
{
'Prefix/bar/B.h': 'lib/bar/B.h',
'Prefix/bar/A.h': 'lib/bar/A.h',
},
subdir_glob([('lib', 'bar/*.h')],
prefix='Prefix',
build_env=build_env,
search_base=search_base))
def test_glob_excludes_relative(self):
search_base = self.fake_path(
'foo',
glob_results={
'**/*.java': ['foo/A.java', 'foo/bar/B.java', 'bar/C.java'],
})
self.assertGlobMatches(
['foo/A.java', 'foo/bar/B.java'],
glob_internal(
includes=['**/*.java'],
excludes=['bar/*.java'],
include_dotfiles=False,
search_base=search_base))
def test_glob_includes_skips_dotfiles(self):
search_base = self.fake_path(
'foo',
glob_results={'*.java': ['A.java', '.B.java']})
self.assertGlobMatches(
['A.java'],
glob_internal(
includes=['*.java'],
excludes=[],
include_dotfiles=False,
search_base=search_base))
def test_glob_includes_does_not_skip_dotfiles_if_include_dotfiles(self):
search_base = self.fake_path(
'foo',
glob_results={'*.java': ['A.java', '.B.java']})
self.assertGlobMatches(
['.B.java', 'A.java'],
glob_internal(
includes=['*.java'],
excludes=[],
include_dotfiles=True,
search_base=search_base))
def test_lazy_build_env_partial(self):
def cobol_binary(
name,
deps=[],
build_env=None):
return (name, deps, build_env)
testLazy = LazyBuildEnvPartial(cobol_binary)
testLazy.build_env = {}
self.assertEqual(
('HAL', [1, 2, 3], {}),
testLazy.invoke(name='HAL', deps=[1, 2, 3]))
testLazy.build_env = {'abc': 789}
self.assertEqual(
('HAL', [1, 2, 3], {'abc': 789}),
testLazy.invoke(name='HAL', deps=[1, 2, 3]))
def test_explicit_exclude_with_file_separator_excludes(self):
search_base = self.fake_path(
'foo',
glob_results={'java/**/*.java': ['java/Include.java', 'java/Exclude.java']})
self.assertGlobMatches(
['java/Include.java'],
glob_internal(
includes=['java/**/*.java'],
excludes=['java/Exclude.java'],
include_dotfiles=False,
search_base=search_base))
class TestBuckPosix(TestBuckPlatformBase, unittest.TestCase):
@staticmethod
def fake_path(*args, **kwargs):
return fake_path(FakePosixPath, *args, **kwargs)
def assertGlobMatches(self, expected, actual):
self.assertEqual(expected, actual)
class TestBuckWindows(TestBuckPlatformBase, unittest.TestCase):
@staticmethod
def fake_path(*args, **kwargs):
return fake_path(FakeWindowsPath, *args, **kwargs)
def assertGlobMatches(self, expected, actual):
# Fix the path separator to make test writing easier
fixed_expected = None
if isinstance(expected, list):
fixed_expected = []
for path in expected:
fixed_expected.append(path.replace('/', '\\'))
else:
fixed_expected = {}
for key, value in expected.items():
fixed_expected.update({key.replace('/', '\\'): value.replace('/', '\\')})
self.assertEqual(fixed_expected, actual)
class TestBuck(unittest.TestCase):
def test_glob_double_star_integration(self):
d = tempfile.mkdtemp()
try:
subdir = os.path.join(d, 'b', 'a', 'c', 'a')
os.makedirs(subdir)
f = open(os.path.join(subdir, 'A.java'), 'w')
f.close()
f = open(os.path.join(subdir, 'B.java'), 'w')
f.close()
f = open(os.path.join(subdir, 'Test.java'), 'w')
f.close()
f = open(os.path.join(subdir, '.tmp.java'), 'w')
f.close()
os.makedirs(os.path.join(subdir, 'NotAFile.java'))
self.assertEquals(
[
os.path.join('b', 'a', 'c', 'a', 'A.java'),
os.path.join('b', 'a', 'c', 'a', 'B.java'),
],
glob_internal(
includes=['b/a/**/*.java'],
excludes=['**/*Test.java'],
include_dotfiles=False,
search_base=Path(d)))
finally:
shutil.rmtree(d)
def test_case_preserved(self):
d = tempfile.mkdtemp()
try:
subdir = os.path.join(d, 'java')
os.makedirs(subdir)
open(os.path.join(subdir, 'Main.java'), 'w').close()
self.assertEquals(
[
os.path.join('java', 'Main.java'),
],
glob_internal(
includes=['java/Main.java'],
excludes=[],
include_dotfiles=False,
search_base=Path(d)))
finally:
shutil.rmtree(d)
def test_watchman_query_params_includes(self):
query_params = format_watchman_query_params(
['**/*.java'],
[],
False,
'/path/to/glob')
self.assertEquals(
{
'relative_root': '/path/to/glob',
'path': [''],
'fields': ['name'],
'expression': [
'allof',
'exists',
['anyof', ['type', 'f'], ['type', 'l']],
['anyof', ['match', '**/*.java', 'wholename', {}]],
]
},
query_params)
def test_watchman_query_params_includes_and_excludes(self):
query_params = format_watchman_query_params(
['**/*.java'],
['**/*Test.java'],
False,
'/path/to/glob')
self.assertEquals(
{
'relative_root': '/path/to/glob',
'path': [''],
'fields': ['name'],
'expression': [
'allof',
'exists',
['anyof', ['type', 'f'], ['type', 'l']],
['anyof', ['match', '**/*.java', 'wholename', {}]],
['not', ['anyof', ['match', '**/*Test.java', 'wholename', {}]]],
]
},
query_params)
if __name__ == '__main__':
unittest.main()
|
from contextlib import contextmanager
import mock
from nose.tools import raises
from . import prompt as _
class TestValueToStr():
def test_none(self):
# pass none to value_to_str
assert _.value_to_str(None) == '', 'passing None should return an empty string'
def test_nonstring(self):
# pass a non-string value to value_to_str
assert _.value_to_str(1) == '1', 'passing 1 should return the string "1"'
class TestSuggestion():
@raises(ValueError)
def test_new_bad_char_type(self):
# pass a non-string type as char to suggestion
_.Suggestion(None, 1)
@raises(ValueError)
def test_new_bad_multichar(self):
# pass multiple chars where one is expected
_.Suggestion(None, 'badvalue')
def test_str_method(self):
# test __str__ method of Suggestion
suggestion = _.Suggestion('alpha', 'a', 'test', True)
strval = str(suggestion)
expect = '<Suggestion char="a" desc="test" value="alpha" default>'
assert strval == expect, 'Suggestion is not producing the correct string value %s' % expect
@contextmanager
def mockInput(fn):
    original = __builtins__['raw_input']
    __builtins__['raw_input'] = fn
    try:
        yield
    finally:
        # Restore the real raw_input even if the block raises (e.g. SystemExit).
        __builtins__['raw_input'] = original
class TestGetInput():
def setUp(self):
self.suggestions = [_.Suggestion('alpha', 'a', 'test', False)]
@raises(SystemExit)
def test_get_input_sys_exit(self):
# bad input from user
def temp(_):
raise KeyboardInterrupt
with mockInput(temp):
_.get_input('Test', lambda _: True, self.suggestions)
def test_get_input_empty_then_full(self):
# test both major paths of get_input
# Python 2 does not have the 'nonlocal' keyword, so we fudge the closure with an object.
class Temp:
def __init__(self):
self.flag = False
def __call__(self, _):
if not self.flag:
self.flag = True
return ''
else:
return 'a'
with mockInput(Temp()):
assert _.get_input('Test', lambda x: x, self.suggestions) == 'alpha', 'get_input should return "alpha" for input "a"'
def test_get_input_empty_default(self):
# empty input should choose the default
self.suggestions[0].default = True
with mockInput(lambda _: ''):
assert _.get_input('Test', lambda x: x+'_validated', self.suggestions) == 'alpha_validated', 'get_input should return the default value "alpha"'
def test_get_input_empty_default_no_validator(self):
# empty input should choose the default and not validate
self.suggestions[0].default = True
with mockInput(lambda _: ''):
assert _.get_input('Test', suggestions=self.suggestions) == 'alpha', 'get_input should return the default value "alpha"'
@mock.patch('os.path.expanduser')
def test_get_input_path(self, mock_expanduser):
# should correctly validate path
mock_expanduser.side_effect = lambda x: '/path'+x
with mockInput(lambda _: '/test'):
            assert _.get_input(validator=lambda x: x, is_path=True) == '/path/test', 'get_input should expand the user path and return "/path/test"'
|
class ExceptionFinishBreakpoint(gdb.FinishBreakpoint):
def __init__(self, frame):
        gdb.FinishBreakpoint.__init__(self, frame, internal=1)
self.silent = True
print ("init ExceptionFinishBreakpoint")
def stop(self):
print ("stopped at ExceptionFinishBreakpoint")
return True
def out_of_scope(self):
print ("exception did not finish ...")
print ("Python script imported")
|
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_disks
short_description: "Module to manage Virtual Machine and floating disks in oVirt"
version_added: "2.2"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage Virtual Machine and floating disks in oVirt."
options:
id:
description:
- "ID of the disk to manage. Either C(id) or C(name) is required."
name:
description:
- "Name of the disk to manage. Either C(id) or C(name)/C(alias) is required."
aliases: ['alias']
vm_name:
description:
- "Name of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
vm_id:
description:
- "ID of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
state:
description:
- "Should the Virtual Machine disk be present/absent/attached/detached."
choices: ['present', 'absent', 'attached', 'detached']
default: 'present'
image_path:
description:
- "Path to disk image, which should be uploaded."
- "Note that currently we support only compability version 0.10 of the qcow disk."
- "Note that you must have an valid oVirt engine CA in your system trust store
or you must provide it in C(ca_file) parameter."
- "Note that there is no reliable way to achieve idempotency, so
if you want to upload the disk even if the disk with C(id) or C(name) exists,
then please use C(force) I(true). If you will use C(force) I(false), which
is default, then the disk image won't be uploaded."
version_added: "2.3"
size:
description:
- "Size of the disk. Size should be specified using IEC standard units.
For example 10GiB, 1024MiB, etc."
- "Size can be only increased, not decreased."
interface:
description:
- "Driver of the storage interface."
choices: ['virtio', 'ide', 'virtio_scsi']
default: 'virtio'
format:
description:
- Specify format of the disk.
      - If (cow) format is used, disk will be created as sparse, so space will be allocated for the volume as needed, also known as I(thin provision).
- If (raw) format is used, disk storage will be allocated right away, also known as I(preallocated).
- Note that this option isn't idempotent as it's not currently possible to change format of the disk via API.
choices: ['raw', 'cow']
storage_domain:
description:
- "Storage domain name where disk should be created. By default storage is chosen by oVirt engine."
storage_domains:
description:
- "Storage domain names where disk should be copied."
- "C(**IMPORTANT**)"
- "There is no reliable way to achieve idempotency, so every time
you specify this parameter the disks are copied, so please handle
your playbook accordingly to not copy the disks all the time. This
is valid only for VM and floating disks, template disks works
as expected."
version_added: "2.3"
force:
description:
- "Please take a look at C(image_path) documentation to see the correct
usage of this parameter."
version_added: "2.3"
profile:
description:
- "Disk profile name to be attached to disk. By default profile is chosen by oVirt engine."
bootable:
description:
- "I(True) if the disk should be bootable. By default when disk is created it isn't bootable."
shareable:
description:
- "I(True) if the disk should be shareable. By default when disk is created it isn't shareable."
logical_unit:
description:
- "Dictionary which describes LUN to be directly attached to VM:"
- "C(address) - Address of the storage server. Used by iSCSI."
- "C(port) - Port of the storage server. Used by iSCSI."
- "C(target) - iSCSI target."
- "C(lun_id) - LUN id."
- "C(username) - CHAP Username to be used to access storage server. Used by iSCSI."
- "C(password) - CHAP Password of the user to be used to access storage server. Used by iSCSI."
- "C(storage_type) - Storage type either I(fcp) or I(iscsi)."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
- ovirt_disks:
name: myvm_disk
vm_name: rhel7
size: 10GiB
format: cow
interface: virtio
- ovirt_disks:
vm_name: rhel7
logical_unit:
target: iqn.2016-08-09.brq.str-01:omachace
id: 1IET_000d0001
address: 10.34.63.204
interface: virtio
- ovirt_disks:
state: detached
name: myvm_disk
vm_name: rhel7
size: 10GiB
format: cow
interface: virtio
- ovirt_disks:
name: mydisk
vm_name: myvm
interface: virtio
size: 10GiB
format: cow
image_path: /path/to/mydisk.qcow2
storage_domain: data
'''
RETURN = '''
id:
description: "ID of the managed disk"
returned: "On success if disk is found."
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
disk:
description: "Dictionary of all the disk attributes. Disk attributes can be found on your oVirt instance
                 at the following URL: https://ovirt.example.com/ovirt-engine/api/model#types/disk."
returned: "On success if disk is found and C(vm_id) or C(vm_name) wasn't passed."
disk_attachment:
description: "Dictionary of all the disk attachment attributes. Disk attachment attributes can be found
                 on your oVirt instance at the following URL:
https://ovirt.example.com/ovirt-engine/api/model#types/disk_attachment."
returned: "On success if disk is found and C(vm_id) or C(vm_name) was passed and VM was found."
'''
import os
import time
import traceback
import ssl
from httplib import HTTPSConnection
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
convert_to_bytes,
equal,
follow_link,
ovirt_full_argument_spec,
search_by_name,
wait,
)
def _search_by_lun(disks_service, lun_id):
"""
Find disk by LUN ID.
"""
res = [
disk for disk in disks_service.list(search='disk_type=lun') if (
disk.lun_storage.id == lun_id
)
]
return res[0] if res else None
def upload_disk_image(connection, module):
size = os.path.getsize(module.params['image_path'])
transfers_service = connection.system_service().image_transfers_service()
transfer = transfers_service.add(
otypes.ImageTransfer(
image=otypes.Image(
id=module.params['id'],
)
)
)
transfer_service = transfers_service.image_transfer_service(transfer.id)
try:
# After adding a new transfer for the disk, the transfer's status will be INITIALIZING.
# Wait until the init phase is over. The actual transfer can start when its status is "Transferring".
while transfer.phase == otypes.ImageTransferPhase.INITIALIZING:
time.sleep(module.params['poll_interval'])
transfer = transfer_service.get()
# Set needed headers for uploading:
upload_headers = {
'Authorization': transfer.signed_ticket,
}
proxy_url = urlparse(transfer.proxy_url)
context = ssl.create_default_context()
auth = module.params['auth']
if auth.get('insecure'):
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
elif auth.get('ca_file'):
context.load_verify_locations(cafile=auth.get('ca_file'))
proxy_connection = HTTPSConnection(
proxy_url.hostname,
proxy_url.port,
context=context,
)
with open(module.params['image_path'], "rb") as disk:
chunk_size = 1024 * 1024 * 8
pos = 0
while pos < size:
transfer_service.extend()
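                # Content-Range is inclusive, so each chunk covers bytes
                # pos..min(pos + chunk_size, size) - 1 of the total file size.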
upload_headers['Content-Range'] = "bytes %d-%d/%d" % (pos, min(pos + chunk_size, size) - 1, size)
proxy_connection.request(
'PUT',
proxy_url.path,
disk.read(chunk_size),
headers=upload_headers,
)
r = proxy_connection.getresponse()
if r.status >= 400:
raise Exception("Failed to upload disk image.")
pos += chunk_size
finally:
transfer_service.finalize()
while transfer.phase in [
otypes.ImageTransferPhase.TRANSFERRING,
otypes.ImageTransferPhase.FINALIZING_SUCCESS,
]:
time.sleep(module.params['poll_interval'])
transfer = transfer_service.get()
if transfer.phase in [
otypes.ImageTransferPhase.UNKNOWN,
otypes.ImageTransferPhase.FINISHED_FAILURE,
otypes.ImageTransferPhase.FINALIZING_FAILURE,
otypes.ImageTransferPhase.CANCELLED,
]:
raise Exception(
"Error occured while uploading image. The transfer is in %s" % transfer.phase
)
if module.params.get('logical_unit'):
disks_service = connection.system_service().disks_service()
wait(
service=disks_service.service(module.params['id']),
condition=lambda d: d.status == otypes.DiskStatus.OK,
wait=module.params['wait'],
timeout=module.params['timeout'],
)
return True
class DisksModule(BaseModule):
def build_entity(self):
logical_unit = self._module.params.get('logical_unit')
return otypes.Disk(
id=self._module.params.get('id'),
name=self._module.params.get('name'),
description=self._module.params.get('description'),
format=otypes.DiskFormat(
self._module.params.get('format')
) if self._module.params.get('format') else None,
sparse=self._module.params.get('format') != 'raw',
provisioned_size=convert_to_bytes(
self._module.params.get('size')
),
storage_domains=[
otypes.StorageDomain(
name=self._module.params.get('storage_domain'),
),
],
shareable=self._module.params.get('shareable'),
lun_storage=otypes.HostStorage(
type=otypes.StorageType(
logical_unit.get('storage_type', 'iscsi')
),
logical_units=[
otypes.LogicalUnit(
address=logical_unit.get('address'),
port=logical_unit.get('port', 3260),
target=logical_unit.get('target'),
id=logical_unit.get('id'),
username=logical_unit.get('username'),
password=logical_unit.get('password'),
)
],
) if logical_unit else None,
)
def update_storage_domains(self, disk_id):
changed = False
disk_service = self._service.service(disk_id)
disk = disk_service.get()
sds_service = self._connection.system_service().storage_domains_service()
        # We don't support move & copy for non-file-based storages:
if disk.storage_type != otypes.DiskStorageType.IMAGE:
return changed
# Initiate move:
if self._module.params['storage_domain']:
new_disk_storage = search_by_name(sds_service, self._module.params['storage_domain'])
changed = self.action(
action='move',
entity=disk,
action_condition=lambda d: new_disk_storage.id != d.storage_domains[0].id,
wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
storage_domain=otypes.StorageDomain(
id=new_disk_storage.id,
),
post_action=lambda _: time.sleep(self._module.params['poll_interval']),
)['changed']
if self._module.params['storage_domains']:
for sd in self._module.params['storage_domains']:
new_disk_storage = search_by_name(sds_service, sd)
changed = changed or self.action(
action='copy',
entity=disk,
action_condition=(
lambda disk: new_disk_storage.id not in [sd.id for sd in disk.storage_domains]
),
wait_condition=lambda disk: disk.status == otypes.DiskStatus.OK,
storage_domain=otypes.StorageDomain(
id=new_disk_storage.id,
),
)['changed']
return changed
def _update_check(self, entity):
return (
equal(self._module.params.get('description'), entity.description) and
equal(convert_to_bytes(self._module.params.get('size')), entity.provisioned_size) and
equal(self._module.params.get('shareable'), entity.shareable)
)
class DiskAttachmentsModule(DisksModule):
def build_entity(self):
return otypes.DiskAttachment(
disk=super(DiskAttachmentsModule, self).build_entity(),
interface=otypes.DiskInterface(
self._module.params.get('interface')
) if self._module.params.get('interface') else None,
bootable=self._module.params.get('bootable'),
active=True,
)
def update_check(self, entity):
return (
super(DiskAttachmentsModule, self)._update_check(follow_link(self._connection, entity.disk)) and
equal(self._module.params.get('interface'), str(entity.interface)) and
equal(self._module.params.get('bootable'), entity.bootable)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent', 'attached', 'detached'],
default='present'
),
id=dict(default=None),
name=dict(default=None, aliases=['alias']),
vm_name=dict(default=None),
vm_id=dict(default=None),
size=dict(default=None),
interface=dict(default=None,),
storage_domain=dict(default=None),
storage_domains=dict(default=None, type='list'),
profile=dict(default=None),
format=dict(default='cow', choices=['raw', 'cow']),
bootable=dict(default=None, type='bool'),
shareable=dict(default=None, type='bool'),
logical_unit=dict(default=None, type='dict'),
image_path=dict(default=None),
force=dict(default=False, type='bool'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
disk = None
state = module.params['state']
connection = create_connection(module.params.get('auth'))
disks_service = connection.system_service().disks_service()
disks_module = DisksModule(
connection=connection,
module=module,
service=disks_service,
)
lun = module.params.get('logical_unit')
if lun:
disk = _search_by_lun(disks_service, lun.get('id'))
ret = None
        # First take care of creating the disk, if needed:
if state == 'present' or state == 'detached' or state == 'attached':
ret = disks_module.create(
entity=disk,
result_state=otypes.DiskStatus.OK if lun is None else None,
)
is_new_disk = ret['changed']
ret['changed'] = ret['changed'] or disks_module.update_storage_domains(ret['id'])
# We need to pass ID to the module, so in case we want detach/attach disk
# we have this ID specified to attach/detach method:
module.params['id'] = ret['id'] if disk is None else disk.id
# Upload disk image in case it's new disk or force parameter is passed:
if module.params['image_path'] and (is_new_disk or module.params['force']):
uploaded = upload_disk_image(connection, module)
ret['changed'] = ret['changed'] or uploaded
elif state == 'absent':
ret = disks_module.remove()
# If VM was passed attach/detach disks to/from the VM:
        if (module.params.get('vm_id') is not None or module.params.get('vm_name') is not None) and state != 'absent':
vms_service = connection.system_service().vms_service()
# If `vm_id` isn't specified, find VM by name:
vm_id = module.params['vm_id']
if vm_id is None:
vm_id = getattr(search_by_name(vms_service, module.params['vm_name']), 'id', None)
if vm_id is None:
module.fail_json(
msg="VM don't exists, please create it first."
)
disk_attachments_service = vms_service.vm_service(vm_id).disk_attachments_service()
disk_attachments_module = DiskAttachmentsModule(
connection=connection,
module=module,
service=disk_attachments_service,
changed=ret['changed'] if ret else False,
)
if state == 'present' or state == 'attached':
ret = disk_attachments_module.create()
if lun is None:
wait(
service=disk_attachments_service.service(ret['id']),
                        condition=lambda d: follow_link(connection, d.disk).status == otypes.DiskStatus.OK,
wait=module.params['wait'],
timeout=module.params['timeout'],
)
elif state == 'detached':
ret = disk_attachments_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=False)
if __name__ == "__main__":
main()
|
import json
import re
import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import tmp.benchmarks_pb2 as benchmarks_pb2
__file_size_map = {}
def __get_data_size(filename):
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + "/../" + filename
if filename in __file_size_map:
return __file_size_map[filename]
benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
benchmark_dataset.ParseFromString(
open(filename, "rb").read())
size = 0
count = 0
for payload in benchmark_dataset.payload:
size += len(payload)
count += 1
__file_size_map[filename] = (size, 1.0 * size / count)
return size, 1.0 * size / count
def __extract_file_name(file_name):
name_list = re.split(r"[/\.]", file_name)
short_file_name = ""
for name in name_list:
if name[:14] == "google_message":
short_file_name = name
return short_file_name
__results = []
def __parse_cpp_result(filename):
if filename == "":
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
results = json.loads(f.read())
for benchmark in results["benchmarks"]:
data_filename = "".join(
re.split("(_parse_|_serialize)", benchmark["name"])[0])
behavior = benchmark["name"][len(data_filename) + 1:]
if data_filename[:2] == "BM":
data_filename = data_filename[3:]
__results.append({
"language": "cpp",
"dataFilename": data_filename,
"behavior": behavior,
"throughput": benchmark["bytes_per_second"] / 2.0 ** 20
})
def __parse_synthetic_result(filename):
if filename == "":
return
if filename[0] != "/":
filename = os.path.dirname(os.path.abspath(__file__)) + "/" + filename
with open(filename, "rb") as f:
results = json.loads(f.read())
for benchmark in results["benchmarks"]:
__results.append({
"language": "cpp",
"dataFilename": "",
"behavior": "synthetic",
"throughput": 10.0**9 / benchmark["cpu_time_ns"]
})
def __parse_python_result(filename):
if filename == "":
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
results_list = json.loads(f.read())
for results in results_list:
for result in results:
_, avg_size = __get_data_size(result["filename"])
for behavior in result["benchmarks"]:
__results.append({
"language": "python",
"dataFilename": __extract_file_name(result["filename"]),
"behavior": behavior,
"throughput": result["benchmarks"][behavior]
})
def __parse_java_result(filename):
if filename == "":
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
results = json.loads(f.read())
for result in results:
total_weight = 0
total_value = 0
for measurement in result["measurements"]:
total_weight += measurement["weight"]
total_value += measurement["value"]["magnitude"]
avg_time = total_value * 1.0 / total_weight
total_size, _ = __get_data_size(
result["scenario"]["benchmarkSpec"]["parameters"]["dataFile"])
__results.append({
"language": "java",
"throughput": total_size / avg_time * 1e9 / 2 ** 20,
"behavior": result["scenario"]["benchmarkSpec"]["methodName"],
"dataFilename": __extract_file_name(
result["scenario"]["benchmarkSpec"]["parameters"]["dataFile"])
})
def __parse_go_result(filename):
if filename == "":
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
for line in f:
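            # Assumed line shape, inferred from the indexing below:
            #   Benchmark<suite>/<data file path>/<behavior>-<procs>  <iters>  <ns/op>  ...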
result_list = re.split(r"[\ \t]+", line)
if result_list[0][:9] != "Benchmark":
continue
first_slash_index = result_list[0].find('/')
last_slash_index = result_list[0].rfind('/')
full_filename = result_list[0][first_slash_index+1:last_slash_index]
total_bytes, _ = __get_data_size(full_filename)
behavior_with_suffix = result_list[0][last_slash_index+1:]
last_dash = behavior_with_suffix.rfind("-")
if last_dash == -1:
behavior = behavior_with_suffix
else:
behavior = behavior_with_suffix[:last_dash]
__results.append({
"dataFilename": __extract_file_name(full_filename),
"throughput": total_bytes / float(result_list[2]) * 1e9 / 2 ** 20,
"behavior": behavior,
"language": "go"
})
def __parse_custom_result(filename, language):
if filename == "":
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
results = json.loads(f.read())
for result in results:
_, avg_size = __get_data_size(result["filename"])
for behavior in result["benchmarks"]:
__results.append({
"language": language,
"dataFilename": __extract_file_name(result["filename"]),
"behavior": behavior,
"throughput": result["benchmarks"][behavior]
})
def __parse_js_result(filename, language):
return __parse_custom_result(filename, language)
def __parse_php_result(filename, language):
return __parse_custom_result(filename, language)
def get_result_from_file(cpp_file="",
java_file="",
python_file="",
go_file="",
synthetic_file="",
node_file="",
php_c_file="",
php_file=""):
if cpp_file != "":
__parse_cpp_result(cpp_file)
if java_file != "":
__parse_java_result(java_file)
if python_file != "":
__parse_python_result(python_file)
if go_file != "":
__parse_go_result(go_file)
if synthetic_file != "":
__parse_synthetic_result(synthetic_file)
if node_file != "":
__parse_js_result(node_file, "node")
if php_file != "":
__parse_php_result(php_file, "php")
if php_c_file != "":
__parse_php_result(php_c_file, "php")
return __results
|
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
from argparse import ArgumentParser
import django
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.test import TestCase, TransactionTestCase
from django.test.utils import get_runner
from django.utils import six
from django.utils._os import upath
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from django.utils.log import DEFAULT_LOGGING
warnings.simplefilter("error", RemovedInDjango110Warning)
warnings.simplefilter("error", RemovedInDjango20Warning)
RUNTESTS_DIR = os.path.abspath(os.path.dirname(upath(__file__)))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
TMPDIR = tempfile.mkdtemp(prefix='django_')
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
SUBDIRS_TO_SKIP = [
'data',
'import_error_package',
'test_discovery_sample',
'test_discovery_sample2',
'test_runner_deprecation_app',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
CONTRIB_TESTS_TO_APPS = {
'flatpages_tests': 'django.contrib.flatpages',
'redirects_tests': 'django.contrib.redirects',
}
def get_test_modules():
modules = []
discovery_paths = [
(None, RUNTESTS_DIR),
# GIS tests are in nested apps
('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')),
]
for modpath, dirpath in discovery_paths:
for f in os.listdir(dirpath):
if ('.' in f or
os.path.basename(f) in SUBDIRS_TO_SKIP or
os.path.isfile(f) or
not os.path.exists(os.path.join(dirpath, f, '__init__.py'))):
continue
modules.append((modpath, f))
return modules
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels):
if verbosity >= 1:
print("Testing against Django installed in '%s'" % os.path.dirname(django.__file__))
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
# Remove the following line in Django 1.10.
'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
'TEMPLATES': settings.TEMPLATES,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE_CLASSES': settings.MIDDLEWARE_CLASSES,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
# Remove the following line in Django 1.10.
settings.TEMPLATE_DIRS = [TEMPLATE_DIR]
settings.TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE_CLASSES = ALWAYS_MIDDLEWARE_CLASSES
settings.MIGRATION_MODULES = {
# these 'tests.migrations' modules don't actually exist, but this lets
# us skip creating migrations for the test models.
'auth': 'django.contrib.auth.tests.migrations',
'contenttypes': 'contenttypes_tests.migrations',
}
log_config = DEFAULT_LOGGING
# Filter out non-error logging so we don't have to capture it in lots of
# tests.
log_config['loggers']['django']['level'] = 'ERROR'
settings.LOGGING = log_config
if verbosity > 0:
# Ensure any warnings captured to logging are piped through a verbose
# logging handler. If any -W options were passed explicitly on command
# line, warnings are not captured, and this has no effect.
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
warnings.filterwarnings(
'ignore',
'django.contrib.webdesign will be removed in Django 1.10.',
RemovedInDjango110Warning
)
warnings.filterwarnings(
'ignore',
'The GeoManager class is deprecated.',
RemovedInDjango20Warning
)
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# Load all the test model apps.
test_modules = get_test_modules()
# Reduce given test labels to just the app module path
test_labels_set = set()
for label in test_labels:
bits = label.split('.')[:1]
test_labels_set.add('.'.join(bits))
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = '.'.join([modpath, module_name])
else:
module_label = module_name
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
if not test_labels:
module_found_in_labels = True
else:
module_found_in_labels = any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set)
if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
settings.INSTALLED_APPS.append(CONTRIB_TESTS_TO_APPS[module_name])
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
    # Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
    # @override_settings(INSTALLED_APPS=...) on all test cases).
gis = 'django.contrib.gis'
if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
if verbosity >= 2:
print("Importing application %s" % gis)
settings.INSTALLED_APPS.append(gis)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
try:
# Removing the temporary TMPDIR. Ensure we pass in unicode
# so that it will successfully remove temp trees containing
# non-ASCII filenames on Windows. (We're assuming the temp dir
# name itself does not contain non-ASCII characters.)
shutil.rmtree(six.text_type(TMPDIR))
except OSError:
print('Failed to remove temp directory: %s' % TMPDIR)
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
def django_tests(verbosity, interactive, failfast, keepdb, reverse, test_labels, debug_sql):
state = setup(verbosity, test_labels)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
)
failures = test_runner.run_tests(
test_labels or get_installed(),
extra_tests=extra_tests,
)
teardown(state)
return failures
def bisect_tests(bisection_label, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
if __name__ == "__main__":
parser = ArgumentParser(description="Run the Django test suite.")
parser.add_argument('modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".')
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output')
parser.add_argument(
'--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument(
'--failfast', action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first failed '
'test.')
parser.add_argument(
'-k', '--keepdb', action='store_true', dest='keepdb', default=False,
help='Tells Django to preserve the test database between runs.')
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.')
parser.add_argument('--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.')
parser.add_argument('--pair',
help='Run the test suite in pairs with the named test to find problem '
'pairs.')
parser.add_argument('--reverse', action='store_true', default=False,
help='Sort test suites and test cases in opposite order to debug '
'test side effects not apparent with normal execution lineup.')
parser.add_argument('--liveserver',
help='Overrides the default address where the live server (used with '
'LiveServerTestCase) is expected to run from. The default value '
'is localhost:8081.')
parser.add_argument(
'--selenium', action='store_true', dest='selenium', default=False,
help='Run the Selenium tests as well (if Selenium is installed)')
parser.add_argument(
'--debug-sql', action='store_true', dest='debug_sql', default=False,
help='Turn on the SQL query logger within tests')
options = parser.parse_args()
# mock is a required dependency
try:
from django.test import mock # NOQA
except ImportError:
print(
"Please install test dependencies first: \n"
"$ pip install -r requirements/py%s.txt" % sys.version_info.major
)
sys.exit(1)
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_sqlite'
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.liveserver is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
if options.selenium:
os.environ['DJANGO_SELENIUM_TESTS'] = '1'
if options.bisect:
bisect_tests(options.bisect, options, options.modules)
elif options.pair:
paired_tests(options.pair, options, options.modules)
else:
failures = django_tests(options.verbosity, options.interactive,
options.failfast, options.keepdb,
options.reverse, options.modules,
options.debug_sql)
if failures:
sys.exit(bool(failures))
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: apk
short_description: Manages apk packages
description:
- Manages I(apk) packages for Alpine Linux.
author: "Kevin Brebanov (@kbrebanov)"
version_added: "2.0"
options:
available:
description:
- During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them)
if the currently installed package is no longer available from any repository.
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.4"
name:
description:
- A package name, like C(foo), or multiple packages, like C(foo, bar).
required: false
default: null
repository:
description:
- A package repository or multiple repositories.
Unlike with the underlying apk command, this list will override the system repositories rather than supplement them.
required: false
default: null
version_added: "2.4"
state:
description:
- Indicates the desired package(s) state.
- C(present) ensures the package(s) is/are present.
- C(absent) ensures the package(s) is/are absent.
- C(latest) ensures the package(s) is/are present and the latest version(s).
required: false
default: present
choices: [ "present", "absent", "latest" ]
update_cache:
description:
      - Update repository indexes. Can be run with other steps or on its own.
required: false
default: no
choices: [ "yes", "no" ]
upgrade:
description:
- Upgrade all installed packages to their latest version.
required: false
default: no
choices: [ "yes", "no" ]
notes:
- '"name" and "upgrade" are mutually exclusive.'
  - When used with a `loop:`, each package will be processed individually; it is much more efficient to pass the list directly to the `name` option.
'''
EXAMPLES = '''
- apk:
name: foo
update_cache: yes
- apk:
name: foo,bar
update_cache: yes
- apk:
name: foo
state: absent
- apk:
name: foo,bar
state: absent
- apk:
name: foo
state: present
- apk:
name: foo,bar
state: present
- apk:
name: foo
state: latest
update_cache: yes
- apk:
name: foo,bar
state: latest
update_cache: yes
- apk:
upgrade: yes
- apk:
available: yes
upgrade: yes
- apk:
update_cache: yes
- apk:
name: foo
state: latest
update_cache: yes
repository: http://dl-3.alpinelinux.org/alpine/edge/main
'''
RETURN = '''
packages:
description: a list of packages that have been changed
returned: when packages have changed
type: list
sample: ['package', 'other-package']
'''
import re
from ansible.module_utils.basic import AnsibleModule
def parse_for_packages(stdout):
packages = []
data = stdout.split('\n')
regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')
for l in data:
p = regex.search(l)
if p:
packages.append(p.group(1))
return packages
def update_package_db(module, exit):
cmd = "%s update" % (APK_PATH)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr)
elif exit:
module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr)
else:
return True
def query_toplevel(module, name):
# /etc/apk/world contains a list of top-level packages separated by ' ' or \n
# packages may contain repository (@) or version (=<>~) separator characters or start with negation !
regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$')
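    # e.g. for name "foo" this matches "foo", "foo@edge", "foo=1.2.3" or "foo>1.0",
    # but not "foobar"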
with open('/etc/apk/world') as f:
content = f.read().split()
for p in content:
if regex.search(p):
return True
return False
def query_package(module, name):
cmd = "%s -v info --installed %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
return False
def query_latest(module, name):
cmd = "%s version %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name))
match = re.search(search_pattern, stdout)
if match and match.group(2) == "<":
return False
return True
def query_virtual(module, name):
cmd = "%s -v info --description %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = r"^%s: virtual meta package" % (re.escape(name))
if re.search(search_pattern, stdout):
return True
return False
def get_dependencies(module, name):
cmd = "%s -v info --depends %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
dependencies = stdout.split()
if len(dependencies) > 1:
return dependencies[1:]
else:
return []
def upgrade_packages(module, available):
if module.check_mode:
cmd = "%s upgrade --simulate" % (APK_PATH)
else:
cmd = "%s upgrade" % (APK_PATH)
if available:
cmd = "%s --available" % cmd
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
if rc != 0:
module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist)
if re.search(r'^OK', stdout):
module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist)
def install_packages(module, names, state):
upgrade = False
to_install = []
to_upgrade = []
for name in names:
# Check if virtual package
if query_virtual(module, name):
# Get virtual package dependencies
dependencies = get_dependencies(module, name)
for dependency in dependencies:
if state == 'latest' and not query_latest(module, dependency):
to_upgrade.append(dependency)
else:
if not query_toplevel(module, name):
to_install.append(name)
elif state == 'latest' and not query_latest(module, name):
to_upgrade.append(name)
if to_upgrade:
upgrade = True
if not to_install and not upgrade:
module.exit_json(changed=False, msg="package(s) already installed")
packages = " ".join(to_install + to_upgrade)
if upgrade:
if module.check_mode:
cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add --upgrade %s" % (APK_PATH, packages)
else:
if module.check_mode:
cmd = "%s add --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add %s" % (APK_PATH, packages)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
if rc != 0:
module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
def remove_packages(module, names):
installed = []
for name in names:
if query_package(module, name):
installed.append(name)
if not installed:
module.exit_json(changed=False, msg="package(s) already removed")
names = " ".join(installed)
if module.check_mode:
cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
else:
cmd = "%s del --purge %s" % (APK_PATH, names)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
# Check to see if packages are still present because of dependencies
for name in installed:
if query_package(module, name):
rc = 1
break
if rc != 0:
module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
name=dict(type='list'),
repository=dict(type='list'),
update_cache=dict(default='no', type='bool'),
upgrade=dict(default='no', type='bool'),
available=dict(default='no', type='bool'),
),
required_one_of=[['name', 'update_cache', 'upgrade']],
mutually_exclusive=[['name', 'upgrade']],
supports_check_mode=True
)
# Set LANG env since we parse stdout
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
global APK_PATH
APK_PATH = module.get_bin_path('apk', required=True)
p = module.params
# add repositories to the APK_PATH
if p['repository']:
for r in p['repository']:
APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r)
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
if p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p['update_cache']:
update_package_db(module, not p['name'] and not p['upgrade'])
if p['upgrade']:
upgrade_packages(module, p['available'])
if p['state'] in ['present', 'latest']:
install_packages(module, p['name'], p['state'])
elif p['state'] == 'absent':
remove_packages(module, p['name'])
if __name__ == '__main__':
main()
|
"""The tests for the Rfxtrx component."""
import unittest
import pytest
from homeassistant.core import callback
from homeassistant.bootstrap import setup_component
from homeassistant.components import rfxtrx as rfxtrx
from tests.common import get_test_home_assistant
@pytest.mark.skipif("os.environ.get('RFXTRX') != 'RUN'")
class TestRFXTRX(unittest.TestCase):
"""Test the Rfxtrx component."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
rfxtrx.RECEIVED_EVT_SUBSCRIBERS = []
rfxtrx.RFX_DEVICES = {}
if rfxtrx.RFXOBJECT:
rfxtrx.RFXOBJECT.close_connection()
self.hass.stop()
def test_default_config(self):
"""Test configuration."""
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}
}))
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'automatic_add': True,
'devices': {}}}))
self.assertEqual(len(rfxtrx.RFXOBJECT.sensors()), 2)
def test_valid_config(self):
"""Test configuration."""
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}}))
self.hass.config.components.remove('rfxtrx')
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True,
'debug': True}}))
def test_invalid_config(self):
"""Test configuration."""
self.assertFalse(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {}
}))
self.assertFalse(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'invalid_key': True}}))
def test_fire_event(self):
"""Test fire event."""
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}
}))
self.assertTrue(setup_component(self.hass, 'switch', {
'switch': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'0b1100cd0213c7f210010f51': {
'name': 'Test',
rfxtrx.ATTR_FIREEVENT: True}
}}}))
calls = []
@callback
def record_event(event):
"""Add recorded event to set."""
calls.append(event)
self.hass.bus.listen(rfxtrx.EVENT_BUTTON_PRESSED, record_event)
self.hass.block_till_done()
entity = rfxtrx.RFX_DEVICES['213c7f216']
self.assertEqual('Test', entity.name)
self.assertEqual('off', entity.state)
self.assertTrue(entity.should_fire_event)
event = rfxtrx.get_rfx_object('0b1100cd0213c7f210010f51')
event.data = bytearray([0x0b, 0x11, 0x00, 0x10, 0x01, 0x18,
0xcd, 0xea, 0x01, 0x01, 0x0f, 0x70])
rfxtrx.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.hass.block_till_done()
self.assertEqual(event.values['Command'], "On")
self.assertEqual('on', entity.state)
self.assertEqual(self.hass.states.get('switch.test').state, 'on')
self.assertEqual(1, len(calls))
self.assertEqual(calls[0].data,
{'entity_id': 'switch.test', 'state': 'on'})
def test_fire_event_sensor(self):
"""Test fire event."""
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}
}))
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'0a520802060100ff0e0269': {
'name': 'Test',
rfxtrx.ATTR_FIREEVENT: True}
}}}))
calls = []
@callback
def record_event(event):
"""Add recorded event to set."""
calls.append(event)
self.hass.bus.listen("signal_received", record_event)
self.hass.block_till_done()
event = rfxtrx.get_rfx_object('0a520802060101ff0f0269')
event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
rfxtrx.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.hass.block_till_done()
self.assertEqual(1, len(calls))
self.assertEqual(calls[0].data,
{'entity_id': 'sensor.test'})
|