gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python
'''
a2x - A toolchain manager for AsciiDoc (converts Asciidoc text files to other
file formats)
Copyright: Stuart Rackham (c) 2009
License: MIT
Email: srackham@gmail.com
'''
import os
import fnmatch
import HTMLParser
import re
import shutil
import subprocess
import sys
import traceback
import urlparse
import zipfile
import xml.dom.minidom
import mimetypes
# Program name: this script's file name without directory or extension.
PROG = os.path.basename(os.path.splitext(__file__)[0])
VERSION = '8.6.6'
# AsciiDoc global configuration file directory.
# NOTE: CONF_DIR is "fixed up" by Makefile -- don't rename or change syntax.
CONF_DIR = '/usr/local/etc/asciidoc'
######################################################################
# Default configuration file parameters.
######################################################################
# Optional environment variable dictionary passed to
# executing programs. If set to None the existing
# environment is used.
ENV = None
# External executables.
# These names may be rebound by a2x.conf files (see A2X.load_conf()).
ASCIIDOC = 'asciidoc'
XSLTPROC = 'xsltproc'
DBLATEX = 'dblatex'         # pdf generation.
FOP = 'fop'                 # pdf generation (--fop option).
W3M = 'w3m'                 # text generation.
LYNX = 'lynx'               # text generation (if no w3m).
XMLLINT = 'xmllint'         # Set to '' to disable.
EPUBCHECK = 'epubcheck'     # Set to '' to disable.
# External executable default options.
ASCIIDOC_OPTS = ''
DBLATEX_OPTS = ''
FOP_OPTS = ''
XSLTPROC_OPTS = ''
######################################################################
# End of configuration file parameters.
######################################################################
#####################################################################
# Utility functions
#####################################################################
OPTIONS = None  # These functions read verbose and dry_run command options.
def errmsg(msg):
    """Write msg to stderr, prefixed with the program name."""
    line = '%s: %s\n' % (PROG, msg)
    sys.stderr.write(line)
def warning(msg):
    """Report a non-fatal problem on stderr."""
    text = 'WARNING: %s' % msg
    errmsg(text)
def infomsg(msg):
    """Print an informational message, prefixed with the program name, to stdout."""
    sys.stdout.write('%s: %s\n' % (PROG, msg))
def die(msg, exit_code=1):
    """Report a fatal ERROR on stderr and terminate with exit_code."""
    text = 'ERROR: %s' % msg
    errmsg(text)
    sys.exit(exit_code)
def trace():
    """Print the current exception's traceback to stderr, framed by rules."""
    rule = '-' * 60
    errmsg(rule)
    traceback.print_exc(file=sys.stderr)
    errmsg(rule)
def verbose(msg):
    """Emit msg only when --verbose or --dry-run is in effect."""
    if OPTIONS.dry_run or OPTIONS.verbose:
        infomsg(msg)
class AttrDict(dict):
    """
    Like a dictionary except values can be accessed as attributes i.e. obj.foo
    can be used in addition to obj['foo'].
    If self._default has been set then it will be returned if a non-existent
    attribute is accessed (instead of raising an AttributeError).
    """
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError as k:
            # Fall back to the optional '_default' entry if present.
            # ('in' replaces the deprecated dict.has_key(); 'except/raise ... as'
            # replaces the Python-2-only comma syntax -- valid on 2.6+ and 3.)
            if '_default' in self:
                return self['_default']
            else:
                raise AttributeError(k)
    def __setattr__(self, key, value):
        self[key] = value
    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError as k:
            raise AttributeError(k)
    def __repr__(self):
        return '<AttrDict ' + dict.__repr__(self) + '>'
    def __getstate__(self):
        # Pickle support: state is a plain dict snapshot of the contents.
        return dict(self)
    def __setstate__(self, value):
        for k, v in value.items():
            self[k] = v
def isexecutable(file_name):
    """Return True if file_name is a regular file with the execute bit set."""
    if not os.path.isfile(file_name):
        return False
    return os.access(file_name, os.X_OK)
def find_executable(file_name):
    '''
    Search for executable file_name in the system PATH.
    Return full path name or None if not found.
    '''
    def _find_executable(file_name):
        # Resolve a single candidate name.
        if os.path.split(file_name)[0] != '':
            # file_name includes directory so don't search path.
            if not isexecutable(file_name):
                return None
            else:
                return file_name
        # Bare name: try each PATH directory in order; first hit wins.
        for p in os.environ.get('PATH', os.defpath).split(os.pathsep):
            f = os.path.join(p, file_name)
            if isexecutable(f):
                # Resolve symlinks so the returned path is canonical.
                return os.path.realpath(f)
        return None
    if os.name == 'nt' and os.path.splitext(file_name)[1] == '':
        # On Windows an extension-less name is tried with each common
        # executable extension in turn; the bare name itself is never tried.
        for ext in ('.cmd','.bat','.exe'):
            result = _find_executable(file_name + ext)
            if result:
                break
    else:
        result = _find_executable(file_name)
    return result
def shell_cd(path):
    """Change the current working directory (no-op under --dry-run)."""
    verbose('chdir %s' % path)
    if OPTIONS.dry_run:
        return
    os.chdir(path)
def shell_makedirs(path):
    """Create directory path (and parents) unless it already exists."""
    if os.path.isdir(path):
        return
    verbose('creating %s' % path)
    if OPTIONS.dry_run:
        return
    os.makedirs(path)
def shell_copy(src, dst):
    """Copy file src to dst (no-op under --dry-run)."""
    verbose('copying "%s" to "%s"' % (src, dst))
    if OPTIONS.dry_run:
        return
    shutil.copy(src, dst)
def shell_rm(path):
    """Delete the file path if it exists (no-op under --dry-run)."""
    if not os.path.exists(path):
        return
    verbose('deleting %s' % path)
    if OPTIONS.dry_run:
        return
    os.unlink(path)
def shell_rmtree(path):
    """Recursively delete the directory path if it exists (no-op under --dry-run)."""
    if not os.path.isdir(path):
        return
    verbose('deleting %s' % path)
    if OPTIONS.dry_run:
        return
    shutil.rmtree(path)
def shell(cmd, raise_error=True):
    '''
    Execute command cmd in shell and return resulting subprocess.Popen object.
    If raise_error is True then a non-zero return terminates the application.
    Returns None under --dry-run.
    '''
    if os.name == 'nt':
        # TODO: this is probably unnecessary, see:
        # http://groups.google.com/group/asciidoc/browse_frm/thread/9442ee0c419f1242
        # Windows doesn't like running scripts directly so explicitly
        # specify python interpreter.
        # Extract first (quoted or unquoted) argument.
        mo = re.match(r'^\s*"\s*(?P<arg0>[^"]+)\s*"', cmd)
        if not mo:
            mo = re.match(r'^\s*(?P<arg0>[^ ]+)', cmd)
        if mo.group('arg0').endswith('.py'):
            cmd = 'python ' + cmd
        # Remove redundant quoting -- this is not just cosmetic, quoting seems to
        # dramatically decrease the allowed command length in Windows XP.
        cmd = re.sub(r'"([^ ]+?)"', r'\1', cmd)
    verbose('executing: %s' % cmd)
    if OPTIONS.dry_run:
        return
    if OPTIONS.verbose:
        # Let the child write directly to the console.
        stdout = stderr = None
    else:
        # Capture (and discard) child output when not verbose.
        stdout = stderr = subprocess.PIPE
    try:
        popen = subprocess.Popen(cmd, stdout=stdout, stderr=stderr,
                shell=True, env=ENV)
    except OSError as e:
        # 'except ... as' replaces the Python-2-only comma syntax.
        die('failed: %s: %s' % (cmd, e))
    popen.wait()
    if popen.returncode != 0 and raise_error:
        die('%s returned non-zero exit status %d' % (cmd, popen.returncode))
    return popen
def find_resources(files, tagname, attrname, filter=None):
    '''
    Search all files and return a list of local URIs from attrname attribute
    values in tagname tags.
    Handles HTML open and XHTML closed tags.
    Non-local URIs are skipped.
    files can be a file name or a list of file names.
    The filter function takes a dictionary of tag attributes and returns True if
    the URI is to be included.
    '''
    class FindResources(HTMLParser.HTMLParser):
        # Nested parser class shares locals with enclosing function.
        def handle_startendtag(self, tag, attrs):
            # XHTML self-closing tag: treat the same as an open tag.
            self.handle_starttag(tag, attrs)
        def handle_starttag(self, tag, attrs):
            attrs = dict(attrs)
            if tag == tagname and (filter is None or filter(attrs)):
                # Accept only local URIs.
                uri = urlparse.urlparse(attrs[attrname])
                # Keep the path component only when there is no scheme (or
                # a file: scheme) and no network location.
                if uri[0] in ('','file') and not uri[1] and uri[2]:
                    result.append(uri[2])
    if isinstance(files, str):
        files = [files]
    result = []
    for f in files:
        verbose('finding resources in: %s' % f)
        if OPTIONS.dry_run:
            continue
        parser = FindResources()
        # HTMLParser has problems with non-ASCII strings.
        # See http://bugs.python.org/issue3932
        # If the file declares an XML encoding, decode before feeding.
        mo = re.search(r'^<\?xml.* encoding="(.*?)"', open(f).readline())
        if mo:
            encoding = mo.group(1)
            parser.feed(open(f).read().decode(encoding))
        else:
            parser.feed(open(f).read())
        parser.close()
    result = list(set(result))  # Drop duplicate values.
    result.sort()
    return result
# NOT USED.
def copy_files(files, src_dir, dst_dir):
    '''
    Copy list of relative file names from src_dir to dst_dir.
    '''
    for name in files:
        name = os.path.normpath(name)
        if os.path.isabs(name):
            # Only relative resource names are copied.
            continue
        src = os.path.join(src_dir, name)
        dst = os.path.join(dst_dir, name)
        if os.path.exists(dst):
            # Never overwrite an existing destination file.
            continue
        if not os.path.isfile(src):
            warning('missing file: %s' % src)
            continue
        shell_makedirs(os.path.dirname(dst))
        shell_copy(src, dst)
def find_files(path, pattern):
    '''
    Return list of file names matching pattern in directory path.
    '''
    matches = []
    for dirpath, _, filenames in os.walk(path):
        matches.extend(
            os.path.normpath(os.path.join(dirpath, name))
            for name in filenames
            if fnmatch.fnmatch(name, pattern)
        )
    return matches
def exec_xsltproc(xsl_file, xml_file, dst_dir, opts=''):
    """Run xsltproc on xml_file with xsl_file, from inside dst_dir."""
    prev_dir = os.getcwd()
    shell_cd(dst_dir)
    try:
        shell('"%s" %s "%s" "%s"' % (XSLTPROC, opts, xsl_file, xml_file))
    finally:
        # Always restore the original working directory.
        shell_cd(prev_dir)
def get_source_options(asciidoc_file):
    '''
    Look for a2x command options in AsciiDoc source file.
    Limitation: options cannot contain double-quote characters.
    '''
    def split_options(text):
        # Split text into an argument list: double-quoted sections may
        # contain spaces, everything else splits on single spaces.
        args = []
        current = ''
        in_quotes = False
        for ch in text:
            if ch == '"':
                if in_quotes:
                    args.append(current)
                    current = ''
                    in_quotes = False
                else:
                    in_quotes = True
            elif ch == ' ':
                if in_quotes:
                    current += ch
                elif current:
                    args.append(current)
                    current = ''
            else:
                current += ch
        if current:
            args.append(current)
        return args
    if not os.path.isfile(asciidoc_file):
        return []
    # Collect the text following every '// a2x:' comment line.
    text = ''
    for line in open(asciidoc_file):
        mo = re.search(r'^//\s*a2x:', line)
        if mo:
            text += ' ' + line[mo.end():].strip()
    return split_options(text)
#####################################################################
# Application class
#####################################################################
class A2X(AttrDict):
    '''
    a2x options and conversion functions.
    Attribute access is dict-backed (AttrDict): command-line options parsed in
    the main line become attributes of this object.
    '''
    def execute(self):
        '''
        Process a2x command.
        '''
        self.process_options()
        # Append configuration file options.
        self.asciidoc_opts += ' ' + ASCIIDOC_OPTS
        self.dblatex_opts += ' ' + DBLATEX_OPTS
        self.fop_opts += ' ' + FOP_OPTS
        self.xsltproc_opts += ' ' + XSLTPROC_OPTS
        # Execute to_* functions.
        # Dispatch to the conversion method named after the output format.
        self.__getattribute__('to_'+self.format)()
        if not (self.keep_artifacts or self.format == 'docbook' or self.skip_asciidoc):
            # The intermediate DocBook file is a build artifact unless it is
            # itself the requested output or was supplied as the input.
            shell_rm(self.dst_path('.xml'))
    def load_conf(self):
        '''
        Load a2x configuration file from default locations and --conf-file
        option.
        '''
        global ASCIIDOC
        CONF_FILE = 'a2x.conf'
        a2xdir = os.path.dirname(os.path.realpath(__file__))
        conf_files = []
        # From a2x.py directory.
        conf_files.append(os.path.join(a2xdir, CONF_FILE))
        # If the asciidoc executable and conf files are in the a2x directory
        # then use the local copy of asciidoc and skip the global a2x conf.
        asciidoc = os.path.join(a2xdir, 'asciidoc.py')
        asciidoc_conf = os.path.join(a2xdir, 'asciidoc.conf')
        if os.path.isfile(asciidoc) and os.path.isfile(asciidoc_conf):
            self.asciidoc = asciidoc
        else:
            self.asciidoc = None
        # From global conf directory.
        conf_files.append(os.path.join(CONF_DIR, CONF_FILE))
        # From $HOME directory.
        home_dir = os.environ.get('HOME')
        if home_dir is not None:
            conf_files.append(os.path.join(home_dir, '.asciidoc', CONF_FILE))
        # From --conf-file option.
        if self.conf_file is not None:
            if not os.path.isfile(self.conf_file):
                die('missing configuration file: %s' % self.conf_file)
            conf_files.append(self.conf_file)
        # From --xsl-file option.
        if self.xsl_file is not None:
            if not os.path.isfile(self.xsl_file):
                die('missing XSL file: %s' % self.xsl_file)
            self.xsl_file = os.path.abspath(self.xsl_file)
        # Load ordered files.
        for f in conf_files:
            if os.path.isfile(f):
                verbose('loading conf file: %s' % f)
                # Conf files are executed as Python code into the module
                # globals (they may rebind ASCIIDOC, XSLTPROC, etc.).
                execfile(f, globals())
        # If asciidoc is not local to a2x then search the PATH.
        if not self.asciidoc:
            self.asciidoc = find_executable(ASCIIDOC)
            if not self.asciidoc:
                die('unable to find asciidoc: %s' % ASCIIDOC)
    def process_options(self):
        '''
        Validate command options and set defaults.
        '''
        if not os.path.isfile(self.asciidoc_file):
            die('missing SOURCE_FILE: %s' % self.asciidoc_file)
        self.asciidoc_file = os.path.abspath(self.asciidoc_file)
        if not self.destination_dir:
            # Default the output location to the source file's directory.
            self.destination_dir = os.path.dirname(self.asciidoc_file)
        else:
            if not os.path.isdir(self.destination_dir):
                die('missing --destination-dir: %s' % self.destination_dir)
            self.destination_dir = os.path.abspath(self.destination_dir)
        self.resource_dirs = []
        self.resource_files = []
        if self.resource_manifest:
            if not os.path.isfile(self.resource_manifest):
                die('missing --resource-manifest: %s' % self.resource_manifest)
            for r in open(self.resource_manifest):
                self.resources.append(r.strip())
        # Classify each --resource entry: directory, mimetype mapping or file.
        for r in self.resources:
            r = os.path.expanduser(r)
            r = os.path.expandvars(r)
            if r.endswith(('/','\\')):
                # Trailing separator: must be an existing directory.
                if os.path.isdir(r):
                    self.resource_dirs.append(r)
                else:
                    die('missing resource directory: %s' % r)
            elif os.path.isdir(r):
                self.resource_dirs.append(r)
            elif r.startswith('.') and '=' in r:
                # '.ext=mimetype' entries register a mimetype mapping rather
                # than naming a resource file.
                ext, mimetype = r.split('=')
                mimetypes.add_type(mimetype, ext)
            else:
                self.resource_files.append(r)
        # Implicit resource directories shipped with asciidoc.
        for p in (os.path.dirname(self.asciidoc), CONF_DIR):
            for d in ('images','stylesheets'):
                d = os.path.join(p,d)
                if os.path.isdir(d):
                    self.resource_dirs.append(d)
        verbose('resource files: %s' % self.resource_files)
        verbose('resource directories: %s' % self.resource_dirs)
        if not self.doctype and self.format == 'manpage':
            self.doctype = 'manpage'
        if self.doctype:
            self.asciidoc_opts += ' --doctype %s' % self.doctype
        for attr in self.attributes:
            self.asciidoc_opts += ' --attribute "%s"' % attr
        # self.xsltproc_opts += ' --nonet'
        if self.verbose:
            self.asciidoc_opts += ' --verbose'
            self.dblatex_opts += ' -V'
        # Assemble DocBook XSL string parameters.
        if self.icons or self.icons_dir:
            params = [
                'callout.graphics 1',
                'navig.graphics 1',
                'admon.textlabel 0',
                'admon.graphics 1',
            ]
            if self.icons_dir:
                params += [
                    'admon.graphics.path "%s/"' % self.icons_dir,
                    'callout.graphics.path "%s/callouts/"' % self.icons_dir,
                    'navig.graphics.path "%s/"' % self.icons_dir,
                ]
        else:
            params = [
                'callout.graphics 0',
                'navig.graphics 0',
                'admon.textlabel 1',
                'admon.graphics 0',
            ]
        if self.stylesheet:
            params += ['html.stylesheet "%s"' % self.stylesheet]
        if self.format == 'htmlhelp':
            params += ['htmlhelp.chm "%s"' % self.basename('.chm'),
                       'htmlhelp.hhp "%s"' % self.basename('.hhp'),
                       'htmlhelp.hhk "%s"' % self.basename('.hhk'),
                       'htmlhelp.hhc "%s"' % self.basename('.hhc')]
        if self.doctype == 'book':
            params += ['toc.section.depth 1']
            # Books are chunked at chapter level.
            params += ['chunk.section.depth 0']
        for o in params:
            # Don't override a parameter the user has already set explicitly.
            if o.split()[0]+' ' not in self.xsltproc_opts:
                self.xsltproc_opts += ' --stringparam ' + o
        if self.fop_opts:
            # Supplying FOP options implies --fop.
            self.fop = True
        if os.path.splitext(self.asciidoc_file)[1].lower() == '.xml':
            # A DocBook (.xml) source file skips the asciidoc conversion step.
            self.skip_asciidoc = True
        else:
            self.skip_asciidoc = False
    def dst_path(self, ext):
        '''
        Return name of file or directory in the destination directory with
        the same name as the asciidoc source file but with extension ext.
        '''
        return os.path.join(self.destination_dir, self.basename(ext))
    def basename(self, ext):
        '''
        Return the base name of the asciidoc source file but with extension
        ext.
        '''
        return os.path.basename(os.path.splitext(self.asciidoc_file)[0]) + ext
    def asciidoc_conf_file(self, path):
        '''
        Return full path name of file in asciidoc configuration files directory.
        Search first the directory containing the asciidoc executable then
        the global configuration file directory.
        '''
        f = os.path.join(os.path.dirname(self.asciidoc), path)
        if not os.path.isfile(f):
            f = os.path.join(CONF_DIR, path)
            if not os.path.isfile(f):
                die('missing configuration file: %s' % f)
        return os.path.normpath(f)
    def xsl_stylesheet(self, file_name=None):
        '''
        Return full path name of file in asciidoc docbook-xsl configuration
        directory.
        If an XSL file was specified with the --xsl-file option then it is
        returned.
        '''
        if self.xsl_file is not None:
            return self.xsl_file
        if not file_name:
            # Default stylesheet is named after the output format.
            file_name = self.format + '.xsl'
        return self.asciidoc_conf_file(os.path.join('docbook-xsl', file_name))
    def copy_resources(self, html_files, src_dir, dst_dir, resources=[]):
        '''
        Search html_files for images and CSS resource URIs (html_files can be a
        list of file names or a single file name).
        Copy them from the src_dir to the dst_dir.
        If not found in src_dir then recursively search all specified
        resource directories.
        Optional additional resources files can be passed in the resources list.
        '''
        resources = resources[:]    # Copy so the (shared) default list is never mutated.
        resources += find_resources(html_files, 'link', 'href',
                        lambda attrs: attrs.get('type') == 'text/css')
        resources += find_resources(html_files, 'img', 'src')
        resources += self.resource_files
        resources = list(set(resources))    # Drop duplicates.
        resources.sort()
        for f in resources:
            if '=' in f:
                # A 'src=dst' entry renames the resource at the destination.
                src, dst = f.split('=')
                if not dst:
                    dst = src
            else:
                src = dst = f
            src = os.path.normpath(src)
            dst = os.path.normpath(dst)
            if os.path.isabs(dst):
                die('absolute resource file name: %s' % dst)
            if dst.startswith(os.pardir):
                die('resource file outside destination directory: %s' % dst)
            src = os.path.join(src_dir, src)
            dst = os.path.join(dst_dir, dst)
            if not os.path.isfile(src):
                # Not in src_dir: search the resource directories by base name.
                for d in self.resource_dirs:
                    d = os.path.join(src_dir, d)
                    found = find_files(d, os.path.basename(src))
                    if found:
                        src = found[0]
                        break
                else:
                    # Not found anywhere: tolerate only if it already exists
                    # at the destination.
                    if not os.path.isfile(dst):
                        die('missing resource: %s' % src)
                    continue
            # Arrive here if resource file has been found.
            if os.path.normpath(src) != os.path.normpath(dst):
                dstdir = os.path.dirname(dst)
                shell_makedirs(dstdir)
                shell_copy(src, dst)
    def to_docbook(self):
        '''
        Use asciidoc to convert asciidoc_file to DocBook.
        Validates the result with xmllint unless disabled.
        '''
        docbook_file = self.dst_path('.xml')
        if self.skip_asciidoc:
            # Input is already DocBook; it just has to exist.
            if not os.path.isfile(docbook_file):
                die('missing docbook file: %s' % docbook_file)
            return
        shell('"%s" --backend docbook -a "a2x-format=%s" %s --out-file "%s" "%s"' %
             (self.asciidoc, self.format, self.asciidoc_opts, docbook_file, self.asciidoc_file))
        if not self.no_xmllint and XMLLINT:
            shell('"%s" --nonet --noout --valid "%s"' % (XMLLINT, docbook_file))
    def to_xhtml(self):
        '''Convert to a single XHTML file and copy referenced resources.'''
        self.to_docbook()
        docbook_file = self.dst_path('.xml')
        xhtml_file = self.dst_path('.html')
        opts = '%s --output "%s"' % (self.xsltproc_opts, xhtml_file)
        exec_xsltproc(self.xsl_stylesheet(), docbook_file, self.destination_dir, opts)
        src_dir = os.path.dirname(self.asciidoc_file)
        self.copy_resources(xhtml_file, src_dir, self.destination_dir)
    def to_manpage(self):
        '''Convert to roff man page(s) via the DocBook XSL manpages stylesheet.'''
        self.to_docbook()
        docbook_file = self.dst_path('.xml')
        opts = self.xsltproc_opts
        exec_xsltproc(self.xsl_stylesheet(), docbook_file, self.destination_dir, opts)
    def to_pdf(self):
        '''Convert to PDF with FOP (--fop) or dblatex (default).'''
        if self.fop:
            self.exec_fop()
        else:
            self.exec_dblatex()
    def exec_fop(self):
        '''Generate PDF: DocBook -> XSL-FO (xsltproc) -> PDF (FOP).'''
        self.to_docbook()
        docbook_file = self.dst_path('.xml')
        xsl = self.xsl_stylesheet('fo.xsl')
        fo = self.dst_path('.fo')
        pdf = self.dst_path('.pdf')
        opts = '%s --output "%s"' % (self.xsltproc_opts, fo)
        exec_xsltproc(xsl, docbook_file, self.destination_dir, opts)
        shell('"%s" %s -fo "%s" -pdf "%s"' % (FOP, self.fop_opts, fo, pdf))
        if not self.keep_artifacts:
            # The intermediate XSL-FO file is an artifact.
            shell_rm(fo)
    def exec_dblatex(self):
        '''Generate pdf/dvi/ps/tex output from DocBook with dblatex.'''
        self.to_docbook()
        docbook_file = self.dst_path('.xml')
        xsl = self.asciidoc_conf_file(os.path.join('dblatex','asciidoc-dblatex.xsl'))
        sty = self.asciidoc_conf_file(os.path.join('dblatex','asciidoc-dblatex.sty'))
        shell('"%s" -t %s -p "%s" -s "%s" %s "%s"' %
             (DBLATEX, self.format, xsl, sty, self.dblatex_opts, docbook_file))
    def to_dvi(self):
        '''Convert to DVI (dblatex).'''
        self.exec_dblatex()
    def to_ps(self):
        '''Convert to PostScript (dblatex).'''
        self.exec_dblatex()
    def to_tex(self):
        '''Convert to LaTeX (dblatex).'''
        self.exec_dblatex()
    def to_htmlhelp(self):
        '''Convert to HTML Help project files (same pipeline as chunked HTML).'''
        self.to_chunked()
    def to_chunked(self):
        '''Convert to chunked (multi-file) HTML in a .chunked/.htmlhelp directory.'''
        self.to_docbook()
        docbook_file = self.dst_path('.xml')
        opts = self.xsltproc_opts
        xsl_file = self.xsl_stylesheet()
        # self.format is 'chunked' or 'htmlhelp' here (dispatched from execute()).
        if self.format == 'chunked':
            dst_dir = self.dst_path('.chunked')
        elif self.format == 'htmlhelp':
            dst_dir = self.dst_path('.htmlhelp')
        if not 'base.dir ' in opts:
            opts += ' --stringparam base.dir "%s/"' % os.path.basename(dst_dir)
        # Create content.
        shell_rmtree(dst_dir)
        shell_makedirs(dst_dir)
        exec_xsltproc(xsl_file, docbook_file, self.destination_dir, opts)
        html_files = find_files(dst_dir, '*.html')
        src_dir = os.path.dirname(self.asciidoc_file)
        self.copy_resources(html_files, src_dir, dst_dir)
    def update_epub_manifest(self, opf_file):
        '''
        Scan the OEBPS directory for any files that have not been registered in
        the OPF manifest then add them to the manifest.
        '''
        opf_dir = os.path.dirname(opf_file)
        resource_files = []
        for (p,dirs,files) in os.walk(os.path.dirname(opf_file)):
            for f in files:
                f = os.path.join(p,f)
                if os.path.isfile(f):
                    assert f.startswith(opf_dir)
                    # Convert to a path relative to the OPF directory.
                    f = '.' + f[len(opf_dir):]
                    f = os.path.normpath(f)
                    if f not in ['content.opf']:
                        resource_files.append(f)
        opf = xml.dom.minidom.parseString(open(opf_file).read())
        manifest_files = []
        manifest = opf.getElementsByTagName('manifest')[0]
        for el in manifest.getElementsByTagName('item'):
            f = el.getAttribute('href')
            f = os.path.normpath(f)
            manifest_files.append(f)
        count = 0
        for f in resource_files:
            if f not in manifest_files:
                count += 1
                verbose('adding to manifest: %s' % f)
                item = opf.createElement('item')
                # OPF hrefs always use forward slashes.
                item.setAttribute('href', f.replace(os.path.sep, '/'))
                item.setAttribute('id', 'a2x-%d' % count)
                mimetype = mimetypes.guess_type(f)[0]
                if mimetype is None:
                    die('unknown mimetype: %s' % f)
                item.setAttribute('media-type', mimetype)
                manifest.appendChild(item)
        if count > 0:
            # Only rewrite the OPF file if something was added.
            open(opf_file, 'w').write(opf.toxml())
    def to_epub(self):
        '''Convert to EPUB: chunked XHTML content zipped per the OCF spec.'''
        self.to_docbook()
        xsl_file = self.xsl_stylesheet()
        docbook_file = self.dst_path('.xml')
        epub_file = self.dst_path('.epub')
        build_dir = epub_file + '.d'
        shell_rmtree(build_dir)
        shell_makedirs(build_dir)
        # Create content.
        exec_xsltproc(xsl_file, docbook_file, build_dir, self.xsltproc_opts)
        # Copy resources referenced in the OPF and resources referenced by the
        # generated HTML (in theory DocBook XSL should ensure they are
        # identical but this is not always the case).
        src_dir = os.path.dirname(self.asciidoc_file)
        dst_dir = os.path.join(build_dir, 'OEBPS')
        opf_file = os.path.join(dst_dir, 'content.opf')
        opf_resources = find_resources(opf_file, 'item', 'href')
        html_files = find_files(dst_dir, '*.html')
        self.copy_resources(html_files, src_dir, dst_dir, opf_resources)
        # Register any unregistered resources.
        self.update_epub_manifest(opf_file)
        # Build epub archive.
        cwd = os.getcwd()
        shell_cd(build_dir)
        try:
            if not self.dry_run:
                zip = zipfile.ZipFile(epub_file, 'w')
                try:
                    # Create and add uncompressed mimetype file.
                    # The EPUB spec requires 'mimetype' be the first, stored
                    # (uncompressed) archive entry.
                    verbose('archiving: mimetype')
                    open('mimetype','w').write('application/epub+zip')
                    zip.write('mimetype', compress_type=zipfile.ZIP_STORED)
                    # Compress all remaining files.
                    for (p,dirs,files) in os.walk('.'):
                        for f in files:
                            f = os.path.normpath(os.path.join(p,f))
                            if f != 'mimetype':
                                verbose('archiving: %s' % f)
                                zip.write(f, compress_type=zipfile.ZIP_DEFLATED)
                finally:
                    zip.close()
            verbose('created archive: %s' % epub_file)
        finally:
            shell_cd(cwd)
        if not self.keep_artifacts:
            shell_rmtree(build_dir)
        if self.epubcheck and EPUBCHECK:
            if not find_executable(EPUBCHECK):
                warning('epubcheck skipped: unable to find executable: %s' % EPUBCHECK)
            else:
                shell('"%s" "%s"' % (EPUBCHECK, epub_file))
    def to_text(self):
        '''Convert to plain text by dumping intermediate HTML with lynx or w3m.'''
        text_file = self.dst_path('.text')
        html_file = self.dst_path('.text.html')
        if self.lynx:
            # asciidoc generates HTML4 directly, then lynx dumps it to text.
            shell('"%s" %s --conf-file "%s" -b html4 -a "a2x-format=%s" -o "%s" "%s"' %
                 (self.asciidoc, self.asciidoc_opts, self.asciidoc_conf_file('text.conf'),
                 self.format, html_file, self.asciidoc_file))
            shell('"%s" -dump "%s" > "%s"' %
                 (LYNX, html_file, text_file))
        else:
            # Use w3m(1).
            self.to_docbook()
            docbook_file = self.dst_path('.xml')
            opts = '%s --output "%s"' % (self.xsltproc_opts, html_file)
            exec_xsltproc(self.xsl_stylesheet(), docbook_file,
                    self.destination_dir, opts)
            shell('"%s" -cols 70 -dump -T text/html -no-graph "%s" > "%s"' %
                 (W3M, html_file, text_file))
        if not self.keep_artifacts:
            # The intermediate HTML file is an artifact.
            shell_rm(html_file)
#####################################################################
# Script main line.
#####################################################################
if __name__ == '__main__':
    description = '''A toolchain manager for AsciiDoc (converts Asciidoc text files to other file formats)'''
    from optparse import OptionParser
    parser = OptionParser(usage='usage: %prog [OPTIONS] SOURCE_FILE',
        version='%s %s' % (PROG,VERSION),
        description=description)
    parser.add_option('-a', '--attribute',
        action='append', dest='attributes', default=[], metavar='ATTRIBUTE',
        help='set asciidoc attribute value')
    parser.add_option('--asciidoc-opts',
        action='append', dest='asciidoc_opts', default=[],
        metavar='ASCIIDOC_OPTS', help='asciidoc options')
    #DEPRECATED
    parser.add_option('--copy',
        action='store_true', dest='copy', default=False,
        help='DEPRECATED: does nothing')
    parser.add_option('--conf-file',
        dest='conf_file', default=None, metavar='CONF_FILE',
        help='configuration file')
    parser.add_option('-D', '--destination-dir',
        action='store', dest='destination_dir', default=None, metavar='PATH',
        help='output directory (defaults to SOURCE_FILE directory)')
    parser.add_option('-d','--doctype',
        action='store', dest='doctype', metavar='DOCTYPE',
        choices=('article','manpage','book'),
        help='article, manpage, book')
    parser.add_option('--epubcheck',
        action='store_true', dest='epubcheck', default=False,
        help='check EPUB output with epubcheck')
    parser.add_option('-f','--format',
        action='store', dest='format', metavar='FORMAT', default = 'pdf',
        choices=('chunked','epub','htmlhelp','manpage','pdf', 'text',
                 'xhtml','dvi','ps','tex','docbook'),
        help='chunked, epub, htmlhelp, manpage, pdf, text, xhtml, dvi, ps, tex, docbook')
    parser.add_option('--icons',
        action='store_true', dest='icons', default=False,
        help='use admonition, callout and navigation icons')
    parser.add_option('--icons-dir',
        action='store', dest='icons_dir',
        default=None, metavar='PATH',
        help='admonition and navigation icon directory')
    parser.add_option('-k', '--keep-artifacts',
        action='store_true', dest='keep_artifacts', default=False,
        help='do not delete temporary build files')
    parser.add_option('--lynx',
        action='store_true', dest='lynx', default=False,
        help='use lynx to generate text files')
    parser.add_option('-L', '--no-xmllint',
        action='store_true', dest='no_xmllint', default=False,
        help='do not check asciidoc output with xmllint')
    parser.add_option('-n','--dry-run',
        action='store_true', dest='dry_run', default=False,
        help='just print the commands that would have been executed')
    parser.add_option('-r','--resource',
        action='append', dest='resources', default=[],
        metavar='PATH',
        help='resource file or directory containing resource files')
    parser.add_option('-m', '--resource-manifest',
        action='store', dest='resource_manifest', default=None, metavar='FILE',
        help='read resources from FILE')
    #DEPRECATED
    parser.add_option('--resource-dir',
        action='append', dest='resources', default=[],
        metavar='PATH',
        help='DEPRECATED: use --resource')
    #DEPRECATED
    parser.add_option('-s','--skip-asciidoc',
        action='store_true', dest='skip_asciidoc', default=False,
        help='DEPRECATED: redundant')
    parser.add_option('--stylesheet',
        action='store', dest='stylesheet', default=None,
        metavar='STYLESHEET',
        help='HTML CSS stylesheet file name')
    #DEPRECATED
    parser.add_option('--safe',
        action='store_true', dest='safe', default=False,
        help='DEPRECATED: does nothing')
    parser.add_option('--dblatex-opts',
        action='append', dest='dblatex_opts', default=[],
        metavar='DBLATEX_OPTS', help='dblatex options')
    parser.add_option('--fop',
        action='store_true', dest='fop', default=False,
        help='use FOP to generate PDF files')
    parser.add_option('--fop-opts',
        action='append', dest='fop_opts', default=[],
        metavar='FOP_OPTS', help='options for FOP pdf generation')
    parser.add_option('--xsltproc-opts',
        action='append', dest='xsltproc_opts', default=[],
        metavar='XSLTPROC_OPTS', help='xsltproc options for XSL stylesheets')
    parser.add_option('--xsl-file',
        action='store', dest='xsl_file', metavar='XSL_FILE',
        help='custom XSL stylesheet')
    parser.add_option('-v', '--verbose',
        action='count', dest='verbose', default=0,
        help='increase verbosity')
    if len(sys.argv) == 1:
        # No arguments at all: show the help text and exit.
        parser.parse_args(['--help'])
    # Options embedded in the source file ('// a2x:' lines) come first so
    # command-line options can override them.
    source_options = get_source_options(sys.argv[-1])
    argv = source_options + sys.argv[1:]
    opts, args = parser.parse_args(argv)
    if len(args) != 1:
        parser.error('incorrect number of arguments')
    # Repeatable *_opts options are accumulated into single strings.
    opts.asciidoc_opts = ' '.join(opts.asciidoc_opts)
    opts.dblatex_opts  = ' '.join(opts.dblatex_opts)
    opts.fop_opts      = ' '.join(opts.fop_opts)
    opts.xsltproc_opts = ' '.join(opts.xsltproc_opts)
    # Convert optparse.Values to a plain dict. (Was 'eval(str(opts))', which
    # depends on every option value having an eval-able repr and needlessly
    # evaluates generated text; vars() reads the attributes directly.)
    opts = vars(opts)
    a2x = A2X(opts)
    OPTIONS = a2x           # verbose and dry_run used by utility functions.
    verbose('args: %r' % argv)
    a2x.asciidoc_file = args[0]
    try:
        a2x.load_conf()
        a2x.execute()
    except KeyboardInterrupt:
        exit(1)
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import pkgutil
import subprocess
from contextlib import contextmanager
from six import string_types
from twitter.common import log
from pants.base.revision import Revision
from pants.util.contextutil import temporary_dir
class Distribution(object):
"""Represents a java distribution - either a JRE or a JDK installed on the local system.
In particular provides access to the distribution's binaries; ie: java while ensuring basic
constraints are met. For example a minimum version can be specified if you know need to compile
source code or run bytecode that exercise features only available in that version forward.
"""
  class Error(Exception):
    """Indicates an invalid java distribution."""

  # Located distributions, keyed by (minimum_version, maximum_version, jdk).
  # Shared by all callers of cached(); see cached() below.
  _CACHE = {}
@classmethod
def cached(cls, minimum_version=None, maximum_version=None, jdk=False):
def scan_constraint_match():
# Convert strings to Revision objects for apples-to-apples comparison.
max_version = cls._parse_java_version("maximum_version", maximum_version)
min_version = cls._parse_java_version("minimum_version", minimum_version)
for dist in cls._CACHE.values():
if min_version and dist.version < min_version:
continue
if max_version and dist.version > max_version:
continue
if jdk and not dist.jdk:
continue
return dist
key = (minimum_version, maximum_version, jdk)
dist = cls._CACHE.get(key)
if not dist:
dist = scan_constraint_match()
if not dist:
dist = cls.locate(minimum_version=minimum_version, maximum_version=maximum_version, jdk=jdk)
cls._CACHE[key] = dist
return dist
@classmethod
def locate(cls, minimum_version=None, maximum_version=None, jdk=False):
"""Finds a java distribution that meets any given constraints and returns it.
First looks in JDK_HOME and JAVA_HOME if defined falling back to a search on the PATH.
Raises Distribution.Error if no suitable java distribution could be found.
"""
def home_bin_path(home_env_var):
home = os.environ.get(home_env_var)
return os.path.join(home, 'bin') if home else None
def search_path():
yield home_bin_path('JDK_HOME')
yield home_bin_path('JAVA_HOME')
path = os.environ.get('PATH')
if path:
for p in path.strip().split(os.pathsep):
yield p
for path in filter(None, search_path()):
try:
dist = cls(bin_path=path, minimum_version=minimum_version,
maximum_version=maximum_version, jdk=jdk)
dist.validate()
log.debug('Located %s for constraints: minimum_version'
' %s, maximum_version %s, jdk %s' % (dist, minimum_version, maximum_version, jdk))
return dist
except (ValueError, cls.Error):
pass
raise cls.Error('Failed to locate a %s distribution with minimum_version %s, maximum_version %s'
% ('JDK' if jdk else 'JRE', minimum_version, maximum_version))
@staticmethod
def _parse_java_version(name, version):
# Java version strings have been well defined since release 1.3.1 as defined here:
# http://www.oracle.com/technetwork/java/javase/versioning-naming-139433.html
# These version strings comply with semver except that the traditional pre-release semver
# slot (the 4th) can be delimited by an _ in the case of update releases of the jdk.
# We accommodate that difference here.
if isinstance(version, string_types):
version = Revision.semver(version.replace('_', '-'))
if version and not isinstance(version, Revision):
raise ValueError('%s must be a string or a Revision object, given: %s' % (name, version))
return version
@staticmethod
def _is_executable(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
  def __init__(self, bin_path='/usr/bin', minimum_version=None, maximum_version=None, jdk=False):
    """Creates a distribution wrapping the given bin_path.
    :param string bin_path: the path to the java distributions bin dir
    :param minimum_version: a modified semantic version string or else a Revision object
    :param maximum_version: a modified semantic version string or else a Revision object
    :param bool jdk: ``True`` to require the distribution be a JDK vs a JRE
    :raises ValueError: if bin_path is not an existing directory or a version
      constraint is neither a string nor a Revision
    """
    if not os.path.isdir(bin_path):
      raise ValueError('The specified distribution path is invalid: %s' % bin_path)
    self._bin_path = bin_path
    self._minimum_version = self._parse_java_version("minimum_version", minimum_version)
    self._maximum_version = self._parse_java_version("maximum_version", maximum_version)
    self._jdk = jdk
    # The remaining attributes are populated lazily by validate() and the
    # _get_* helpers; construction performs no java invocations.
    self._is_jdk = False
    self._system_properties = None
    self._version = None
    self._validated_binaries = {}
@property
def jdk(self):
  """True if this distribution is a JDK (i.e. has a working javac).

  Triggers validate(), so Distribution.Error is raised if this
  distribution fails its configured constraints.
  """
  self.validate()
  return self._is_jdk
@property
def system_properties(self):
  """Returns a dict containing the system properties of this java distribution.

  Obtained by running this distribution's java command (a copy is
  returned so callers cannot mutate the cached mapping).
  """
  return dict(self._get_system_properties(self.java))
@property
def version(self):
  """Returns the distribution version.

  Raises Distribution.Error if this distribution is not valid according to the configured
  constraints.
  """
  # self.java validates first; the version itself is parsed lazily and cached.
  return self._get_version(self.java)
@property
def home(self):
  """Returns the distribution JAVA_HOME (the 'java.home' system property)."""
  return self._get_system_properties(self.java)['java.home']
@property
def java(self):
  """Returns the path to this distribution's java command.

  If this distribution has no valid java command raises Distribution.Error.
  """
  return self.binary('java')
def binary(self, name):
  """Returns the path to the command of the given name for this distribution.

  For example: ::

    >>> d = Distribution()
    >>> jar = d.binary('jar')
    >>> jar
    '/usr/bin/jar'
    >>>

  If this distribution has no valid command of the given name raises Distribution.Error.

  :param string name: the binary name, e.g. 'java' or 'jar'
  :raises ValueError: if name is not a string
  """
  if not isinstance(name, string_types):
    raise ValueError('name must be a binary name, given %s of type %s' % (name, type(name)))
  # Ensure the distribution as a whole passes its constraints before
  # handing out any individual binary.
  self.validate()
  return self._validated_executable(name)
def validate(self):
  """Validates this distribution against its configured constraints.

  Raises Distribution.Error if this distribution is not valid according to the configured
  constraints.
  """
  # A non-empty binary cache means a previous validate() already succeeded.
  if self._validated_binaries:
    return

  # _valid_executable only caches 'java' if this block exits cleanly, so a
  # version-constraint failure leaves the cache empty for a later retry.
  with self._valid_executable('java') as java:
    if self._minimum_version:
      version = self._get_version(java)
      if version < self._minimum_version:
        raise self.Error('The java distribution at %s is too old; expecting at least %s and'
                         ' got %s' % (java, self._minimum_version, version))
    if self._maximum_version:
      version = self._get_version(java)
      if version > self._maximum_version:
        raise self.Error('The java distribution at %s is too new; expecting no older than'
                         ' %s and got %s' % (java, self._maximum_version, version))

  try:
    self._validated_executable('javac')  # Calling purely for the check and cache side effects
    self._is_jdk = True
  except self.Error:
    # No javac simply means this is a JRE - only fatal when a JDK was required.
    if self._jdk:
      raise
def _get_version(self, java):
  # Parses and caches the 'java.version' system property of the given java
  # executable as a Revision; subsequent calls return the cached value.
  if not self._version:
    self._version = self._parse_java_version('java.version',
                                             self._get_system_properties(java)['java.version'])
  return self._version
def _get_system_properties(self, java):
  # Runs a bundled SystemProperties class with the given java executable and
  # caches its 'key=value' line output as a dict.
  if not self._system_properties:
    with temporary_dir() as classpath:
      # Materialize the pre-compiled helper class shipped with this package
      # so it can be run against the distribution under test.
      with open(os.path.join(classpath, 'SystemProperties.class'), 'w+') as fp:
        fp.write(pkgutil.get_data(__name__, 'SystemProperties.class'))
      cmd = [java, '-cp', classpath, 'SystemProperties']
      process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      stdout, stderr = process.communicate()
      if process.returncode != 0:
        raise self.Error('Failed to determine java system properties for %s with %s - exit code'
                         ' %d: %s' % (java, ' '.join(cmd), process.returncode, stderr))
    # Each output line is 'key=value'; values may legitimately contain '='.
    props = {}
    for line in stdout.split(os.linesep):
      key, _, val = line.partition('=')
      props[key] = val
    self._system_properties = props
  return self._system_properties
def _validate_executable(self, name):
  """Return the path of binary ``name`` under bin_path, or raise Error."""
  exe = os.path.join(self._bin_path, name)
  if self._is_executable(exe):
    return exe
  raise self.Error('Failed to locate the %s executable, %s does not appear to be a'
                   ' valid %s distribution' % (name, self, 'JDK' if self._jdk else 'JRE'))
def _validated_executable(self, name):
  """Return the cached path for binary ``name``, validating it on first use."""
  if not self._validated_binaries.get(name):
    self._validated_binaries[name] = self._validate_executable(name)
  return self._validated_binaries[name]
@contextmanager
def _valid_executable(self, name):
  # Yields the validated path for binary `name`. The path is only cached
  # after the yield returns, i.e. when the caller's with-block completes
  # without raising - so a failed validation sequence is retryable.
  exe = self._validate_executable(name)
  yield exe
  self._validated_binaries[name] = exe
def __repr__(self):
  """Debug representation echoing the constructor arguments.

  Fix: the format string was missing the comma between 'maximum_version=%r'
  and 'jdk=%r', producing an inconsistent, non-evalable repr.
  """
  return ('Distribution(%r, minimum_version=%r, maximum_version=%r, jdk=%r)'
          % (self._bin_path, self._minimum_version, self._maximum_version, self._jdk))
| |
import os
import shutil
import tempfile
import pytest
from mock import Mock, patch, mock_open
from pip.exceptions import (
PreviousBuildDirError, InvalidWheelFilename, UnsupportedWheel,
)
from pip.download import PipSession
from pip._vendor import pkg_resources
from pip.index import PackageFinder
from pip.log import logger
from pip.req import (InstallRequirement, RequirementSet,
Requirements, parse_requirements)
from pip.req.req_install import parse_editable
from pip.util import read_text_file
from tests.lib import assert_raises_regexp
class TestRequirementSet(object):
    """RequirementSet tests"""

    def setup(self):
        # Silence pip's logger and give every test its own scratch dir.
        logger.consumers = [(logger.NOTIFY, Mock())]
        self.tempdir = tempfile.mkdtemp()

    def teardown(self):
        logger.consumers = []
        shutil.rmtree(self.tempdir, ignore_errors=True)

    def basic_reqset(self):
        """Return a minimal RequirementSet rooted in the scratch dir."""
        return RequirementSet(
            build_dir=os.path.join(self.tempdir, 'build'),
            src_dir=os.path.join(self.tempdir, 'src'),
            download_dir=None,
            session=PipSession(),
        )

    def test_no_reuse_existing_build_dir(self, data):
        """Test prepare_files raise exception with previous build dir"""
        # Simulate a stale build dir left by an earlier run: pre-create
        # build/simple containing a setup.py.
        build_dir = os.path.join(self.tempdir, 'build', 'simple')
        os.makedirs(build_dir)
        open(os.path.join(build_dir, "setup.py"), 'w')
        reqset = self.basic_reqset()
        req = InstallRequirement.from_line('simple')
        reqset.add_requirement(req)
        finder = PackageFinder([data.find_links], [], session=PipSession())
        # The error must name both the requirement and the offending
        # directory (backslashes escaped for use inside the regexp).
        assert_raises_regexp(
            PreviousBuildDirError,
            "pip can't proceed with [\s\S]*%s[\s\S]*%s" %
            (req, build_dir.replace('\\', '\\\\')),
            reqset.prepare_files,
            finder,
        )
@pytest.mark.parametrize(('file_contents', 'expected'), [
    (b'\xf6\x80', b'\xc3\xb6\xe2\x82\xac'),  # cp1252
    (b'\xc3\xb6\xe2\x82\xac', b'\xc3\xb6\xe2\x82\xac'),  # utf-8
    (b'\xc3\xb6\xe2', b'\xc3\x83\xc2\xb6\xc3\xa2'),  # Garbage
])
def test_egg_info_data(file_contents, expected):
    """read_text_file decodes utf-8 first, falls back to the preferred
    locale encoding (mocked here to cp1252), and survives garbage bytes."""
    om = mock_open(read_data=file_contents)
    em = Mock()
    em.return_value = 'cp1252'
    # Patch both the open() used by pip.util and the locale probe so the
    # fallback path is deterministic across platforms.
    with patch('pip.util.open', om, create=True):
        with patch('locale.getpreferredencoding', em):
            ret = read_text_file('foo')
            assert ret == expected.decode('utf-8')
class TestInstallRequirement(object):
    """Unit tests for InstallRequirement construction from lines and URLs."""

    def test_url_with_query(self):
        """InstallRequirement should strip the fragment, but not the query."""
        url = 'http://foo.com/?p=bar.git;a=snapshot;h=v0.1;sf=tgz'
        fragment = '#egg=bar'
        req = InstallRequirement.from_line(url + fragment)
        assert req.url == url + fragment, req.url

    def test_unsupported_wheel_requirement_raises(self):
        # A wheel whose tags match no supported platform must be rejected.
        with pytest.raises(UnsupportedWheel):
            InstallRequirement.from_line(
                'peppercorn-0.4-py2.py3-bogus-any.whl',
            )

    def test_invalid_wheel_requirement_raises(self):
        # A filename that cannot be parsed as a wheel name at all.
        with pytest.raises(InvalidWheelFilename):
            InstallRequirement.from_line('invalid.whl')

    def test_wheel_requirement_sets_req_attribute(self):
        # The name==version pin is derived from the wheel filename.
        req = InstallRequirement.from_line('simple-0.1-py2.py3-none-any.whl')
        assert req.req == pkg_resources.Requirement.parse('simple==0.1')

    def test_url_preserved_line_req(self):
        """Confirm the url is preserved in a non-editable requirement"""
        url = 'git+http://foo.com@ref#egg=foo'
        req = InstallRequirement.from_line(url)
        assert req.url == url

    def test_url_preserved_editable_req(self):
        """Confirm the url is preserved in a editable requirement"""
        url = 'git+http://foo.com@ref#egg=foo'
        req = InstallRequirement.from_editable(url)
        assert req.url == url
def test_requirements_data_structure_keeps_order():
    """Requirements iterates keys and values in insertion order."""
    requirements = Requirements()
    for name in ['pip', 'nose', 'coverage']:
        requirements[name] = name
    assert list(requirements.values()) == ['pip', 'nose', 'coverage']
    assert list(requirements.keys()) == ['pip', 'nose', 'coverage']
def test_requirements_data_structure_implements__repr__():
    """repr() renders the mapping in insertion order."""
    requirements = Requirements()
    requirements['pip'] = 'pip'
    requirements['nose'] = 'nose'
    expected = "Requirements({'pip': 'pip', 'nose': 'nose'})"
    assert repr(requirements) == expected
def test_requirements_data_structure_implements__contains__():
    """Membership tests work for present and absent keys."""
    requirements = Requirements()
    present, absent = 'pip', 'nose'
    requirements[present] = present
    assert present in requirements
    assert absent not in requirements
@patch('os.path.normcase')
@patch('pip.req.req_install.os.getcwd')
@patch('pip.req.req_install.os.path.exists')
@patch('pip.req.req_install.os.path.isdir')
def test_parse_editable_local(
        isdir_mock, exists_mock, getcwd_mock, normcase_mock):
    """A local directory editable parses to (None, file:// URL, None)."""
    exists_mock.return_value = isdir_mock.return_value = True
    # mocks needed to support path operations on windows tests
    normcase_mock.return_value = getcwd_mock.return_value = "/some/path"
    assert parse_editable('.', 'git') == (None, 'file:///some/path', None)
    normcase_mock.return_value = "/some/path/foo"
    assert parse_editable('foo', 'git') == (
        None, 'file:///some/path/foo', None,
    )
def test_parse_editable_default_vcs():
    """A bare URL picks up the default vcs prefix."""
    result = parse_editable('https://foo#egg=foo', 'git')
    assert result == ('foo', 'git+https://foo#egg=foo', {'egg': 'foo'})
def test_parse_editable_explicit_vcs():
    """An explicit vcs prefix wins over the default vcs."""
    result = parse_editable('svn+https://foo#egg=foo', 'git')
    assert result == ('foo', 'svn+https://foo#egg=foo', {'egg': 'foo'})
def test_parse_editable_vcs_extras():
    """Extras in the egg fragment survive parsing unchanged."""
    result = parse_editable('svn+https://foo#egg=foo[extras]', 'git')
    expected = ('foo[extras]', 'svn+https://foo#egg=foo[extras]',
                {'egg': 'foo[extras]'})
    assert result == expected
@patch('os.path.normcase')
@patch('pip.req.req_install.os.getcwd')
@patch('pip.req.req_install.os.path.exists')
@patch('pip.req.req_install.os.path.isdir')
def test_parse_editable_local_extras(
        isdir_mock, exists_mock, getcwd_mock, normcase_mock):
    """Local editables with [extras] keep the file:// URL and return the
    extras as a tuple."""
    exists_mock.return_value = isdir_mock.return_value = True
    # mocks needed to support path operations on windows tests
    normcase_mock.return_value = getcwd_mock.return_value = "/some/path"
    assert parse_editable('.[extras]', 'git') == (
        None, 'file://' + "/some/path", ('extras',),
    )
    normcase_mock.return_value = "/some/path/foo"
    assert parse_editable('foo[bar,baz]', 'git') == (
        None, 'file:///some/path/foo', ('bar', 'baz'),
    )
def test_remote_reqs_parse():
    """
    Test parsing a simple remote requirements file
    """
    # this requirements file just contains a comment
    # previously this has failed in py3: https://github.com/pypa/pip/issues/760
    # NOTE(review): this test hits the network; it only asserts that
    # iteration over the parsed requirements completes without raising.
    for req in parse_requirements(
            'https://raw.githubusercontent.com/pypa/pip-test-package/master/'
            'tests/req_just_comment.txt', session=PipSession()):
        pass
def test_req_file_parse_no_use_wheel(data):
    """
    Test parsing --no-use-wheel from a req file
    """
    finder = PackageFinder([], [], session=PipSession())
    # Consume the generator so the option lines are actually processed.
    list(parse_requirements(data.reqfiles.join("supported_options.txt"),
                            finder, session=PipSession()))
    assert not finder.use_wheel
def test_req_file_parse_comment_start_of_line(tmpdir):
    """A whole-line comment in a requirements file yields no requirements."""
    req_path = tmpdir.join("req1.txt")
    with open(req_path, "w") as fp:
        fp.write("# Comment ")
    finder = PackageFinder([], [], session=PipSession())
    parsed = list(parse_requirements(req_path, finder, session=PipSession()))
    assert parsed == []
def test_req_file_parse_comment_end_of_line_with_url(tmpdir):
    """A trailing comment after a URL requirement is stripped."""
    req_path = tmpdir.join("req1.txt")
    with open(req_path, "w") as fp:
        fp.write("https://example.com/foo.tar.gz # Comment ")
    finder = PackageFinder([], [], session=PipSession())
    parsed = list(parse_requirements(req_path, finder, session=PipSession()))
    assert len(parsed) == 1
    assert parsed[0].url == "https://example.com/foo.tar.gz"
def test_req_file_parse_egginfo_end_of_line_with_url(tmpdir):
    """An #egg= fragment on a URL requirement supplies the project name."""
    req_path = tmpdir.join("req1.txt")
    with open(req_path, "w") as fp:
        fp.write("https://example.com/foo.tar.gz#egg=wat")
    finder = PackageFinder([], [], session=PipSession())
    parsed = list(parse_requirements(req_path, finder, session=PipSession()))
    assert len(parsed) == 1
    assert parsed[0].name == "wat"
| |
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import platform
import os
import random
import re
import shlex
import signal
import subprocess
import sys
import tempfile
import elftools
# filetype needs to be imported here because pnacl-driver injects calls to
# filetype.ForceFileType into argument parse actions.
# TODO(dschuff): That's ugly. Find a better way.
import filetype
import pathtools
from driver_env import env
# TODO: import driver_log and change these references from 'foo' to
# 'driver_log.foo', or split driver_log further
from driver_log import Log, DriverOpen, DriverClose, StringifyCommand, DriverExit, FixArch
from driver_temps import TempFiles
from shelltools import shell
def ParseError(s, leftpos, rightpos, msg):
  """Report a driver-argument parse error with a caret marker and exit."""
  Log.Error("Parse Error: %s", msg)
  Log.Error(' ' + s)
  caret_line = (' ' * leftpos) + ('^' * (rightpos - leftpos + 1))
  Log.Error(' ' + caret_line)
  DriverExit(1)
# Run a command with extra environment settings
def RunWithEnv(cmd, **kwargs):
  # Layer the keyword overrides onto the driver env, run the command,
  # then restore the previous env state.
  env.push()
  env.setmany(**kwargs)
  ret = Run(cmd)
  env.pop()
  return ret
def SetExecutableMode(path):
  """Make |path| executable (rwxr-xr-x minus the umask); POSIX only.

  Fix: the legacy octal literal 0755 (a syntax error under Python 3) is
  replaced by the explicit 0o755 form, valid since Python 2.6.
  """
  if os.name == "posix":
    realpath = pathtools.tosys(path)
    # os.umask gets and sets at the same time.
    # There's no way to get it without setting it, so restore it right away.
    umask = os.umask(0)
    os.umask(umask)
    os.chmod(realpath, 0o755 & ~umask)
def FilterOutArchArgs(args):
  """Return |args| with every '-arch <value>' pair removed."""
  while True:
    try:
      i = args.index('-arch')
    except ValueError:
      return args
    args = args[:i] + args[i + 2:]
# Parse and validate the target triple and return the architecture.
# We don't attempt to recognize all possible targets here, just the ones we
# support.
def ParseTriple(triple):
  """Return the architecture from an '<arch>[-vendor]-nacl' target triple.

  Fix: the local variable holding the OS token was named 'os', shadowing
  the os module import; renamed to os_name.
  """
  tokens = triple.split('-')
  arch = tokens[0]
  if arch != 'le32':
    arch = FixArch(arch)
  os_name = tokens[1]
  # The machine/vendor field could be present or not.
  if os_name != 'nacl' and len(tokens) >= 3:
    os_name = tokens[2]
  # Just check that the os is nacl.
  if os_name == 'nacl':
    return arch
  Log.Fatal('machine/os ' + '-'.join(tokens[1:]) + ' not supported.')
def GetOSName():
  """Map sys.platform to the short OS name used in toolchain paths."""
  if sys.platform == 'darwin':
    return 'mac'
  if sys.platform == 'nacl':
    return 'nacl'
  if sys.platform.startswith('linux'):
    return 'linux'
  if sys.platform in ('cygwin', 'win32'):
    return 'win'
  Log.Fatal('Machine: %s not supported.' % sys.platform)
def GetArchNameShort():
  """Map platform.machine() onto one of 'arm', 'mips' or 'x86'."""
  machine = platform.machine().lower()
  for prefix in ('arm', 'mips', 'x86'):
    if machine.startswith(prefix):
      return 'x86' if prefix == 'x86' else prefix
  if machine in ('amd32', 'i386', 'i686', 'ia32', '32', 'amd64', '64'):
    return 'x86'
  Log.Fatal('Architecture: %s not supported.' % machine)
  return 'unknown'
def RunDriver(module_name, args, suppress_inherited_arch_args=False):
  """
  RunDriver() is used to invoke "driver" tools, e.g.
  those prefixed with "pnacl-"

  It automatically appends some additional flags to the invocation
  which were inherited from the current invocation.
  Those flags were preserved by ParseArgs

  :param module_name: name of the driver module (e.g. 'pnacl-ld'); it is
      imported and run in-process via DriverMain, not spawned.
  :param args: argument list, or a single string to be env-expanded and split.
  :param suppress_inherited_arch_args: drop inherited '-arch X' pairs so the
      callee can pick its own architecture.
  """
  if isinstance(args, str):
    args = shell.split(env.eval(args))

  script = env.eval('${DRIVER_BIN}/%s' % module_name)
  script = shell.unescape(script)

  inherited_driver_args = env.get('INHERITED_DRIVER_ARGS')
  if suppress_inherited_arch_args:
    inherited_driver_args = FilterOutArchArgs(inherited_driver_args)

  script = pathtools.tosys(script)
  cmd = [script] + args + inherited_driver_args
  Log.Info('Driver invocation: %s', repr(cmd))

  module = __import__(module_name)

  # Save the environment, reset the environment, run
  # the driver module, and then restore the environment.
  env.push()
  env.reset()
  DriverMain(module, cmd)
  env.pop()
def memoize(f):
  """Memoize a zero-argument function: f runs once, its result is cached."""
  cache = []
  def wrapper():
    if not cache:
      cache.append(f())
    return cache[0]
  wrapper.__name__ = f.__name__
  return wrapper
@env.register
@memoize
def GetBuildOS():
  # Returns the OS this driver is running on: one of 'linux', 'nacl',
  # 'darwin' or 'windows' (cygwin is folded into 'windows'); fatal otherwise.
  name = platform.system().lower()
  if name.startswith('cygwin_nt') or 'windows' in name:
    name = 'windows'
  if name not in ('linux', 'nacl', 'darwin', 'windows'):
    Log.Fatal("Unsupported platform '%s'", name)
  return name
@env.register
@memoize
def GetBuildArch():
  # Returns the build machine architecture: 'i386', 'i686' or 'x86_64'.
  m = platform.machine()
  # Windows is special: it reports 'x86' where others report 'i686'.
  if m == 'x86':
    m = 'i686'
  if m not in ('i386', 'i686', 'x86_64'):
    Log.Fatal("Unsupported architecture '%s'", m)
  return m
# Crawl backwards, starting from the directory containing this script,
# until we find a directory satisfying a filter function.
def FindBaseDir(function):
  """Walk up from DRIVER_BIN looking for a directory accepted by |function|.

  Gives up after ascending 16 levels; returns the directory or None.
  """
  cur = env.getone('DRIVER_BIN')
  for _ in range(16):
    if function(cur):
      return cur
    cur = pathtools.dirname(cur)
  return cur if function(cur) else None
@env.register
@memoize
def FindBaseNaCl():
  """Find the native_client/ directory (fatal if not found).

  Fix: the local variable was named 'dir', shadowing the builtin; renamed.
  """
  found = FindBaseDir(lambda cur: pathtools.basename(cur) == 'native_client')
  if found is None:
    Log.Fatal("Unable to find 'native_client' directory")
  return shell.escape(found)
@env.register
@memoize
def FindBaseToolchain():
  """ Find toolchain/OS_ARCH directory """
  base_dir = FindBaseDir(lambda cur: pathtools.basename(cur) == 'toolchain')
  if base_dir is None:
    Log.Fatal("Unable to find 'toolchain' directory")
  subdir = '%s_%s' % (GetOSName(), GetArchNameShort())
  return shell.escape(os.path.join(base_dir, subdir))
@env.register
@memoize
def FindBasePNaCl():
  """ Find the base directory of the PNaCl toolchain """
  # The <base> directory is one level up from the <base>/bin:
  return shell.escape(pathtools.dirname(env.getone('DRIVER_BIN')))
def AddHostBinarySearchPath(prefix):
  """ Add a path to the list searched for host binaries. """
  prefix = pathtools.normalize(prefix)
  # Directory prefixes get a trailing slash so later joins behave uniformly.
  if pathtools.isdir(prefix) and not prefix.endswith('/'):
    prefix = prefix + '/'
  env.append('BPREFIXES', prefix)
@env.register
def FindBaseHost(tool):
  """ Find the base directory for host binaries (i.e. llvm/binutils) """
  exe = tool + env.getone('EXEC_EXT')
  if env.has('BPREFIXES'):
    for prefix in env.get('BPREFIXES'):
      if os.path.exists(pathtools.join(prefix, 'bin', exe)):
        return prefix
  # Fall back to the PNaCl toolchain itself; fatal if the tool is missing.
  base_pnacl = FindBasePNaCl()
  if not pathtools.exists(pathtools.join(base_pnacl, 'bin', exe)):
    Log.Fatal('Could not find PNaCl host directory for ' + tool)
  return base_pnacl
def ReadConfig():
  """Load key=value settings from DRIVER_BIN/driver.conf into the env."""
  # Mock out ReadConfig if running unittests. Settings are applied directly
  # by DriverTestEnv rather than reading this configuration file.
  if env.has('PNACL_RUNNING_UNITTESTS'):
    return
  driver_conf = pathtools.join(env.getone('DRIVER_BIN'), 'driver.conf')
  fp = DriverOpen(driver_conf, 'r')
  for linecount, line in enumerate(fp, 1):
    line = line.strip()
    # Skip blank lines and comments.
    if not line or line.startswith('#'):
      continue
    keyname, sep, value = line.partition('=')
    if not sep:
      Log.Fatal("%s: Parse error, missing '=' on line %d",
                pathtools.touser(driver_conf), linecount)
    env.setraw(keyname.strip(), value.strip())
  DriverClose(fp)
@env.register
def AddPrefix(prefix, varname):
  """Prepend |prefix| to each shell-escaped value of env list |varname|."""
  return ' '.join(prefix + shell.escape(v) for v in env.get(varname))
######################################################################
#
# Argument Parser
#
######################################################################

# Argument patterns recognized by every driver. Matches made from these
# patterns are recorded in INHERITED_DRIVER_ARGS and re-forwarded to nested
# driver invocations (see ParseArgs / RunDriver). Each entry is
# (regex-or-tuple-of-regexes, action): a string action is exec'd with $N
# replaced by the Nth captured group; a callable action is invoked with the
# captured groups as arguments (see ParseArgsBase/MatchOne).
DriverArgPatterns = [
  ( '--pnacl-driver-verbose', "env.set('LOG_VERBOSE', '1')"),
  ( ('-arch', '(.+)'), "SetArch($0)"),
  ( '--pnacl-sb', "env.set('SANDBOXED', '1')"),
  ( '--pnacl-use-emulator', "env.set('USE_EMULATOR', '1')"),
  ( '--dry-run', "env.set('DRY_RUN', '1')"),
  ( '--pnacl-arm-bias', "env.set('BIAS', 'ARM')"),
  ( '--pnacl-mips-bias', "env.set('BIAS', 'MIPS32')"),
  ( '--pnacl-i686-bias', "env.set('BIAS', 'X8632')"),
  ( '--pnacl-x86_64-bias', "env.set('BIAS', 'X8664')"),
  ( '--pnacl-bias=(.+)', "env.set('BIAS', FixArch($0))"),
  ( '-save-temps', "env.set('SAVE_TEMPS', '1')"),
  ( '-no-save-temps', "env.set('SAVE_TEMPS', '0')"),
  ( ('-B', '(.*)'), AddHostBinarySearchPath),
  ]

# Patterns recognized by every driver but deliberately NOT forwarded to
# nested driver invocations: they tweak only the current driver's env.
DriverArgPatternsNotInherited = [
  ( '--pnacl-driver-set-([^=]+)=(.*)', "env.set($0, $1)"),
  ( '--pnacl-driver-append-([^=]+)=(.*)', "env.append($0, $1)"),
  ]
def ShouldExpandCommandFile(arg):
  """ We may be given files with commandline arguments.
  Read in the arguments so that they can be handled as usual. """
  if not arg.startswith('@'):
    return False
  return pathtools.isfile(pathtools.normalize(arg[1:]))
def DoExpandCommandFile(argv, i):
  """Splice the contents of response file argv[i] (an '@file' arg) into argv."""
  fd = DriverOpen(pathtools.normalize(argv[i][1:]), 'r')
  # Use shlex here to process the response file contents.
  # This ensures that single and double quoted args are
  # handled correctly. Since this file is very likely
  # to contain paths with windows path separators we can't
  # use the normal shlex.split() since we need to disable
  # '\' (the default escape char).
  more_args = []
  for line in fd:
    lex = shlex.shlex(line, posix=True)
    lex.escape = ''
    lex.whitespace_split = True
    more_args.extend(lex)
  fd.close()
  return argv[:i] + more_args + argv[i + 1:]
def ParseArgs(argv,
              patternlist,
              driver_patternlist=DriverArgPatterns,
              driver_patternlist_not_inherited=DriverArgPatternsNotInherited):
  """Parse argv using the patterns in patternlist

  Also apply the built-in DriverArgPatterns unless instructed otherwise.
  This function must be called by all (real) drivers.

  Fatal-errors out if any argument is left unmatched by all pattern lists.
  """
  if driver_patternlist:
    driver_args, argv = ParseArgsBase(argv, driver_patternlist)

    # TODO(robertm): think about a less obscure mechanism to
    # replace the inherited args feature
    # Built-in driver args are stashed so RunDriver can forward them to
    # nested driver invocations.
    assert not env.get('INHERITED_DRIVER_ARGS')
    env.append('INHERITED_DRIVER_ARGS', *driver_args)
  _, argv = ParseArgsBase(argv, driver_patternlist_not_inherited)
  _, unmatched = ParseArgsBase(argv, patternlist)
  if unmatched:
    for u in unmatched:
      Log.Error('Unrecognized argument: ' + u)
    Log.Fatal('unknown arguments')
def ParseArgsBase(argv, patternlist):
  """ Parse argv using the patterns in patternlist

  Returns: (matched, unmatched)
  """
  matched = []
  unmatched = []
  i = 0
  while i < len(argv):
    # '@file' response-file arguments are expanded in place before matching.
    if ShouldExpandCommandFile(argv[i]):
      argv = DoExpandCommandFile(argv, i)
      if i >= len(argv):
        break
    num_matched, action, groups = MatchOne(argv, i, patternlist)
    if num_matched == 0:
      unmatched.append(argv[i])
      i += 1
      continue
    matched += argv[i:i+num_matched]
    if isinstance(action, str):
      # Perform $N substitution
      for g in xrange(0, len(groups)):
        action = action.replace('$%d' % g, 'groups[%d]' % g)
    try:
      if isinstance(action, str):
        # NOTE: this is essentially an eval for python expressions
        # which does rely on the current environment for unbound vars
        # Log.Info('about to exec [%s]', str(action))
        exec(action)
      else:
        action(*groups)
    except Exception, err:
      Log.Fatal('ParseArgs action [%s] failed with: %s', action, err)
    i += num_matched
  return (matched, unmatched)
def MatchOne(argv, i, patternlist):
  """Find a pattern which matches argv starting at position i.

  Each pattern is a (regex, action) pair where regex is either a single
  regex string or a tuple of them matching consecutive arguments; every
  regex is anchored ('^...$') so the whole argument must match.

  Returns (num_args_matched, action, flattened_capture_groups); the count
  is 0 when no pattern matches.

  Fix: the group flattening relied on the Python-2-only builtin reduce()
  with quadratic list concatenation; replaced by a flat comprehension.
  """
  for (regex, action) in patternlist:
    if isinstance(regex, str):
      regex = [regex]
    # Not enough arguments left for a multi-token pattern.
    if i + len(regex) > len(argv):
      continue
    matches = [re.match('^' + r + '$', argv[i + j])
               for j, r in enumerate(regex)]
    if None in matches:
      continue
    # Flatten the captured groups of all sub-matches, in order.
    groups = [g for m in matches for g in m.groups()]
    return (len(regex), action, groups)
  return (0, '', [])
def UnrecognizedOption(*args):
  """Abort with a fatal error naming the unrecognized option(s)."""
  message = "Unrecognized option: " + ' '.join(args) + "\n"
  message += "Use '--help' for more information."
  Log.Fatal(message)
######################################################################
#
# File Naming System (Temp files & Output files)
#
######################################################################

def DefaultOutputName(filename, outtype):
  """Choose the default output filename for |filename| given |outtype|.

  Fix: 'outtype in ('pp')' and 'in ('po')' were substring tests against a
  plain string (parens do not make a one-element tuple), so 'p' and ''
  matched accidentally; replaced with exact equality.
  """
  # For pre-processor mode, just print to stdout.
  if outtype == 'pp':
    return '-'

  base = pathtools.basename(filename)
  base = RemoveExtension(base)
  if outtype == 'po':
    return base + '.o'

  assert(outtype in filetype.ExtensionMap.values())
  assert(not filetype.IsSourceType(outtype))

  return base + '.' + outtype
def DefaultPCHOutputName(filename):
  """Default output name for a precompiled header.

  Clang currently uses the GCC '.gch' suffix by default for precompiled
  headers, though their documentation example uses '-o foo.h.pch'.
  """
  return '%s.gch' % filename
def RemoveExtension(filename):
  """Strip the extension; '.opt.bc' is treated as one compound extension."""
  compound = '.opt.bc'
  if filename.endswith(compound):
    return filename[:-len(compound)]
  return pathtools.splitext(filename)[0]
def PathSplit(f):
  """Split path |f| into its components, root-most first."""
  parts = []
  remainder = f
  while True:
    remainder, piece = pathtools.split(remainder)
    if not piece:
      break
    parts.append(piece)
  parts.reverse()
  return parts
def CheckPathLength(filename, exit_on_failure=True):
  '''Check that the length of the path is short enough for Windows.

  On Windows, MAX_PATH is ~260 and applies to absolute paths, and to relative
  paths and the absolute paths they expand to (except for specific uses of
  some APIs; see link below). Most applications don't bother to support long
  paths properly (including LLVM, GNU binutils, and ninja). If a path is too
  long, ERROR_PATH_NOT_FOUND is returned, which isn't very useful or clear for
  users. In addition the Chrome build has deep directory hierarchies with long
  names.
  This function checks that the path is valid, so we can throw meaningful
  errors.

  Returns True when the path is acceptable; returns False (or fatals, when
  exit_on_failure) otherwise.

  http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
  '''
  # Only enforced on Windows (or under unittests, for coverage elsewhere).
  if not IsWindowsPython() and not env.has('PNACL_RUNNING_UNITTESTS'):
    return True

  # First check the name as-is (it's usually a relative path)
  if len(filename) > 255:
    if exit_on_failure:
      Log.Fatal('Path name %s is too long (%d characters)' %
                (filename, len(filename)))
    return False
  if os.path.isabs(filename):
    return True

  # Don't assume that the underlying tools or windows APIs will normalize
  # the path before using it. Conservatively count the length of CWD + filename
  appended_name = os.path.join(os.getcwd(), filename)
  if len(appended_name) > 255:
    if exit_on_failure:
      Log.Fatal('Path name %s (expanded from %s) is too long (%d characters)' %
                (appended_name, filename, len(appended_name)))
    return False
  return True
# Generate a unique identifier for each input file.
# Start with the basename, and if that is not unique enough,
# add parent directories. Rinse, repeat.
class TempNameGen(object):
  # Maps each input file to a unique intermediate-file name of the form
  # <output>---<path components>---, widening the number of trailing path
  # components used per input until no two inputs collide.
  def __init__(self, inputs, output):
    self.TempBase = output + '---linked'
    self.OutputDir = pathtools.dirname(output)

    # TODO(pdox): Figure out if there's a less confusing way
    #             to simplify the intermediate filename in this case.
    #if len(inputs) == 1:
    #  # There's only one input file, don't bother adding the source name.
    #  TempMap[inputs[0]] = output + '---'
    #  return

    # Build the initial mapping: each input starts by contributing only its
    # basename (n == 1 trailing path components).
    self.TempMap = dict()
    for f in inputs:
      if f.startswith('-'):
        continue
      path = PathSplit(f)
      self.TempMap[f] = [1, path]

    while True:
      # Find conflicts: two inputs whose current candidate names collide.
      ConflictMap = dict()
      Conflicts = set()
      for (f, [n, path]) in self.TempMap.iteritems():
        candidate = output + '---' + '_'.join(path[-n:]) + '---'
        if candidate in ConflictMap:
          Conflicts.add(ConflictMap[candidate])
          Conflicts.add(f)
        else:
          ConflictMap[candidate] = f

      if len(Conflicts) == 0:
        break

      # Resolve conflicts by letting each conflicting input use one more
      # trailing path component; fatal if a full path is still ambiguous.
      for f in Conflicts:
        n = self.TempMap[f][0]
        if n+1 > len(self.TempMap[f][1]):
          Log.Fatal('Unable to resolve naming conflicts')
        self.TempMap[f][0] = n+1

    # Clean up the map: replace [n, path] bookkeeping with the final name.
    NewMap = dict()
    for (f, [n, path]) in self.TempMap.iteritems():
      candidate = output + '---' + '_'.join(path[-n:]) + '---'
      NewMap[f] = candidate
    self.TempMap = NewMap
    return

  def ValidatePathLength(self, temp, imtype):
    # Returns |temp|, or a short random name in OutputDir when |temp| would
    # exceed the Windows path-length limit (see CheckPathLength).
    temp = pathtools.normpath(temp) if temp else temp
    # If the temp name is too long, just pick a random one instead.
    if not CheckPathLength(temp, exit_on_failure=False):
      # imtype is sometimes just an extension, and sometimes a compound
      # extension (e.g. pre_opt.pexe). To keep name length shorter,
      # only take the last extension
      if '.' in imtype:
        imtype = imtype[imtype.rfind('.') + 1:]
      temp = pathtools.join(
          self.OutputDir,
          str(random.randrange(100000, 1000000)) + '.' + imtype)
      CheckPathLength(temp)
    return temp

  def TempNameForOutput(self, imtype):
    # Name for an intermediate derived from the final output.
    temp = self.ValidatePathLength(self.TempBase + '.' + imtype, imtype)
    TempFiles.add(temp)
    return temp

  def TempNameForInput(self, input, imtype):
    # Name for an intermediate derived from a specific input file.
    # If input is already a temporary name, just change the extension
    if input.startswith(self.TempBase):
      temp = self.TempBase + '.' + imtype
    else:
      # Source file
      temp = self.TempMap[input] + '.' + imtype
      temp = self.ValidatePathLength(temp, imtype)
    TempFiles.add(temp)
    return temp
# (Invoked from loader.py)
# If the driver is waiting on a background process in RunWithLog()
# and the user Ctrl-C's or kill's the driver, it may leave
# the child process (such as llc) running. To prevent this,
# the code below sets up a signal handler which issues a kill to
# the currently running child processes.
CleanupProcesses = []
def SetupSignalHandlers():
  global CleanupProcesses
  def signal_handler(unused_signum, unused_frame):
    # Best-effort kill of each registered child, then take this process
    # down hard with SIGKILL (which cannot be caught or ignored).
    for p in CleanupProcesses:
      try:
        p.kill()
      except BaseException:
        pass
    os.kill(os.getpid(), signal.SIGKILL)
    return 0
  # SIGHUP does not exist on Windows; handlers are POSIX-only (and skipped
  # under NaCl).
  if os.name == 'posix' and sys.platform != 'nacl':
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGHUP, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
def ArgsTooLongForWindows(args):
  """ Detect when a command line might be too long for Windows. """
  return IsWindowsPython() and len(' '.join(args)) > 8191
def ConvertArgsToFile(args):
  """Write all but args[0] into a temp response file; return [cmd, '@file']."""
  fd, outfile = tempfile.mkstemp()
  # Remember to delete this file afterwards.
  TempFiles.add(outfile)
  os.write(fd, ' '.join(args[1:]))
  os.close(fd)
  return [args[0], '@' + outfile]
# Note:
# The redirect_stdout and redirect_stderr is only used a handful of times
def Run(args,
        errexit=True,
        redirect_stdout=None,
        redirect_stderr=None):
  """ Run: Run a command.
      Returns: return_code, stdout, stderr

      Run() is used to invoke "other" tools, e.g.
      those NOT prefixed with "pnacl-"

      stdout and stderr only contain meaningful data if
          redirect_{stdout,stderr} == subprocess.PIPE

      Run will terminate the program upon failure unless errexit == False
      TODO(robertm): errexit == True has not been tested and needs more work

      redirect_stdout and redirect_stderr are passed straight
      to subprocess.Popen
  """
  result_stdout = None
  result_stderr = None
  if isinstance(args, str):
    # A string command is env-expanded and shell-split into a list.
    args = shell.split(env.eval(args))

  # The executable path needs native (OS) separators for Popen.
  args = [pathtools.tosys(args[0])] + args[1:]

  Log.Info('Running: ' + StringifyCommand(args))

  if env.getbool('DRY_RUN'):
    if redirect_stderr or redirect_stdout:
      # TODO(pdox): Prevent this from happening, so that
      # dry-run is more useful.
      Log.Fatal("Unhandled dry-run case.")
    return 0, None, None

  try:
    # If we have too long of a cmdline on windows, running it would fail.
    # Attempt to use a file with the command line options instead in that case.
    if ArgsTooLongForWindows(args):
      actual_args = ConvertArgsToFile(args)
      Log.Info('Wrote long commandline to file for Windows: ' +
               StringifyCommand(actual_args))
    else:
      actual_args = args

    p = subprocess.Popen(actual_args,
                         stdout=redirect_stdout,
                         stderr=redirect_stderr)
    result_stdout, result_stderr = p.communicate()
  except Exception, e:
    msg = '%s\nCommand was: %s' % (str(e), StringifyCommand(args))
    print(msg)
    DriverExit(1)

  Log.Info('Return Code: ' + str(p.returncode))

  if errexit and p.returncode != 0:
    # Surface captured output before exiting so failures are diagnosable.
    if redirect_stdout == subprocess.PIPE:
      Log.Error('--------------stdout: begin')
      Log.Error(result_stdout)
      Log.Error('--------------stdout: end')
    if redirect_stderr == subprocess.PIPE:
      Log.Error('--------------stderr: begin')
      Log.Error(result_stderr)
      Log.Error('--------------stderr: end')
    DriverExit(p.returncode)

  return p.returncode, result_stdout, result_stderr
def IsWindowsPython():
  """True when this python itself is running on Windows."""
  return platform.system().lower().find('windows') != -1
def SetupCygwinLibs():
  """Prepend DRIVER_BIN to PATH so the bundled cygwin1.dll etc. win."""
  bindir = pathtools.tosys(env.getone('DRIVER_BIN'))
  # Prepend the directory containing cygwin1.dll etc. to the PATH to ensure we
  # get the right one.
  os.environ['PATH'] = os.pathsep.join(
      [bindir] + os.environ['PATH'].split(os.pathsep))
def HelpNotAvailable():
  # Fallback help text for driver modules that define no get_help().
  return 'Help text not available'
def DriverMain(module, argv):
  """Common entry point for every pnacl driver module.

  Sets up the driver env (script identity, config, cygwin PATH), handles
  --help variants, then delegates to module.main() with the tool name
  stripped from argv. Returns the module's exit status.
  """
  # TODO(robertm): this is ugly - try to get rid of this
  if '--pnacl-driver-verbose' in argv:
    Log.IncreaseVerbosity()
    env.set('LOG_VERBOSE', '1')

  # driver_path has the form: /foo/bar/pnacl_root/newlib/bin/pnacl-clang
  driver_path = pathtools.abspath(pathtools.normalize(argv[0]))
  driver_bin = pathtools.dirname(driver_path)
  script_name = pathtools.basename(driver_path)
  env.set('SCRIPT_NAME', script_name)
  env.set('DRIVER_PATH', driver_path)
  env.set('DRIVER_BIN', driver_bin)

  Log.SetScriptName(script_name)

  ReadConfig()

  if IsWindowsPython():
    SetupCygwinLibs()

  # skip tool name
  argv = argv[1:]

  # Handle help info
  if ('--help' in argv or
      '-h' in argv or
      '-help' in argv or
      '--help-full' in argv):
    help_func = getattr(module, 'get_help', None)
    if not help_func:
      Log.Fatal(HelpNotAvailable())
    helpstr = help_func(argv)
    print(helpstr)
    return 0

  return module.main(argv)
def MaybeStripNonSFISuffix(s):
  """Removes _NONSFI suffix if possible, otherwise |s| as is."""
  suffix = '_NONSFI'
  if s.endswith(suffix):
    return s[:-len(suffix)]
  return s
def SetArch(arch):
  """Record the target architecture and derived settings in the env.

  Stores the canonical name as ARCH, its _NONSFI-stripped form as
  BASE_ARCH, and sets NONSFI_NACL when the two differ.
  """
  canonical = FixArch(arch)
  env.set('ARCH', canonical)
  stripped = MaybeStripNonSFISuffix(canonical)
  env.set('BASE_ARCH', stripped)
  env.setbool('NONSFI_NACL', canonical != stripped)
def GetArch(required = False):
  """Return the current ARCH env setting, or None when it is unset.

  With required=True a missing architecture is a fatal driver error.
  """
  arch = env.getone('ARCH') or None
  if required and arch is None:
    Log.Fatal('Missing -arch!')
  return arch
def ArchMerge(filename, must_match):
  """Reconcile |filename|'s architecture with the current ARCH setting.

  Reads an ELF file or an archive file to determine the machine type. If
  ARCH is already set, makes sure the file has the same architecture. If
  ARCH is not set, sets ARCH to the file's architecture.

  Note that SFI and NONSFI share the same file format, so they are treated
  as the same architecture here.

  Args:
    filename: path of the object file, shared object or native archive.
    must_match: when True a mismatch is a fatal error; when False it only
        logs a warning.

  Returns:
    True if the file matches ARCH (or ARCH was just adopted from it).
    False if the file doesn't match ARCH; this only happens when
    must_match is False.
  """
  file_type = filetype.FileType(filename)
  if file_type in ('o','so'):
    elfheader = elftools.GetELFHeader(filename)
    if not elfheader:
      Log.Fatal("%s: Cannot read ELF header", filename)
    new_arch = elfheader.arch
  elif filetype.IsNativeArchive(filename):
    # Native archive file types are named 'archive-<arch>'.
    new_arch = file_type[len('archive-'):]
  else:
    Log.Fatal('%s: Unexpected file type in ArchMerge', filename)
  existing_arch = GetArch()
  if not existing_arch:
    # First file seen: adopt its architecture.
    SetArch(new_arch)
    return True
  # The _NONSFI binary format is as same as the SFI's.
  existing_arch = MaybeStripNonSFISuffix(existing_arch)
  if new_arch != existing_arch:
    if must_match:
      msg = "%s: Incompatible object file (%s != %s)"
      logfunc = Log.Fatal
    else:
      msg = "%s: Skipping incompatible object file (%s != %s)"
      logfunc = Log.Warning
    logfunc(msg, filename, new_arch, existing_arch)
    return False
  # existing_arch and new_arch == existing_arch
  return True
def CheckTranslatorPrerequisites():
  """Fatal-error unless the sandboxed-translator artifacts are present.

  Verifies that sel_ldr and the IRT blob exist (plus, on Linux, the nacl
  bootstrap loader).  Skipped entirely under --dry-run.
  """
  if env.getbool('DRY_RUN'):
    return
  required = ['SEL_LDR', 'IRT_BLOB']
  if GetBuildOS() == 'linux':
    # Linux launches sel_ldr through the bootstrap helper.
    required.append('BOOTSTRAP_LDR')
  for var in required:
    path = env.getone(var)
    if not pathtools.exists(path):
      Log.Fatal('Could not find %s [%s]', var, path)
def SelLdrCommand():
  """Build the shell-template string used to invoke sel_ldr."""
  if GetBuildOS() != 'linux':
    cmd = '${SEL_LDR}'
  else:
    # Linux runs sel_ldr via the bootstrap loader and reserves the low
    # address range; the run of X's is substituted with a value later.
    cmd = '${BOOTSTRAP_LDR} ${SEL_LDR} --reserved_at_zero=0x%s' % ('X' * 16)
  return '${SEL_LDR_PREFIX} %s ${SEL_LDR_FLAGS}' % cmd
def AddListToEnv(command, env_var_prefix, string_list):
  """Append '-E <prefix>_<index>=<value>' argument pairs to |command|.

  One pair is added for each element of |string_list|, indexed from 0.
  """
  for index, value in enumerate(string_list):
    command.extend(['-E', '%s_%d=%s' % (env_var_prefix, index, value)])
class DriverChain(object):
  """ The DriverChain class takes one or more input files,
      an output file, and a sequence of steps. It executes
      those steps, using intermediate files in between,
      to generate the final output.
  """
  def __init__(self, input, output, namegen):
    """Set up the chain.

    Args:
      input: a single input path (str) or a list of input paths.
      output: the final output path, or a falsy value for none.
      namegen: object providing TempNameForInput/TempNameForOutput used to
          name intermediate files between steps.
    """
    self.input = input
    self.output = pathtools.normpath(output) if output else output
    self.steps = []
    self.namegen = namegen
    # "input" can be a list of files or a single file.
    # If we're compiling for a single file, then we use
    # TempNameForInput. If there are multiple files
    # (e.g. linking), then we use TempNameForOutput.
    if isinstance(self.input, str):
      self.use_names_for_input = True
      self.input = pathtools.normpath(self.input) if self.input else self.input
      CheckPathLength(self.input)
    else:
      self.use_names_for_input = False
      self.input = [pathtools.normpath(p) if p else p for p in self.input]
      for path in self.input:
        CheckPathLength(path)
    CheckPathLength(output)
  def add(self, callback, output_type, **extra):
    """Append a step: callback(input, output, **extra) producing output_type."""
    step = (callback, output_type, extra)
    self.steps.append(step)
  def run(self):
    """Run every step in order, threading each step's output into the next.

    All steps but the last write to generated temp names; the last step
    writes to the chain's final output path.
    """
    step_input = self.input
    for (i, (callback, output_type, extra)) in enumerate(self.steps):
      if i == len(self.steps)-1:
        # Last step
        step_output = self.output
      else:
        # Intermediate step
        if self.use_names_for_input:
          step_output = self.namegen.TempNameForInput(self.input, output_type)
        else:
          step_output = self.namegen.TempNameForOutput(output_type)
      callback(step_input, step_output, **extra)
      step_input = step_output
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=arguments-differ
""" losses for training neural networks """
from __future__ import absolute_import
from .. import ndarray
from ..base import numeric_types
from .block import HybridBlock
def _apply_weighting(F, loss, weight=None, sample_weight=None):
"""Apply weighting to loss.
Parameters
----------
loss : Symbol
The loss to be weighted.
weight : float or None
Global scalar weight for loss.
sample_weight : Symbol or None
Per sample weighting. Must be broadcastable to
the same shape as loss. For example, if loss has
shape (64, 10) and you want to weight each sample
in the batch separately, `sample_weight` should have
shape (64, 1).
Returns
-------
loss : Symbol
Weighted loss
"""
if sample_weight is not None:
loss = F.broadcast_mul(loss, sample_weight)
if weight is not None:
assert isinstance(weight, numeric_types), "weight must be a number"
loss = loss * weight
return loss
def _reshape_label_as_output(F, output, label):
    # In imperative (ndarray) mode the output shape is known, so the label
    # can be reshaped to it directly.  Under symbolic execution shapes are
    # not yet available: reshape to the empty shape () and let it be
    # inferred from output's shape via the '-' operator later.
    if F is ndarray:
        return label.reshape(output.shape)
    return label.reshape(())
class Loss(HybridBlock):
    """Base class for loss functions.

    Parameters
    ----------
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """
    def __init__(self, weight, batch_axis, **kwargs):
        super(Loss, self).__init__(**kwargs)
        # Stored for use by subclasses inside hybrid_forward.
        self._weight = weight
        self._batch_axis = batch_axis

    def __repr__(self):
        template = '{name}(batch_axis={_batch_axis}, w={_weight})'
        return template.format(name=self.__class__.__name__, **self.__dict__)

    def hybrid_forward(self, F, x, *args, **kwargs):
        """Overrides to construct symbolic graph for this `Block`.

        Parameters
        ----------
        x : Symbol or NDArray
            The first input tensor.
        *args : list of Symbol or list of NDArray
            Additional input tensors.
        """
        # pylint: disable= invalid-name
        raise NotImplementedError
class L2Loss(Loss):
    """Mean squared error between output and label, scaled by 1/2:

    .. math::
        L = \\frac{1}{2}\\sum_i \\Vert {output}_i - {label}_i \\Vert^2.

    Output and label can have arbitrary shape as long as they have the same
    number of elements.

    Parameters
    ----------
    weight : float or None
        Global scalar weight for loss.
    sample_weight : Symbol or None
        Per sample weighting, broadcastable to the loss shape; e.g. shape
        (64, 1) to weight each sample of a (64, 10) loss separately.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """
    def __init__(self, weight=1., batch_axis=0, **kwargs):
        super(L2Loss, self).__init__(weight, batch_axis, **kwargs)

    def hybrid_forward(self, F, output, label, sample_weight=None):
        label = _reshape_label_as_output(F, output, label)
        squared_error = F.square(output - label)
        # The conventional 1/2 factor is folded into the global weight.
        weighted = _apply_weighting(
            F, squared_error, self._weight/2, sample_weight)
        return F.mean(weighted, axis=self._batch_axis, exclude=True)
class L1Loss(Loss):
    """Calculates the mean absolute error between output and label:

    .. math::
        L = \\sum_i \\vert {output}_i - {label}_i \\vert.

    Output and label must have the same shape.

    Parameters
    ----------
    weight : float or None
        Global scalar weight for loss.
    sample_weight : Symbol or None
        Per sample weighting. Must be broadcastable to
        the same shape as loss. For example, if loss has
        shape (64, 10) and you want to weight each sample
        in the batch, `sample_weight` should have shape (64, 1).
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """
    def __init__(self, weight=None, batch_axis=0, **kwargs):
        super(L1Loss, self).__init__(weight, batch_axis, **kwargs)

    def hybrid_forward(self, F, output, label, sample_weight=None):
        label = _reshape_label_as_output(F, output, label)
        loss = F.abs(output - label)
        # Fix: the previous docstring advertised a spurious 1/2 factor
        # (copied from L2Loss); no such factor is applied here.
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        return F.mean(loss, axis=self._batch_axis, exclude=True)
class SigmoidBinaryCrossEntropyLoss(Loss):
    r"""The cross-entropy loss for binary classification. (alias: SigmoidBCELoss)

    BCE loss is useful when training logistic regression.

    .. math::
        loss(o, t) = - 1/n \sum_i (t[i] * log(o[i]) + (1 - t[i]) * log(1 - o[i]))

    Parameters
    ----------
    from_sigmoid : bool, default is `False`
        Whether the input has already been passed through a sigmoid.  When
        `False`, the loss fuses the sigmoid with the BCE computation, which
        is more numerically stable through the log-sum-exp trick.
    weight : float or None
        Global scalar weight for loss.
    sample_weight : Symbol or None
        Per sample weighting, broadcastable to the loss shape; e.g. shape
        (64, 1) to weight each sample of a (64, 10) loss separately.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """
    def __init__(self, from_sigmoid=False, weight=None, batch_axis=0, **kwargs):
        super(SigmoidBinaryCrossEntropyLoss, self).__init__(
            weight, batch_axis, **kwargs)
        self._from_sigmoid = from_sigmoid

    def hybrid_forward(self, F, output, label, sample_weight=None):
        label = _reshape_label_as_output(F, output, label)
        if self._from_sigmoid:
            # Inputs are probabilities already; epsilon guards the logs.
            loss = -(F.log(output+1e-8)*label + F.log(1.-output+1e-8)*(1.-label))
        else:
            # Fused sigmoid + BCE via the log-sum-exp trick.
            max_val = F.maximum(-output, 0)
            loss = output - output*label + max_val + F.log(F.exp(-max_val)+F.exp(-output-max_val))
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        return F.mean(loss, axis=self._batch_axis, exclude=True)
# Short-name convenience alias.
SigmoidBCELoss = SigmoidBinaryCrossEntropyLoss
class SoftmaxCrossEntropyLoss(Loss):
    """Computes the softmax cross entropy loss. (alias: SoftmaxCELoss)

    If `sparse_label` is `True`, label should contain integer category
    indicators:

    .. math::
        p = {softmax}({output})

        L = -\\sum_i {log}(p_{i,{label}_i})

    Label's shape should be output's shape without the `axis` dimension,
    i.e. for `output.shape` = (1,2,3,4) and axis = 2, `label.shape` should
    be (1,2,4).

    If `sparse_label` is `False`, label should contain a probability
    distribution with the same shape as output:

    .. math::
        p = {softmax}({output})

        L = -\\sum_i \\sum_j {label}_j {log}(p_{ij})

    Parameters
    ----------
    axis : int, default -1
        The axis to sum over when computing softmax and entropy.
    sparse_label : bool, default True
        Whether label is an integer array instead of probability distribution.
    from_logits : bool, default False
        Whether input is a log probability (usually from log_softmax) instead
        of unnormalized numbers.
    weight : float or None
        Global scalar weight for loss.
    sample_weight : Symbol or None
        Per sample weighting, broadcastable to the loss shape; e.g. shape
        (64, 1) to weight each sample of a (64, 10) loss separately.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """
    def __init__(self, axis=-1, sparse_label=True, from_logits=False,
                 weight=None, batch_axis=0, **kwargs):
        super(SoftmaxCrossEntropyLoss, self).__init__(
            weight, batch_axis, **kwargs)
        self._axis = axis
        self._sparse_label = sparse_label
        self._from_logits = from_logits

    def hybrid_forward(self, F, output, label, sample_weight=None):
        if not self._from_logits:
            output = F.log_softmax(output)
        if self._sparse_label:
            # Select the log-probability of each target class directly.
            ce = -F.pick(output, label, axis=self._axis, keepdims=True)
        else:
            # Cross entropy against a full target distribution.
            ce = -F.sum(output*label, axis=self._axis, keepdims=True)
        ce = _apply_weighting(F, ce, self._weight, sample_weight)
        return F.mean(ce, axis=self._batch_axis, exclude=True)
# Short-name convenience alias.
SoftmaxCELoss = SoftmaxCrossEntropyLoss
class KLDivLoss(Loss):
    """The Kullback-Leibler divergence loss.

    KL divergence is a useful distance measure for continuous distributions
    and is often useful when performing direct regression over the space of
    (discretely sampled) continuous output distributions.

    .. _Kullback-Leibler divergence:
        https://en.wikipedia.org/wiki/Kullback-Leibler_divergence

    .. math::
        L = 1/n \\sum_i (label_i * (log(label_i) - output_i))

    Label's shape should be the same as output's.

    Parameters
    ----------
    from_logits : bool, default is `True`
        Whether the input is log probability (usually from log_softmax)
        instead of unnormalized numbers.
    weight : float or None
        Global scalar weight for loss.
    sample_weight : Symbol or None
        Per sample weighting, broadcastable to the loss shape; e.g. shape
        (64, 1) to weight each sample of a (64, 10) loss separately.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """
    def __init__(self, from_logits=True, weight=None, batch_axis=0, **kwargs):
        super(KLDivLoss, self).__init__(weight, batch_axis, **kwargs)
        self._from_logits = from_logits

    def hybrid_forward(self, F, output, label, sample_weight=None):
        if not self._from_logits:
            output = F.log_softmax(output)
        # Epsilon keeps log(label) finite for zero-probability entries.
        divergence = label * (F.log(label+1e-8) - output)
        divergence = _apply_weighting(F, divergence, self._weight, sample_weight)
        return F.mean(divergence, axis=self._batch_axis, exclude=True)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Windows Registry plugin to parse the Application Compatibility Cache key."""
import construct
import logging
from plaso.events import time_events
from plaso.lib import binary
from plaso.lib import eventdata
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
class AppCompatCacheEvent(time_events.FiletimeEvent):
  """Event object for a single Application Compatibility Cache entry."""

  DATA_TYPE = 'windows:registry:appcompatcache'

  def __init__(self, filetime, usage, key, entry_index, path, offset):
    """Initializes a Windows Registry event.

    Args:
      filetime: The FILETIME timestamp value.
      usage: The description of the usage of the time value.
      key: Name of the Registry key being parsed.
      entry_index: The cache entry index number for the record.
      path: The full path to the executable.
      offset: The (data) offset of the Registry key or value.
    """
    super(AppCompatCacheEvent, self).__init__(filetime, usage)
    self.entry_index = entry_index
    self.keyname = key
    self.offset = offset
    self.path = path
class AppCompatCacheHeader(object):
  """Container for a parsed Application Compatibility Cache header."""

  def __init__(self):
    """Initializes the header object with empty values."""
    super(AppCompatCacheHeader, self).__init__()
    self.header_size = 0
    self.number_of_cached_entries = 0
class AppCompatCacheCachedEntry(object):
  """Container for a parsed Application Compatibility Cache cached entry."""

  def __init__(self):
    """Initializes the cached entry object with empty values."""
    super(AppCompatCacheCachedEntry, self).__init__()
    # Size is 0 until parsed; the remaining attributes stay None when the
    # source format does not provide them.
    self.cached_entry_size = 0
    self.data = None
    self.file_size = None
    self.insertion_flags = None
    self.last_modification_time = None
    self.last_update_time = None
    self.path = None
    self.shim_flags = None
class AppCompatCachePlugin(interface.KeyPlugin):
  """Class that parses the Application Compatibility Cache Registry data."""

  NAME = 'winreg_appcompatcache'
  DESCRIPTION = u'Parser for Application Compatibility Cache Registry data.'

  REG_KEYS = [
      u'\\{current_control_set}\\Control\\Session Manager\\AppCompatibility',
      u'\\{current_control_set}\\Control\\Session Manager\\AppCompatCache']
  REG_TYPE = 'SYSTEM'
  URL = [
      (u'https://code.google.com/p/winreg-kb/wiki/'
       u'ApplicationCompatibilityCacheKey')]

  # Internal format-type identifiers, one per known on-disk layout.
  _FORMAT_TYPE_2000 = 1
  _FORMAT_TYPE_XP = 2
  _FORMAT_TYPE_2003 = 3
  _FORMAT_TYPE_VISTA = 4
  _FORMAT_TYPE_7 = 5
  _FORMAT_TYPE_8 = 6

  # AppCompatCache format signature used in Windows XP.
  _HEADER_SIGNATURE_XP = 0xdeadbeef

  # AppCompatCache format used in Windows XP.
  _HEADER_XP_32BIT_STRUCT = construct.Struct(
      'appcompatcache_header_xp',
      construct.ULInt32('signature'),
      construct.ULInt32('number_of_cached_entries'),
      construct.ULInt32('unknown1'),
      construct.ULInt32('unknown2'),
      construct.Padding(384))

  # XP entries are fixed size: a 528-byte UTF-16 path buffer followed by
  # three 64-bit values.
  _CACHED_ENTRY_XP_32BIT_STRUCT = construct.Struct(
      'appcompatcache_cached_entry_xp_32bit',
      construct.Array(528, construct.Byte('path')),
      construct.ULInt64('last_modification_time'),
      construct.ULInt64('file_size'),
      construct.ULInt64('last_update_time'))

  # AppCompatCache format signature used in Windows 2003, Vista and 2008.
  _HEADER_SIGNATURE_2003 = 0xbadc0ffe

  # AppCompatCache format used in Windows 2003.
  _HEADER_2003_STRUCT = construct.Struct(
      'appcompatcache_header_2003',
      construct.ULInt32('signature'),
      construct.ULInt32('number_of_cached_entries'))

  # NOTE(review): no _HEADER_VISTA_STRUCT is defined here although
  # _ParseHeader references one for _FORMAT_TYPE_VISTA - confirm whether
  # Vista should reuse _HEADER_2003_STRUCT.

  # 2003 cached entries; the 32/64-bit variants differ in the width and
  # position of path_offset.
  _CACHED_ENTRY_2003_32BIT_STRUCT = construct.Struct(
      'appcompatcache_cached_entry_2003_32bit',
      construct.ULInt16('path_size'),
      construct.ULInt16('maximum_path_size'),
      construct.ULInt32('path_offset'),
      construct.ULInt64('last_modification_time'),
      construct.ULInt64('file_size'))

  _CACHED_ENTRY_2003_64BIT_STRUCT = construct.Struct(
      'appcompatcache_cached_entry_2003_64bit',
      construct.ULInt16('path_size'),
      construct.ULInt16('maximum_path_size'),
      construct.ULInt32('unknown1'),
      construct.ULInt64('path_offset'),
      construct.ULInt64('last_modification_time'),
      construct.ULInt64('file_size'))

  # AppCompatCache format used in Windows Vista and 2008: file_size is
  # replaced by insertion and shim flags.
  _CACHED_ENTRY_VISTA_32BIT_STRUCT = construct.Struct(
      'appcompatcache_cached_entry_vista_32bit',
      construct.ULInt16('path_size'),
      construct.ULInt16('maximum_path_size'),
      construct.ULInt32('path_offset'),
      construct.ULInt64('last_modification_time'),
      construct.ULInt32('insertion_flags'),
      construct.ULInt32('shim_flags'))

  _CACHED_ENTRY_VISTA_64BIT_STRUCT = construct.Struct(
      'appcompatcache_cached_entry_vista_64bit',
      construct.ULInt16('path_size'),
      construct.ULInt16('maximum_path_size'),
      construct.ULInt32('unknown1'),
      construct.ULInt64('path_offset'),
      construct.ULInt64('last_modification_time'),
      construct.ULInt32('insertion_flags'),
      construct.ULInt32('shim_flags'))

  # AppCompatCache format signature used in Windows 7 and 2008 R2.
  _HEADER_SIGNATURE_7 = 0xbadc0fee

  # AppCompatCache format used in Windows 7 and 2008 R2.
  _HEADER_7_STRUCT = construct.Struct(
      'appcompatcache_header_7',
      construct.ULInt32('signature'),
      construct.ULInt32('number_of_cached_entries'),
      construct.Padding(120))

  # Windows 7 entries additionally carry a data blob (data_size/data_offset).
  _CACHED_ENTRY_7_32BIT_STRUCT = construct.Struct(
      'appcompatcache_cached_entry_7_32bit',
      construct.ULInt16('path_size'),
      construct.ULInt16('maximum_path_size'),
      construct.ULInt32('path_offset'),
      construct.ULInt64('last_modification_time'),
      construct.ULInt32('insertion_flags'),
      construct.ULInt32('shim_flags'),
      construct.ULInt32('data_size'),
      construct.ULInt32('data_offset'))

  _CACHED_ENTRY_7_64BIT_STRUCT = construct.Struct(
      'appcompatcache_cached_entry_7_64bit',
      construct.ULInt16('path_size'),
      construct.ULInt16('maximum_path_size'),
      construct.ULInt32('unknown1'),
      construct.ULInt64('path_offset'),
      construct.ULInt64('last_modification_time'),
      construct.ULInt32('insertion_flags'),
      construct.ULInt32('shim_flags'),
      construct.ULInt64('data_size'),
      construct.ULInt64('data_offset'))

  # AppCompatCache format used in Windows 8.0 and 8.1.  The header signature
  # value (0x80 = 128) equals the header size in bytes.
  _HEADER_SIGNATURE_8 = 0x00000080

  _HEADER_8_STRUCT = construct.Struct(
      'appcompatcache_header_8',
      construct.ULInt32('signature'),
      construct.Padding(124))

  # Windows 8 entries are variable size; this fixed prefix carries the size
  # of the remaining entry data.
  _CACHED_ENTRY_HEADER_8_STRUCT = construct.Struct(
      'appcompatcache_cached_entry_header_8',
      construct.ULInt32('signature'),
      construct.ULInt32('unknown1'),
      construct.ULInt32('cached_entry_data_size'),
      construct.ULInt16('path_size'))

  # AppCompatCache format used in Windows 8.0.
  _CACHED_ENTRY_SIGNATURE_8_0 = '00ts'

  # AppCompatCache format used in Windows 8.1.
  _CACHED_ENTRY_SIGNATURE_8_1 = '10ts'
  def _CheckSignature(self, value_data):
    """Parses and validates the signature.

    Args:
      value_data: a binary string containing the value data.

    Returns:
      The format type if successful or None otherwise.
    """
    signature = construct.ULInt32('signature').parse(value_data)
    if signature == self._HEADER_SIGNATURE_XP:
      return self._FORMAT_TYPE_XP

    elif signature == self._HEADER_SIGNATURE_2003:
      # TODO: determine which format version is used (2003 or Vista).
      return self._FORMAT_TYPE_2003

    elif signature == self._HEADER_SIGNATURE_7:
      return self._FORMAT_TYPE_7

    elif signature == self._HEADER_SIGNATURE_8:
      # The Windows 8 signature value (0x80 = 128) doubles as the header
      # size, so the first cached entry starts at offset |signature|; its
      # 4-byte tag must be a known Windows 8.0 or 8.1 entry signature.
      if value_data[signature:signature + 4] in [
          self._CACHED_ENTRY_SIGNATURE_8_0, self._CACHED_ENTRY_SIGNATURE_8_1]:
        return self._FORMAT_TYPE_8
    # Unknown signatures fall through and implicitly return None.
def _DetermineCacheEntrySize(
self, format_type, value_data, cached_entry_offset):
"""Determines the size of a cached entry.
Args:
format_type: integer value that contains the format type.
value_data: a binary string containing the value data.
cached_entry_offset: integer value that contains the offset of
the first cached entry data relative to the start of
the value data.
Returns:
The cached entry size if successful or None otherwise.
Raises:
RuntimeError: if the format type is not supported.
"""
if format_type not in [
self._FORMAT_TYPE_XP, self._FORMAT_TYPE_2003, self._FORMAT_TYPE_VISTA,
self._FORMAT_TYPE_7, self._FORMAT_TYPE_8]:
raise RuntimeError(
u'[{0:s}] Unsupported format type: {1:d}'.format(
self.NAME, format_type))
cached_entry_data = value_data[cached_entry_offset:]
cached_entry_size = 0
if format_type == self._FORMAT_TYPE_XP:
cached_entry_size = self._CACHED_ENTRY_XP_32BIT_STRUCT.sizeof()
elif format_type in [
self._FORMAT_TYPE_2003, self._FORMAT_TYPE_VISTA, self._FORMAT_TYPE_7]:
path_size = construct.ULInt16('path_size').parse(cached_entry_data[0:2])
maximum_path_size = construct.ULInt16('maximum_path_size').parse(
cached_entry_data[2:4])
path_offset_32bit = construct.ULInt32('path_offset').parse(
cached_entry_data[4:8])
path_offset_64bit = construct.ULInt32('path_offset').parse(
cached_entry_data[8:16])
if maximum_path_size < path_size:
logging.error(
u'[{0:s}] Path size value out of bounds.'.format(self.NAME))
return
path_end_of_string_size = maximum_path_size - path_size
if path_size == 0 or path_end_of_string_size != 2:
logging.error(
u'[{0:s}] Unsupported path size values.'.format(self.NAME))
return
# Assume the entry is 64-bit if the 32-bit path offset is 0 and
# the 64-bit path offset is set.
if path_offset_32bit == 0 and path_offset_64bit != 0:
if format_type == self._FORMAT_TYPE_2003:
cached_entry_size = self._CACHED_ENTRY_2003_64BIT_STRUCT.sizeof()
elif format_type == self._FORMAT_TYPE_VISTA:
cached_entry_size = self._CACHED_ENTRY_VISTA_64BIT_STRUCT.sizeof()
elif format_type == self._FORMAT_TYPE_7:
cached_entry_size = self._CACHED_ENTRY_7_64BIT_STRUCT.sizeof()
else:
if format_type == self._FORMAT_TYPE_2003:
cached_entry_size = self._CACHED_ENTRY_2003_32BIT_STRUCT.sizeof()
elif format_type == self._FORMAT_TYPE_VISTA:
cached_entry_size = self._CACHED_ENTRY_VISTA_32BIT_STRUCT.sizeof()
elif format_type == self._FORMAT_TYPE_7:
cached_entry_size = self._CACHED_ENTRY_7_32BIT_STRUCT.sizeof()
elif format_type == self._FORMAT_TYPE_8:
cached_entry_size = self._CACHED_ENTRY_HEADER_8_STRUCT.sizeof()
return cached_entry_size
  def _ParseHeader(self, format_type, value_data):
    """Parses the header.

    Args:
      format_type: integer value that contains the format type.
      value_data: a binary string containing the value data.

    Returns:
      A header object (instance of AppCompatCacheHeader).

    Raises:
      RuntimeError: if the format type is not supported.
    """
    if format_type not in [
        self._FORMAT_TYPE_XP, self._FORMAT_TYPE_2003, self._FORMAT_TYPE_VISTA,
        self._FORMAT_TYPE_7, self._FORMAT_TYPE_8]:
      raise RuntimeError(
          u'[{0:s}] Unsupported format type: {1:d}'.format(
              self.NAME, format_type))
    # TODO: change to collections.namedtuple or use __slots__ if the overhead
    # of a regular object becomes a problem.
    header_object = AppCompatCacheHeader()
    if format_type == self._FORMAT_TYPE_XP:
      header_object.header_size = self._HEADER_XP_32BIT_STRUCT.sizeof()
      header_struct = self._HEADER_XP_32BIT_STRUCT.parse(value_data)
    elif format_type == self._FORMAT_TYPE_2003:
      header_object.header_size = self._HEADER_2003_STRUCT.sizeof()
      header_struct = self._HEADER_2003_STRUCT.parse(value_data)
    elif format_type == self._FORMAT_TYPE_VISTA:
      # NOTE(review): _HEADER_VISTA_STRUCT is not defined on this class, so
      # reaching this branch would raise AttributeError.  _CheckSignature
      # currently never returns _FORMAT_TYPE_VISTA, so the bug is latent -
      # confirm whether Vista should reuse _HEADER_2003_STRUCT.
      header_object.header_size = self._HEADER_VISTA_STRUCT.sizeof()
      header_struct = self._HEADER_VISTA_STRUCT.parse(value_data)
    elif format_type == self._FORMAT_TYPE_7:
      header_object.header_size = self._HEADER_7_STRUCT.sizeof()
      header_struct = self._HEADER_7_STRUCT.parse(value_data)
    elif format_type == self._FORMAT_TYPE_8:
      header_object.header_size = self._HEADER_8_STRUCT.sizeof()
      header_struct = self._HEADER_8_STRUCT.parse(value_data)
    # The Windows 8 header does not store an entry count; it stays 0.
    if format_type in [
        self._FORMAT_TYPE_XP, self._FORMAT_TYPE_2003, self._FORMAT_TYPE_VISTA,
        self._FORMAT_TYPE_7]:
      header_object.number_of_cached_entries = header_struct.get(
          'number_of_cached_entries')
    return header_object
  def _ParseCachedEntry(
      self, format_type, value_data, cached_entry_offset, cached_entry_size):
    """Parses a cached entry.

    Args:
      format_type: integer value that contains the format type.
      value_data: a binary string containing the value data.
      cached_entry_offset: integer value that contains the offset of
                           the cached entry data relative to the start of
                           the value data.
      cached_entry_size: integer value that contains the cached entry data size.

    Returns:
      A cached entry object (instance of AppCompatCacheCachedEntry).

    Raises:
      RuntimeError: if the format type is not supported.
    """
    if format_type not in [
        self._FORMAT_TYPE_XP, self._FORMAT_TYPE_2003, self._FORMAT_TYPE_VISTA,
        self._FORMAT_TYPE_7, self._FORMAT_TYPE_8]:
      raise RuntimeError(
          u'[{0:s}] Unsupported format type: {1:d}'.format(
              self.NAME, format_type))
    cached_entry_data = value_data[
        cached_entry_offset:cached_entry_offset + cached_entry_size]
    # Select the struct whose size matches; staying None means the size did
    # not correspond to any known layout for this format type.
    cached_entry_struct = None
    if format_type == self._FORMAT_TYPE_XP:
      if cached_entry_size == self._CACHED_ENTRY_XP_32BIT_STRUCT.sizeof():
        cached_entry_struct = self._CACHED_ENTRY_XP_32BIT_STRUCT.parse(
            cached_entry_data)
    elif format_type == self._FORMAT_TYPE_2003:
      if cached_entry_size == self._CACHED_ENTRY_2003_32BIT_STRUCT.sizeof():
        cached_entry_struct = self._CACHED_ENTRY_2003_32BIT_STRUCT.parse(
            cached_entry_data)
      elif cached_entry_size == self._CACHED_ENTRY_2003_64BIT_STRUCT.sizeof():
        cached_entry_struct = self._CACHED_ENTRY_2003_64BIT_STRUCT.parse(
            cached_entry_data)
    elif format_type == self._FORMAT_TYPE_VISTA:
      if cached_entry_size == self._CACHED_ENTRY_VISTA_32BIT_STRUCT.sizeof():
        cached_entry_struct = self._CACHED_ENTRY_VISTA_32BIT_STRUCT.parse(
            cached_entry_data)
      elif cached_entry_size == self._CACHED_ENTRY_VISTA_64BIT_STRUCT.sizeof():
        cached_entry_struct = self._CACHED_ENTRY_VISTA_64BIT_STRUCT.parse(
            cached_entry_data)
    elif format_type == self._FORMAT_TYPE_7:
      if cached_entry_size == self._CACHED_ENTRY_7_32BIT_STRUCT.sizeof():
        cached_entry_struct = self._CACHED_ENTRY_7_32BIT_STRUCT.parse(
            cached_entry_data)
      elif cached_entry_size == self._CACHED_ENTRY_7_64BIT_STRUCT.sizeof():
        cached_entry_struct = self._CACHED_ENTRY_7_64BIT_STRUCT.parse(
            cached_entry_data)
    elif format_type == self._FORMAT_TYPE_8:
      if cached_entry_data[0:4] not in [
          self._CACHED_ENTRY_SIGNATURE_8_0, self._CACHED_ENTRY_SIGNATURE_8_1]:
        raise RuntimeError(
            u'[{0:s}] Unsupported cache entry signature'.format(self.NAME))
      if cached_entry_size == self._CACHED_ENTRY_HEADER_8_STRUCT.sizeof():
        cached_entry_struct = self._CACHED_ENTRY_HEADER_8_STRUCT.parse(
            cached_entry_data)
        cached_entry_data_size = cached_entry_struct.get(
            'cached_entry_data_size')
        # Windows 8 entries are variable size: 12 bytes precede the entry
        # data whose size the header stores, so re-slice the real entry.
        cached_entry_size = 12 + cached_entry_data_size
        cached_entry_data = value_data[
            cached_entry_offset:cached_entry_offset + cached_entry_size]
    if not cached_entry_struct:
      raise RuntimeError(
          u'[{0:s}] Unsupported cache entry size: {1:d}'.format(
              self.NAME, cached_entry_size))
    cached_entry_object = AppCompatCacheCachedEntry()
    cached_entry_object.cached_entry_size = cached_entry_size
    # path_offset/data_size stay 0 for formats without an out-of-line path
    # or data blob; the extraction blocks below are guarded on them.
    path_offset = 0
    data_size = 0
    if format_type == self._FORMAT_TYPE_XP:
      # XP stores the path inline as UTF-16; scan for the 2-byte terminator.
      string_size = 0
      for string_index in xrange(0, 528, 2):
        if (ord(cached_entry_data[string_index]) == 0 and
            ord(cached_entry_data[string_index + 1]) == 0):
          break
        string_size += 2
      cached_entry_object.path = binary.Ut16StreamCopyToString(
          cached_entry_data[0:string_size])
    elif format_type in [
        self._FORMAT_TYPE_2003, self._FORMAT_TYPE_VISTA, self._FORMAT_TYPE_7]:
      path_size = cached_entry_struct.get('path_size')
      path_offset = cached_entry_struct.get('path_offset')
    elif format_type == self._FORMAT_TYPE_8:
      # The UTF-16 path immediately follows the 14-byte fixed prefix.
      path_size = cached_entry_struct.get('path_size')
      cached_entry_data_offset = 14 + path_size
      cached_entry_object.path = binary.Ut16StreamCopyToString(
          cached_entry_data[14:cached_entry_data_offset])
      remaining_data = cached_entry_data[cached_entry_data_offset:]
      cached_entry_object.insertion_flags = construct.ULInt32(
          'insertion_flags').parse(remaining_data[0:4])
      cached_entry_object.shim_flags = construct.ULInt32(
          'shim_flags').parse(remaining_data[4:8])
      # 8.0 and 8.1 differ in the amount of data between the flags and the
      # timestamp.
      if cached_entry_data[0:4] == self._CACHED_ENTRY_SIGNATURE_8_0:
        cached_entry_data_offset += 8
      elif cached_entry_data[0:4] == self._CACHED_ENTRY_SIGNATURE_8_1:
        cached_entry_data_offset += 10
      remaining_data = cached_entry_data[cached_entry_data_offset:]
    if format_type in [
        self._FORMAT_TYPE_XP, self._FORMAT_TYPE_2003, self._FORMAT_TYPE_VISTA,
        self._FORMAT_TYPE_7]:
      cached_entry_object.last_modification_time = cached_entry_struct.get(
          'last_modification_time')
    elif format_type == self._FORMAT_TYPE_8:
      cached_entry_object.last_modification_time = construct.ULInt64(
          'last_modification_time').parse(remaining_data[0:8])
    if format_type in [self._FORMAT_TYPE_XP, self._FORMAT_TYPE_2003]:
      cached_entry_object.file_size = cached_entry_struct.get('file_size')
    elif format_type in [self._FORMAT_TYPE_VISTA, self._FORMAT_TYPE_7]:
      cached_entry_object.insertion_flags = cached_entry_struct.get(
          'insertion_flags')
      cached_entry_object.shim_flags = cached_entry_struct.get('shim_flags')
    if format_type == self._FORMAT_TYPE_XP:
      cached_entry_object.last_update_time = cached_entry_struct.get(
          'last_update_time')
    if format_type == self._FORMAT_TYPE_7:
      data_offset = cached_entry_struct.get('data_offset')
      data_size = cached_entry_struct.get('data_size')
    elif format_type == self._FORMAT_TYPE_8:
      data_offset = cached_entry_offset + cached_entry_data_offset + 12
      data_size = construct.ULInt32('data_size').parse(remaining_data[8:12])
    # NOTE: path_size is only bound for the non-XP formats; the short-circuit
    # on path_offset (which stays 0 for XP) keeps this safe.
    if path_offset > 0 and path_size > 0:
      path_size += path_offset
      cached_entry_object.path = binary.Ut16StreamCopyToString(
          value_data[path_offset:path_size])
    if data_size > 0:
      data_size += data_offset
      cached_entry_object.data = value_data[data_offset:data_size]
    return cached_entry_object
def GetEntries(self, parser_context, key=None, **unused_kwargs):
"""Extracts event objects from a Application Compatibility Cache key.
Args:
parser_context: A parser context object (instance of ParserContext).
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
"""
value = key.GetValue('AppCompatCache')
if not value:
return
value_data = value.data
value_data_size = len(value.data)
format_type = self._CheckSignature(value_data)
if not format_type:
# TODO: Instead of logging emit a parser error object that once that
# mechanism is implemented.
logging.error(
u'AppCompatCache format error: [{0:s}] Unsupported signature'.format(
key.path))
return
header_object = self._ParseHeader(format_type, value_data)
# On Windows Vista and 2008 when the cache is empty it will
# only consist of the header.
if value_data_size <= header_object.header_size:
return
cached_entry_offset = header_object.header_size
cached_entry_size = self._DetermineCacheEntrySize(
format_type, value_data, cached_entry_offset)
if not cached_entry_size:
# TODO: Instead of logging emit a parser error object that once that
# mechanism is implemented.
logging.error(
u'AppCompatCache format error: [{0:s}] Unsupported cached entry '
u'size.'.format(key.path))
return
cached_entry_index = 0
while cached_entry_offset < value_data_size:
cached_entry_object = self._ParseCachedEntry(
format_type, value_data, cached_entry_offset, cached_entry_size)
if cached_entry_object.last_modification_time is not None:
# TODO: refactor to file modification event.
event_object = AppCompatCacheEvent(
cached_entry_object.last_modification_time,
u'File Last Modification Time', key.path,
cached_entry_index + 1, cached_entry_object.path,
cached_entry_offset)
parser_context.ProduceEvent(event_object, plugin_name=self.NAME)
if cached_entry_object.last_update_time is not None:
# TODO: refactor to process run event.
event_object = AppCompatCacheEvent(
cached_entry_object.last_update_time,
eventdata.EventTimestamp.LAST_RUNTIME, key.path,
cached_entry_index + 1, cached_entry_object.path,
cached_entry_offset)
parser_context.ProduceEvent(event_object, plugin_name=self.NAME)
cached_entry_offset += cached_entry_object.cached_entry_size
cached_entry_index += 1
if (header_object.number_of_cached_entries != 0 and
cached_entry_index >= header_object.number_of_cached_entries):
break
winreg.WinRegistryParser.RegisterPlugin(AppCompatCachePlugin)
| |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 07 16:25:23 2014
@author: Yang Xuefeng
"""
from __future__ import division
import numpy as np
import cPickle as cp
import sys
import scipy.stats as ss
import bisect
import argparse
class evaluation(object):
    """Benchmarks a word-embedding matrix on word-similarity, analogy and
    sentence-completion datasets.

    Args:
        wl: dict mapping word -> row index in the embedding matrix.

    NOTE: dataset paths are hard-coded Windows paths (D:\\SS\\...); only words
    present in ``wl`` are kept from each dataset.
    """

    def _load_similarity_dataset(self, path, vocab, wl):
        """Load one word-pair similarity dataset from an .npz file.

        Returns (pairs, scores, indices) restricted to pairs whose both
        words appear in ``vocab``.
        """
        f = np.load(path)
        words = f['w']
        score = [float(i) for i in f['s']]
        keep = [i for i in range(len(words))
                if words[i][0] in vocab and words[i][1] in vocab]
        pairs = [words[i] for i in keep]
        scores = [score[i] for i in keep]
        # .get(word, 0) mirrors the original behaviour of defaulting to row 0.
        indices = [(wl.get(p[0], 0), wl.get(p[1], 0)) for p in pairs]
        return pairs, scores, indices

    def _load_analogy_dataset(self, path, vocab, wl):
        """Load one 4-tuple analogy dataset from a pickle file.

        Returns (quads, index tuples, column-major index matrix).
        """
        l = cp.load(open(path))
        l = [i for i in l
             if i[0] in vocab and i[1] in vocab and i[2] in vocab and i[3] in vocab]
        index = [(wl[i[0]], wl[i[1]], wl[i[2]], wl[i[3]]) for i in l]
        mat = [list(col) for col in zip(*index)]
        return l, index, mat

    def __init__(self, wl):
        s = set(wl.keys())
        # Word-similarity benchmarks (the repeated load/filter logic was
        # previously copy-pasted five times; now shared via a helper).
        self.words353, self.score353, self.index353 = self._load_similarity_dataset(
            r'D:\SS\ResourceData\antonmys\wordsim353.npz', s, wl)
        self.words771, self.score771, self.index771 = self._load_similarity_dataset(
            r'D:\SS\ResourceData\antonmys\turk771.npz', s, wl)
        self.words65, self.score65, self.index65 = self._load_similarity_dataset(
            r'D:\SS\ResourceData\antonmys\rg65.npz', s, wl)
        self.words130, self.score130, self.index130 = self._load_similarity_dataset(
            r'D:\SS\ResourceData\antonmys\yp130.npz', s, wl)
        self.words3k, self.score3k, self.index3k = self._load_similarity_dataset(
            r'D:\SS\ResourceData\antonmys\M3k.npz', s, wl)
        # Analogy benchmarks (Google and MSR sets).
        self.word_g, self.index_g, self.index_g_mat = self._load_analogy_dataset(
            r'D:\SS\ResourceData\antonmys\analogy_g.pkl', s, wl)
        self.word_m, self.index_m, self.index_m_mat = self._load_analogy_dataset(
            r'D:\SS\ResourceData\antonmys\analogy_m.pkl', s, wl)
        # Sentence-completion benchmark: keep items where all 5 candidates
        # are in-vocabulary and more than half the sentence words are.
        f = np.load(r'D:\SS\ResourceData\antonmys\sent_complete.npz')
        select = []
        for i in range(len(f['c'])):
            t = [1 for j in f['c'][i] if j in s]
            p = [1 for j in f['s'][i] if j in s]
            if len(t) == 5 and 2 * len(p) > len(f['s'][i]):
                select.append(i)
        self.sents = [f['s'][i] for i in select]
        self.candidates = [f['c'][i] for i in select]
        self.answers = [f['a'][i] for i in select]
        self.index_answers = [wl[i] for i in self.answers]
        self.index_sents = []
        self.index_candidates = []
        for i in self.sents:
            t = [wl[j] for j in i if j in s]
            self.index_sents.append(t)
        for i in self.candidates:
            t = [wl[j] for j in i]
            self.index_candidates.append(t)

    def get_cosine(self, x, y):
        """Cosine similarity between vectors x and y.

        BUG FIX: this method was missing from the class although every
        similarity benchmark calls ``self.get_cosine`` (it only existed on
        the unrelated fine_tuning class), so word353() etc. raised
        AttributeError.
        """
        nominator = np.sum(x * y)
        dominator = np.sqrt(np.sum(x * x)) * np.sqrt(np.sum(y * y))
        return nominator / dominator

    def sent_completation(self, epoch, wm):
        """Sentence-completion accuracy.

        Args:
            epoch: unused; kept for interface compatibility.
            wm: embedding matrix.
        """
        r = []
        for i in range(len(self.answers)):
            t = []
            for j in self.index_candidates[i]:
                simi = [self.get_cosine(wm[j, :], wm[k, :])
                        for k in self.index_sents[i]]
                t.append((j, np.mean(simi)))
            t.sort(key=lambda x: x[1])
            # Pick the candidate with the highest mean similarity.
            r.append(t[-1][0])
        f = [1 if r[i] == self.index_answers[i] else 0 for i in range(len(r))]
        result = sum(f) / len(r)
        return result

    def _spearman(self, wm, index, score):
        """Spearman correlation between cosine similarities and gold scores."""
        simi = [self.get_cosine(wm[i[0], :], wm[i[1], :]) for i in index]
        r, p = ss.spearmanr(simi, score)
        return r

    def word353(self, wm):
        """WordSim-353 benchmark."""
        return self._spearman(wm, self.index353, self.score353)

    def turk771(self, wm):
        """MTurk-771 benchmark."""
        return self._spearman(wm, self.index771, self.score771)

    def rg65(self, wm):
        """Rubenstein-Goodenough 65 benchmark."""
        return self._spearman(wm, self.index65, self.score65)

    def yp130(self, wm):
        """Yang-Powers 130 (verb similarity) benchmark."""
        return self._spearman(wm, self.index130, self.score130)

    def m3k(self, wm):
        """MEN-3000 benchmark."""
        return self._spearman(wm, self.index3k, self.score3k)

    def analogy(self, wm, t):
        """Analogy accuracy using the vector-offset method.

        Args:
            wm: embedding matrix.
            t: 'g' for the Google set, 'm' for the MSR set.
        """
        wm_t = wm.transpose()
        if t == 'g':
            a, b, c, d = self.index_g_mat
        elif t == 'm':
            a, b, c, d = self.index_m_mat
        ma = wm[a, :]
        mb = wm[b, :]
        mc = wm[c, :]
        # a : b :: c : ?  ->  query vector b - a + c
        m = mb + mc - ma
        l = []
        for i in range(len(a)):
            simi = np.dot(m[i, :], wm_t)
            # Exclude the three query words from the candidate ranking.
            simi[[a[i], b[i], c[i]]] = -1
            l.append(np.argmax(simi))
        r = [1 if d[i] == l[i] else 0 for i in range(len(d))]
        r = sum(r) / len(d)
        return r

    def eval_all(self, wm):
        """Run every benchmark; returns the list of scores."""
        r = []
        r.append(self.word353(wm))
        r.append(self.rg65(wm))
        r.append(self.yp130(wm))
        r.append(self.turk771(wm))
        r.append(self.m3k(wm))
        r.append(self.analogy(wm, 'g'))
        r.append(self.analogy(wm, 'm'))
        # BUG FIX: sent_completation takes (epoch, wm); it was previously
        # called with only wm, which raised TypeError.
        r.append(self.sent_completation(0, wm))
        return r
class fine_tuning(object):
    """Iteratively adjusts word-embedding rows so each word's nearest
    neighbours better agree with a knowledge base of related words.

    Args:
        wl: dict mapping word -> row index in the embedding matrix.
        sr: stop rate (string or float); training stops once the per-epoch
            error improvement falls below ``first_epoch_error * sr``.
    """

    def __init__(self, wl, sr):
        self.sr = float(sr)
        self.wl = wl
        # Inverse mapping: row index -> word.
        it = wl.items()
        it = [(i[1], i[0]) for i in it]
        self.lw = dict(it)

    def normalization_mat(self, wm):
        """Return wm with every row scaled to unit L2 norm."""
        a, b = wm.shape
        norms = [np.sqrt(np.sum(np.square(wm[i, :]))) for i in range(a)]
        norms = np.array(norms).reshape(a, 1)
        wm = wm / norms
        return wm

    def normalization_vec(self, v):
        """Return v scaled to unit L2 norm."""
        norm = np.sqrt(np.sum(np.square(v)))
        v = v / norm
        return v

    def random_matrix_thres(self, wm):
        """Derive a cosine-similarity threshold from a random matrix of the
        same shape as wm and store it in ``self.thres``.

        The threshold is the mean, over 300 sampled rows, of the average of
        the 2nd and 3rd highest similarities of that row against all rows.
        """
        np.random.seed(10000)  # fixed seed: threshold is deterministic
        a, b = wm.shape
        rm = np.random.uniform(-1, 1, (a, b))
        norms = [np.sqrt(np.sum(np.square(rm[i, :]))) for i in range(a)]
        norms = np.array(norms).reshape(a, 1)
        rm = rm / norms
        rand_index = np.random.randint(0, a - 1, 300)
        r = []
        rmt = rm.transpose()
        for i in rand_index:
            simi = np.dot(rm[i, :], rmt)
            # np.sort is ascending: [-3:-1] picks the 2nd and 3rd largest
            # values (the largest is the row's similarity with itself).
            r.append(np.mean(np.sort(simi)[-3:-1]))
        thres = np.mean(r)
        self.thres = thres

    def judge(self, i1, i2):
        """Sign of the required rank correction: 1, -1 or 0."""
        if i1 < i2:
            return 1
        elif i1 > i2:
            return -1
        else:
            return 0

    def get_local_direction(self, k, wm, simi, kb):
        """Compute the signed set of neighbour rows to move toward/away from
        for word ``k`` plus the accumulated rank error.

        Args:
            k: row index of the word being updated (unused directly; the
               precomputed ``simi`` already encodes it).
            wm: embedding matrix.
            simi: similarity of row k against all rows.
            kb: list of knowledge-base neighbour indices for k, in rank order.
        """
        index = np.argsort(simi)
        simi_sort = simi[index]
        # Rows whose similarity exceeds the random-matrix threshold, capped
        # at the 300 most similar.
        number = wm.shape[0] - bisect.bisect_left(simi_sort, self.thres)
        if number > 300:
            number = 300
        index = index[::-1]  # descending by similarity
        index_dict = {index[i]: i - 1 for i in range(len(index))}
        inter = set.intersection(set(index[0:number]), set(kb))
        kb_bad = set(kb) - inter            # KB neighbours not yet similar enough
        index_bad = set(index[0:number]) - inter  # similar rows not in the KB
        index_bad_sign = [(i, -1) for i in index_bad]
        index_bad_error = [abs(number - index_dict[i]) for i in index_bad]
        kb_bad_sign = [(i, 1) for i in kb_bad]
        kb_bad_error = [abs(index_dict[kb[i]] - i)
                        for i in range(len(kb)) if kb[i] in kb_bad]
        inter_sign_error = [abs(index_dict[kb[i]] - i)
                            for i in range(len(kb)) if kb[i] in inter]
        inter_sign = [(kb[i], self.judge(i, index_dict[kb[i]]))
                      for i in range(len(kb)) if kb[i] in inter]
        inter_sign = [i for i in inter_sign if i[1] != 0]
        kb_bad_sign.extend(inter_sign)
        kb_bad_sign.extend(index_bad_sign)
        kb_bad_error.extend(index_bad_error)
        kb_bad_error.extend(inter_sign_error)
        error = np.sum(kb_bad_error)
        # list() so the transposed pair sequence is indexable under
        # Python 3 as well as Python 2.
        index_sign = list(zip(*kb_bad_sign))
        index = list(index_sign[0])
        sign = np.array(index_sign[1]).reshape(len(index_sign[1]), 1)
        temp = wm[index, :]
        result = sign * temp
        return result, error

    def get_update(self, k, wm, simi, kb):
        """Unit-norm mean update direction for row k plus its rank error."""
        result, error = self.get_local_direction(k, wm, simi, kb)
        result = np.mean(result, axis=0).reshape(1, result.shape[1])
        result = self.normalization_vec(result)
        return result, error

    def get_cosine(self, x, y):
        """Cosine similarity between vectors x and y."""
        nominator = np.sum(x * y)
        dominator = np.sqrt(np.sum(x * x)) * np.sqrt(np.sum(y * y))
        return nominator / dominator

    def training(self, wl, wm, kb, eva, evaluate=True):
        """Run fine-tuning epochs until the error improvement drops below
        the stop rate.

        Args:
            wl: word list (unused; kept for interface compatibility).
            wm: embedding matrix; rows are updated in place.
            kb: dict mapping row index -> list of related row indices.
            eva: an evaluation instance, or a falsy value to skip evaluation.
            evaluate: unused; kept for interface compatibility.

        Returns:
            (wm, error_list) tuple.
        """
        epoch = 1
        # BUG FIX: the three calls below previously went through the
        # module-level global ``ft`` instead of ``self``, so the class only
        # worked when an instance happened to be bound to a global named ft.
        wm = self.normalization_mat(wm)
        # transpose() is a view, so the in-place row updates below are
        # reflected in wm_t automatically.
        wm_t = wm.transpose()
        if eva:
            result = eva.eval_all(wm)
            print(result)
        error_list = []
        error_list.append(100000000000)  # sentinel so index [-2] always exists
        stop = True
        learning_rate = 0.1
        while stop:
            count = 0
            l_e = []
            print('epoch: {}'.format(epoch))
            for k in kb.keys():
                count = count + 1
                simi = np.dot(wm[k, :], wm_t)
                update, error = self.get_update(k, wm, simi, kb[k])
                l_e.append(error)
                update = update * learning_rate
                wm[k, :] = wm[k, :] + update
                wm[k, :] = self.normalization_vec(wm[k, :])
                sys.stdout.write('{:10d} fin'.format(count))
                sys.stdout.write('\r')
                sys.stdout.flush()
            epoch = epoch + 1
            if eva:
                result = eva.eval_all(wm)
                print(result)
            error = np.mean(l_e)
            error_list.append(error)
            if len(error_list) > 2:
                # Stop once improvement falls below first-epoch error * sr.
                if error_list[-2] - error_list[-1] < error_list[1] * self.sr:
                    stop = False
            print(error)
        return wm, error_list
if __name__ == "__main__":
    # Command-line entry point: fine-tune a word-embedding matrix against a
    # knowledge base of related words and save the result.
    parser = argparse.ArgumentParser()
    # NOTE: each add_argument passes two single-dash flag aliases; argparse
    # stores the value under the first name (args.wm, args.wl, ...).
    parser.add_argument('-wm','-word_matrix')
    parser.add_argument('-wl','-word_list')
    parser.add_argument('-kb','-knowledge_base')
    parser.add_argument('-out','-output_file')
    #parser.add_argument('-res','-output_result')
    parser.add_argument('-s','-stop_rate')
    args = parser.parse_args()
    name_wl = args.wl
    name_wm = args.wm
    name_kb = args.kb
    name_out = args.out
    #name_res = args.res
    sr = args.s  # stop rate; converted to float inside fine_tuning.__init__
    print 'loading'
    kb = cp.load(open(name_kb))  # knowledge base: {row index: [related row indices]}
    wl = cp.load(open(name_wl))  # word list: {word: row index into wm}
    wm = np.load(name_wm)        # embedding matrix
    norms = np.sqrt(np.sum(np.square(wm),axis=1))  # NOTE(review): computed but never used
    print 'Generating Threshold'
    ft = fine_tuning(wl,sr)
    ft.random_matrix_thres(wm)
    #eva = evaluation(wl)
    print 'Training Start'
    eva = False  # evaluation disabled; pass an evaluation instance to enable it
    wm,el = ft.training(wl, wm, kb, eva)
    np.save(name_out,wm)
| |
#!/usr/bin/env python
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~ Copyright (C) 2002-2004 TechGame Networks, LLC.
##~
##~ This library is free software; you can redistribute it and/or
##~ modify it under the terms of the BSD style License as found in the
##~ LICENSE file included with this distribution.
##
## Modified by Dirk Holtwick <holtwick@web.de>, 2007-2008
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""CSS-2.1 engine
Primary classes:
* CSSElementInterfaceAbstract
Provide a concrete implementation for the XML element model used.
* CSSCascadeStrategy
Implements the CSS-2.1 engine's attribute lookup rules.
* CSSParser
Parses CSS source forms into usable results using CSSBuilder and
CSSMutableSelector. You may want to override parseExternal()
* CSSBuilder (and CSSMutableSelector)
A concrete implementation for cssParser.CSSBuilderAbstract (and
cssParser.CSSSelectorAbstract) to provide usable results to
CSSParser requests.
Dependencies:
python 2.3 (or greater)
sets, cssParser, re (via cssParser)
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ To replace any for with list comprehension
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def stopIter(value):
    """Abort a list comprehension by raising StopIteration carrying *value*.

    Replaces the Python-2-only ``raise StopIteration, value`` statement while
    preserving its semantics: a tuple argument becomes the exception's args
    (callers such as CSSRuleset.findCSSRuleFor index the caught exception to
    recover the (nodeFilter, declarations) pair).
    """
    if isinstance(value, tuple):
        raise StopIteration(*value)
    raise StopIteration(value)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Imports
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import copy
try:
set
except NameError:
from sets import Set as set
import cssParser
import cssSpecial
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Constants / Variables / Etc.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
CSSParseError = cssParser.CSSParseError
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Definitions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSElementInterfaceAbstract(object):
    """Abstract adapter between the CSS engine and a concrete XML element
    model.  Subclasses supply attribute access, inline style extraction,
    node matching, pseudo-state queries and tree navigation."""

    def getAttr(self, name, default=NotImplemented):
        raise NotImplementedError('Subclass responsibility')

    def getIdAttr(self):
        """The element's id attribute, or '' when absent."""
        return self.getAttr('id', '')

    def getClassAttr(self):
        """The element's class attribute, or '' when absent."""
        return self.getAttr('class', '')

    def getInlineStyle(self):
        raise NotImplementedError('Subclass responsibility')

    def matchesNode(self):
        raise NotImplementedError('Subclass responsibility')

    def inPseudoState(self, name, params=()):
        raise NotImplementedError('Subclass responsibility')

    def iterXMLParents(self):
        """Results must be compatible with CSSElementInterfaceAbstract"""
        raise NotImplementedError('Subclass responsibility')

    def getPreviousSibling(self):
        raise NotImplementedError('Subclass responsibility')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSCascadeStrategy(object):
    """Implements the CSS 2.1 cascade's attribute lookup rules.

    Holds up to three sources of rules, each a (normal, important) ruleset
    pair: author, user and user agent.  Lookups accumulate matching rules in
    increasing cascade priority, then sort by selector specificity so the
    last entry wins.

    NOTE: 'userAgenr' is a long-standing misspelling preserved because the
    attribute name is part of the public interface.
    """
    author = None
    user = None
    userAgenr = None

    def __init__(self, author=None, user=None, userAgent=None):
        # Only override the class-level defaults when a value was supplied.
        if author is not None:
            self.author = author
        if user is not None:
            self.user = user
        if userAgent is not None:
            self.userAgenr = userAgent

    def copyWithUpdate(self, author=None, user=None, userAgent=None):
        # Return a new strategy, substituting any supplied rule sources and
        # keeping the rest from self.
        if author is None:
            author = self.author
        if user is None:
            user = self.user
        if userAgent is None:
            userAgent = self.userAgenr
        return self.__class__(author, user, userAgent)

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def iterCSSRulesets(self, inline=None):
        # Yields rulesets in increasing cascade priority: user agent first,
        # user-important last.  Mirrors findCSSRulesFor below.
        if self.userAgenr is not None:
            yield self.userAgenr[0]
            yield self.userAgenr[1]
        if self.user is not None:
            yield self.user[0]
        if self.author is not None:
            yield self.author[0]
            yield self.author[1]
        if inline:
            yield inline[0]
            yield inline[1]
        if self.user is not None:
            yield self.user[1]

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def findStyleFor(self, element, attrName, default=NotImplemented):
        """Attempts to find the style setting for attrName in the CSSRulesets.

        Note: This method does not attempt to resolve rules that return
        "inherited", "default", or values that have units (including "%").
        This is left up to the client app to re-query the CSS in order to
        implement these semantics.
        """
        rule = self.findCSSRulesFor(element, attrName)
        return self._extractStyleForRule(rule, attrName, default)

    def findStylesForEach(self, element, attrNames, default=NotImplemented):
        """Attempts to find the style setting for attrName in the CSSRulesets.

        Note: This method does not attempt to resolve rules that return
        "inherited", "default", or values that have units (including "%").
        This is left up to the client app to re-query the CSS in order to
        implement these semantics.
        """
        rules = self.findCSSRulesForEach(element, attrNames)
        return [(attrName, self._extractStyleForRule(rule, attrName, default))
                for attrName, rule in rules.iteritems()]

    def findCSSRulesFor(self, element, attrName):
        rules = []

        inline = element.getInlineStyle()

        # Generators are wonderful but sometimes slow...
        # The cascade order below deliberately duplicates iterCSSRulesets
        # to avoid generator overhead on this hot path.
        #for ruleset in self.iterCSSRulesets(inline):
        #    rules += ruleset.findCSSRuleFor(element, attrName)

        if self.userAgenr is not None:
            rules += self.userAgenr[0].findCSSRuleFor(element, attrName)
            rules += self.userAgenr[1].findCSSRuleFor(element, attrName)
        if self.user is not None:
            rules += self.user[0].findCSSRuleFor(element, attrName)
        if self.author is not None:
            rules += self.author[0].findCSSRuleFor(element, attrName)
            rules += self.author[1].findCSSRuleFor(element, attrName)
        if inline:
            rules += inline[0].findCSSRuleFor(element, attrName)
            rules += inline[1].findCSSRuleFor(element, attrName)
        if self.user is not None:
            rules += self.user[1].findCSSRuleFor(element, attrName)

        # Sort by selector specificity; the most specific rule ends up last.
        rules.sort()
        return rules

    def findCSSRulesForEach(self, element, attrNames):
        # Same as findCSSRulesFor but resolves several attributes in one
        # pass over the rulesets.
        rules = dict([(name, []) for name in attrNames])

        inline = element.getInlineStyle()
        for ruleset in self.iterCSSRulesets(inline):
            for attrName, attrRules in rules.iteritems():
                attrRules += ruleset.findCSSRuleFor(element, attrName)

        for attrRules in rules.itervalues():
            attrRules.sort()
        return rules

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def _extractStyleForRule(self, rule, attrName, default=NotImplemented):
        if rule:
            # rule is packed in a list to differentiate from "no rule" vs "rule
            # whose value evalutates as False"
            style = rule[-1][1]
            return style[attrName]
        elif default is not NotImplemented:
            return default
        else:
            raise LookupError("Could not find style for '%s' in %r" % (attrName, rule))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Selectors
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSSelectorBase(object):
    """Base selector: holds the (nsPrefix, namespace, name) triple plus a
    sequence of qualifiers, and implements specificity-based ordering,
    hashing and element matching."""
    inline = False
    _hash = None          # cached hash; set by _updateHash() on frozen selectors
    _specificity = None   # lazily computed specificity tuple

    def __init__(self, completeName='*'):
        if not isinstance(completeName, tuple):
            # Normalize a bare tag name into (nsPrefix, namespace, name).
            completeName = (None, '*', completeName)
        self.completeName = completeName

    def _updateHash(self):
        self._hash = hash((self.fullName, self.specificity(), self.qualifiers))

    def __hash__(self):
        # Mutable selectors (no cached hash yet) fall back to identity hash.
        if self._hash is None:
            return object.__hash__(self)
        else:
            return self._hash

    def getNSPrefix(self):
        return self.completeName[0]
    nsPrefix = property(getNSPrefix)

    def getName(self):
        return self.completeName[2]
    name = property(getName)

    def getNamespace(self):
        return self.completeName[1]
    namespace = property(getNamespace)

    def getFullName(self):
        # (namespace, name) -- the prefix is dropped.
        return self.completeName[1:3]
    fullName = property(getFullName)

    def __repr__(self):
        strArgs = (self.__class__.__name__,)+self.specificity()+(self.asString(),)
        return '<%s %d:%d:%d:%d %s >' % strArgs

    def __str__(self):
        return self.asString()

    def __cmp__(self, other):
        # Python 2 ordering: specificity first, then full name, then
        # qualifiers.  This is what sorts cascade rule lists so that the
        # most specific rule ends up last.
        result = cmp(self.specificity(), other.specificity())
        if result != 0:
            return result
        result = cmp(self.fullName, other.fullName)
        if result != 0:
            return result
        result = cmp(self.qualifiers, other.qualifiers)
        return result

    def specificity(self):
        if self._specificity is None:
            self._specificity = self._calcSpecificity()
        return self._specificity

    def _calcSpecificity(self):
        """from http://www.w3.org/TR/CSS21/cascade.html#specificity"""
        hashCount = 0
        qualifierCount = 0
        elementCount = int(self.name != '*')
        for q in self.qualifiers:
            if q.isHash(): hashCount += 1
            elif q.isClass(): qualifierCount += 1
            elif q.isAttr(): qualifierCount += 1
            elif q.isPseudo(): elementCount += 1
            elif q.isCombiner():
                # NOTE: rebinding q here is harmless; the loop simply moves
                # on to the next qualifier afterwards.
                i,h,q,e = q.selector.specificity()
                hashCount += h
                qualifierCount += q
                elementCount += e
        # Leading inline flag ranks style="..." rules above everything else.
        return self.inline, hashCount, qualifierCount, elementCount

    def matches(self, element=None):
        if element is None:
            return False

        # with CSSDOMElementInterface.matchesNode(self, (namespace, tagName)) replacement:
        # fullName is (namespace, name): [1] is the tag name, [0] the namespace.
        if self.fullName[1] not in ('*', element.domElement.tagName):
            return False
        if self.fullName[0] not in (None, '', '*') and self.fullName[0] != element.domElement.namespaceURI:
            return False

        # Every qualifier must match for the selector to match.
        for qualifier in self.qualifiers:
            if not qualifier.matches(element):
                return False
        else:
            return True

    def asString(self):
        result = []
        if self.nsPrefix is not None:
            result.append('%s|%s' % (self.nsPrefix, self.name))
        else: result.append(self.name)

        for q in self.qualifiers:
            # Combinators render before the name; everything else after it.
            if q.isCombiner():
                result.insert(0, q.asString())
            else:
                result.append(q.asString())
        return ''.join(result)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSInlineSelector(CSSSelectorBase):
    # Marker subclass: inline=True is the leading component of the
    # specificity tuple, ranking style="..." rules above all stylesheet rules.
    inline = True
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSMutableSelector(CSSSelectorBase, cssParser.CSSSelectorAbstract):
    """Selector under construction by the parser: the add* callbacks
    accumulate qualifiers; asImmutable() freezes the final result."""
    # Shared class-level default; _addQualifier swaps in a per-instance list
    # on first use so this shared list is never mutated.
    qualifiers = []

    def asImmutable(self):
        return CSSImmutableSelector(self.completeName, [q.asImmutable() for q in self.qualifiers])

    def combineSelectors(klass, selectorA, op, selectorB):
        # "A op B" is represented as B plus a combination qualifier that
        # points back at A.
        selectorB.addCombination(op, selectorA)
        return selectorB
    combineSelectors = classmethod(combineSelectors)

    def addCombination(self, op, other):
        self._addQualifier(CSSSelectorCombinationQualifier(op, other))

    def addHashId(self, hashId):
        self._addQualifier(CSSSelectorHashQualifier(hashId))

    def addClass(self, class_):
        self._addQualifier(CSSSelectorClassQualifier(class_))

    def addAttribute(self, attrName):
        self._addQualifier(CSSSelectorAttributeQualifier(attrName))

    def addAttributeOperation(self, attrName, op, attrValue):
        self._addQualifier(CSSSelectorAttributeQualifier(attrName, op, attrValue))

    def addPseudo(self, name):
        self._addQualifier(CSSSelectorPseudoQualifier(name))

    def addPseudoFunction(self, name, params):
        self._addQualifier(CSSSelectorPseudoQualifier(name, params))

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def _addQualifier(self, qualifier):
        # A truthy (non-empty) list is necessarily instance-owned: append.
        # Otherwise rebind to a fresh list, shadowing the class-level [].
        if self.qualifiers:
            self.qualifiers.append(qualifier)
        else:
            self.qualifiers = [qualifier]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSImmutableSelector(CSSSelectorBase):
    """Frozen, hashable selector: qualifiers are stored as a tuple and the
    hash is cached eagerly so instances can serve as ruleset dict keys."""

    def __init__(self, completeName='*', qualifiers=()):
        # Freeze the qualifiers before the base initializer and hash cache.
        self.qualifiers = tuple(qualifiers)
        CSSSelectorBase.__init__(self, completeName)
        self._updateHash()

    @classmethod
    def fromSelector(klass, selector):
        """Freeze a (mutable) selector into an immutable copy."""
        return klass(selector.completeName, selector.qualifiers)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Selector Qualifiers -- see CSSImmutableSelector
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSSelectorQualifierBase(object):
    """Base class for selector qualifiers.  Every kind-predicate defaults
    to False; concrete subclasses override exactly one of them."""

    def isHash(self):
        """True only for #id qualifiers."""
        return False

    def isClass(self):
        """True only for .class qualifiers."""
        return False

    def isAttr(self):
        """True only for [attr...] qualifiers."""
        return False

    def isPseudo(self):
        """True only for :pseudo qualifiers."""
        return False

    def isCombiner(self):
        """True only for combinator qualifiers (' ', '>', '+')."""
        return False

    def asImmutable(self):
        # Qualifiers are immutable by default; return self unchanged.
        return self

    def __str__(self):
        return self.asString()
class CSSSelectorHashQualifier(CSSSelectorQualifierBase):
    """An #id qualifier: matches elements whose id attribute equals hashId."""

    def __init__(self, hashId):
        self.hashId = hashId

    def isHash(self):
        return True

    def __hash__(self):
        return hash((self.hashId,))

    def asString(self):
        return '#' + self.hashId

    def matches(self, element):
        return self.hashId == element.getIdAttr()
class CSSSelectorClassQualifier(CSSSelectorQualifierBase):
    """A .class qualifier: matches when classId is one of the words in the
    element's whitespace-separated class attribute."""

    def __init__(self, classId):
        self.classId = classId

    def isClass(self):
        return True

    def __hash__(self):
        return hash((self.classId,))

    def asString(self):
        return '.' + self.classId

    def matches(self, element):
        #return self.classId in element.getClassAttr().split()
        # Reads the DOM attribute directly instead of getClassAttr().
        attrValue = element.domElement.attributes.get('class')
        if attrValue is None:
            return False
        return self.classId in attrValue.value.split()
class CSSSelectorAttributeQualifier(CSSSelectorQualifierBase):
    """[attr], [attr=v], [attr~=v] and [attr|=v] qualifiers."""
    # Class-level defaults; __init__ only creates instance attributes when
    # the supplied value differs, keeping default instances lightweight.
    name, op, value = None, None, NotImplemented

    def __init__(self, attrName, op=None, attrValue=NotImplemented):
        self.name = attrName
        if op is not self.op:
            self.op = op
        if attrValue is not self.value:
            self.value = attrValue

    def isAttr(self):
        return True

    def __hash__(self):
        return hash((self.name, self.op, self.value))

    def asString(self):
        if self.value is NotImplemented:
            return '[%s]' % (self.name,)
        else: return '[%s%s%s]' % (self.name, self.op, self.value)

    def matches(self, element):
        if self.op is None:
            # Bare [attr]: mere presence of the attribute.
            return element.getAttr(self.name, NotImplemented) != NotImplemented
        elif self.op == '=':
            return self.value == element.getAttr(self.name, NotImplemented)
        elif self.op == '~=':
            #return self.value in element.getAttr(self.name, '').split()
            # ~= : whitespace-separated word match, read from the DOM directly.
            attrValue = element.domElement.attributes.get(self.name)
            if attrValue is not None:
                return self.value in attrValue.value.split()
            else:
                return False
        elif self.op == '|=':
            #return self.value in element.getAttr(self.name, '').split('-')
            # |= : hyphen-separated prefix-style match.
            attrValue = element.domElement.attributes.get(self.name)
            if attrValue is not None:
                return self.value in attrValue.value.split('-')
            else:
                return False
        else:
            raise RuntimeError("Unknown operator %r for %r" % (self.op, self))
class CSSSelectorPseudoQualifier(CSSSelectorQualifierBase):
    """A :pseudo or :pseudo(params) qualifier."""

    def __init__(self, attrName, params=()):
        self.name = attrName
        self.params = tuple(params)

    def isPseudo(self):
        return True

    def __hash__(self):
        return hash((self.name, self.params))

    def asString(self):
        # BUG FIX: the branches were inverted -- the parameterised
        # ":name(params)" form was emitted for qualifiers *without* params
        # and the bare ":name" form for those *with* params.
        if self.params:
            return ':%s(%s)' % (self.name, self.params)
        else:
            return ':'+self.name

    def matches(self, element):
        return element.inPseudoState(self.name, self.params)
class CSSSelectorCombinationQualifier(CSSSelectorQualifierBase):
    """Combinator qualifier: combines this selector with another via
    descendant (' '), child ('>') or adjacent-sibling ('+') semantics."""

    def __init__(self, op, selector):
        self.op = op
        self.selector = selector

    def isCombiner(self):
        return True

    def __hash__(self):
        return hash((self.op, self.selector))

    def asImmutable(self):
        return self.__class__(self.op, self.selector.asImmutable())

    def asString(self):
        return '%s%s' % (self.selector.asString(), self.op)

    def matches(self, element):
        if self.op == ' ':
            if element is not None:
                if element.matchesNode(self.selector.fullName):
                    # stopIter aborts the comprehension at the first matching
                    # qualifier, so this is effectively
                    # any(q.matches(element)) -- note: ANY, not all;
                    # preserved as-is.
                    try:
                        [ None for qualifier in self.selector.qualifiers if qualifier.matches(element) and stopIter(None) ]
                    except StopIteration:
                        return True
            return False
        elif self.op == '>':
            if element is not None:
                if element.matchesNode(self.selector.fullName):
                    # NOTE(review): only the first qualifier is consulted
                    # here, unlike the descendant case above -- confirm
                    # intended.
                    if self.selector.qualifiers[0].matches(element):
                        return True
            return False
        elif self.op == '+':
            return self.selector.matches(element.getPreviousSibling())
        # Unknown operators fall through, implicitly returning None (falsy).
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Misc
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSTerminalFunction(object):
    """A parsed CSS function term such as rgb(...) or counter(...)."""

    def __init__(self, name, params):
        self.name, self.params = name, params

    def __repr__(self):
        return '<CSS function: %s(%s)>' % (self.name, ', '.join(self.params))
class CSSTerminalOperator(tuple):
    """An operator applied between CSS terms; behaves as a plain tuple of
    its constructor arguments."""

    def __new__(klass, *args):
        return super(CSSTerminalOperator, klass).__new__(klass, args)

    def __repr__(self):
        return 'op' + super(CSSTerminalOperator, self).__repr__()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Objects
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSDeclarations(dict):
    # Property-name -> value mapping for one rule; a plain dict subclass so
    # CSSBuilder subclasses can substitute their own DeclarationsFactory.
    pass
class CSSRuleset(dict):
    """Maps selector -> declarations and supports cascade-ordered lookups."""

    def findCSSRulesFor(self, element, attrName):
        # All (selector, declarations) pairs that define attrName and match
        # element, sorted by selector specificity (CSSSelectorBase.__cmp__).
        ruleResults = [ (nodeFilter, declarations) for nodeFilter, declarations in self.iteritems() if (attrName in declarations) and (nodeFilter.matches(element)) ]
        ruleResults.sort()
        return ruleResults

    def findCSSRuleFor(self, element, attrName):
        # Returns at most one rule.  stopIter aborts the comprehension at the
        # first match by raising StopIteration; under Python 2 the tuple
        # argument becomes the exception's args, and py2 exceptions support
        # indexing (e[i] -> e.args[i]), so the caught instance behaves like
        # the (nodeFilter, declarations) pair for _extractStyleForRule.
        try:
            return [ None for nodeFilter, declarations in self.iteritems() if (attrName in declarations) and (nodeFilter.matches(element)) and stopIter((nodeFilter, declarations)) ]
        except StopIteration, value:
            return [value]

    def mergeStyles(self, styles):
        " XXX Bugfix for use in PISA "
        # Merge declaration dicts key-by-key; copy-on-write so declaration
        # objects shared with other rulesets are never mutated in place.
        for k, v in styles.items():
            if self.has_key(k) and self[k]:
                self[k] = copy.copy(self[k])
                self[k].update(v)
            else:
                self[k] = v
class CSSInlineRuleset(CSSRuleset, CSSDeclarations):
    """Declarations taken from a style="..." attribute; acts as a ruleset
    with a single synthetic inline selector."""

    def findCSSRulesFor(self, element, attrName):
        if attrName not in self:
            return []
        return [(CSSInlineSelector(), self)]

    def findCSSRuleFor(self, *args, **kw):
        # rule is packed in a list to differentiate from "no rule" vs "rule
        # whose value evalutates as False"
        return self.findCSSRulesFor(*args, **kw)[-1:]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Builder
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSBuilder(cssParser.CSSBuilderAbstract):
RulesetFactory = CSSRuleset
SelectorFactory = CSSMutableSelector
MediumSetFactory = set
DeclarationsFactory = CSSDeclarations
TermFunctionFactory = CSSTerminalFunction
TermOperatorFactory = CSSTerminalOperator
xmlnsSynonyms = {}
mediumSet = None
trackImportance = True
charset = None
    def __init__(self, mediumSet=mediumSet, trackImportance=trackImportance):
        # The defaults reference the class attributes (mediumSet=None,
        # trackImportance=True) captured when the class body executed.
        # NOTE(review): the default mediumSet=None would make setMediumSet
        # call set(None) and raise TypeError; callers appear to always pass
        # an explicit medium set -- confirm.
        self.setMediumSet(mediumSet)
        self.setTrackImportance(trackImportance)

    def isValidMedium(self, mediums):
        # A rule applies when any requested medium intersects our medium
        # set; 'all' always applies.
        if not mediums:
            return False
        if 'all' in mediums:
            return True

        mediums = self.MediumSetFactory(mediums)
        return bool(self.getMediumSet().intersection(mediums))

    def getMediumSet(self):
        return self.mediumSet

    def setMediumSet(self, mediumSet):
        self.mediumSet = self.MediumSetFactory(mediumSet)

    def updateMediumSet(self, mediumSet):
        self.getMediumSet().update(mediumSet)

    def getTrackImportance(self):
        return self.trackImportance

    def setTrackImportance(self, trackImportance=True):
        # When True, parse results are (normal, important) ruleset pairs.
        self.trackImportance = trackImportance
    #~ helpers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def _pushState(self):
        # Shallow-copy the instance __dict__ so nested parses (e.g. during
        # @import) can mutate builder state; _popState restores it wholesale.
        _restoreState = self.__dict__
        self.__dict__ = self.__dict__.copy()
        self._restoreState = _restoreState
        self.namespaces = {}

    def _popState(self):
        self.__dict__ = self._restoreState

    def _declarations(self, declarations, DeclarationsFactory=None):
        # Each declaration is (name, value, important) when importance is
        # tracked; split into (normal, important) declaration objects.
        DeclarationsFactory = DeclarationsFactory or self.DeclarationsFactory
        if self.trackImportance:
            normal, important = [], []
            for d in declarations:
                if d[-1]:  # trailing "important" flag
                    important.append(d[:-1])
                else: normal.append(d[:-1])
            return DeclarationsFactory(normal), DeclarationsFactory(important)
        else:
            return DeclarationsFactory(declarations)

    def _xmlnsGetSynonym(self, uri):
        # Don't forget to substitute our namespace synonyms!
        return self.xmlnsSynonyms.get(uri or None, uri) or None
    #~ css results ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def beginStylesheet(self):
        self._pushState()

    def endStylesheet(self):
        self._popState()

    def stylesheet(self, stylesheetElements, stylesheetImports):
        # XXX Updated for PISA
        # Merge imported stylesheets first so the document's own rules win
        # on key collisions.
        if self.trackImportance:
            normal, important = self.RulesetFactory(), self.RulesetFactory()
            for normalStylesheet, importantStylesheet in stylesheetImports:
                normal.mergeStyles(normalStylesheet)
                important.mergeStyles(importantStylesheet)
            for normalStyleElement, importantStyleElement in stylesheetElements:
                normal.mergeStyles(normalStyleElement)
                important.mergeStyles(importantStyleElement)
            return normal, important
        else:
            result = self.RulesetFactory()
            for stylesheet in stylesheetImports:
                result.mergeStyles(stylesheet)
            for styleElement in stylesheetElements:
                result.mergeStyles(styleElement)
            return result

    def beginInline(self):
        self._pushState()

    def endInline(self):
        self._popState()

    def specialRules(self, declarations):
        # Expand shorthand properties (margin, border, ...) via cssSpecial.
        return cssSpecial.parseSpecialRules(declarations)

    def inline(self, declarations):
        declarations = self.specialRules(declarations)
        return self._declarations(declarations, CSSInlineRuleset)
    def ruleset(self, selectors, declarations):
        # XXX Modified for pisa!
        declarations = self.specialRules(declarations)
        # XXX Modified for pisa!

        if self.trackImportance:
            normalDecl, importantDecl = self._declarations(declarations)
            normal, important = self.RulesetFactory(), self.RulesetFactory()
            for s in selectors:
                # Freeze the selector so it is hashable as a dict key.
                s = s.asImmutable()
                if normalDecl:
                    normal[s] = normalDecl
                if importantDecl:
                    important[s] = importantDecl
            return normal, important
        else:
            declarations = self._declarations(declarations)
            result = [(s.asImmutable(), declarations) for s in selectors]
            return self.RulesetFactory(result)
#~ css namespaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def resolveNamespacePrefix(self, nsPrefix, name):
if nsPrefix == '*':
return (nsPrefix, '*', name)
xmlns = self.namespaces.get(nsPrefix, None)
xmlns = self._xmlnsGetSynonym(xmlns)
return (nsPrefix, xmlns, name)
#~ css @ directives ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def atCharset(self, charset):
self.charset = charset
def atImport(self, import_, mediums, cssParser):
if self.isValidMedium(mediums):
return cssParser.parseExternal(import_)
return None
    def atNamespace(self, nsprefix, uri):
        # Register an @namespace prefix -> URI mapping for selector
        # resolution (see resolveNamespacePrefix).
        self.namespaces[nsprefix] = uri
def atMedia(self, mediums, ruleset):
if self.isValidMedium(mediums):
return ruleset
return None
    def atPage(self, page, pseudopage, declarations):
        """
        This is overriden by xhtml2pdf.context.pisaCSSBuilder
        """
        # Fallback: treat @page declarations as a universal ('*') ruleset.
        return self.ruleset([self.selector('*')], declarations)
    def atFontFace(self, declarations):
        """
        This is overriden by xhtml2pdf.context.pisaCSSBuilder
        """
        # Fallback: treat @font-face declarations as a universal ruleset.
        return self.ruleset([self.selector('*')], declarations)
    def atIdent(self, atIdent, cssParser, src):
        # Unknown @-rules are not implemented; hand the source back
        # unconsumed together with the NotImplemented sentinel.
        return src, NotImplemented
#~ css selectors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def selector(self, name):
        # Build a selector object via the pluggable SelectorFactory.
        return self.SelectorFactory(name)
    def combineSelectors(self, selectorA, op, selectorB):
        # Combinators (descendant/child/sibling) are delegated to the
        # factory class hook.
        return self.SelectorFactory.combineSelectors(selectorA, op, selectorB)
#~ css declarations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def property(self, name, value, important=False):
if self.trackImportance:
return (name, value, important)
else:
return (name, value)
def combineTerms(self, termA, op, termB):
if op in (',', ' '):
if isinstance(termA, list):
termA.append(termB)
return termA
else:
return [termA, termB]
elif op is None and termB is None:
return [termA]
else:
if isinstance(termA, list):
# Bind these "closer" than the list operators -- i.e. work on
# the (recursively) last element of the list
termA[-1] = self.combineTerms(termA[-1], op, termB)
return termA
else:
return self.TermOperatorFactory(termA, op, termB)
    def termIdent(self, value):
        # Identifier terms are passed through untouched.
        return value
def termNumber(self, value, units=None):
if units:
return value, units
else:
return value
    def termRGB(self, value):
        # RGB color terms are passed through untouched.
        return value
    def termURI(self, value):
        # url(...) terms are passed through untouched.
        return value
    def termString(self, value):
        # Quoted string terms are passed through untouched.
        return value
    def termUnicodeRange(self, value):
        # unicode-range terms are passed through untouched.
        return value
    def termFunction(self, name, value):
        # Functional terms (e.g. rgb(), counter()) are wrapped by the
        # pluggable TermFunctionFactory.
        return self.TermFunctionFactory(name, value)
    def termUnknown(self, src):
        # Unrecognized terms are not implemented; hand the source back
        # unconsumed together with the NotImplemented sentinel.
        return src, NotImplemented
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Parser -- finally!
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSParser(cssParser.CSSParser):
    """Concrete CSS parser wired to the pisa CSSBuilder.

    When no cssBuilder is supplied (and *create* is true) a builder is
    constructed from the remaining keyword arguments.
    """
    CSSBuilderFactory = CSSBuilder
    def __init__(self, cssBuilder=None, create=True, **kw):
        if not cssBuilder and create:
            assert cssBuilder is None
            cssBuilder = self.createCSSBuilder(**kw)
        cssParser.CSSParser.__init__(self, cssBuilder)
    def createCSSBuilder(self, **kw):
        """Instantiate the builder class (override point for subclasses)."""
        return self.CSSBuilderFactory(**kw)
    def parseExternal(self, cssResourceName):
        """Parse an external CSS file referenced by @import.

        Raises RuntimeError when the path does not name an existing file.
        """
        if os.path.isfile(cssResourceName):
            # Use open() instead of the deprecated Python-2-only file()
            # builtin, and close the handle once parsing is done (the
            # original leaked it).
            with open(cssResourceName, 'r') as cssFile:
                return self.parseFile(cssFile, True)
        else:
            raise RuntimeError("Cannot resolve external CSS file: \"%s\"" % cssResourceName)
| |
# Copyright 2009 Paul J. Davis <paul.joseph.davis@gmail.com>
#
# This file is part of the pywebmachine package released
# under the MIT license.
import inspect
import os
import random
import re
import sys
import urlparse
from parser import RequestParser
dirname = os.path.dirname(__file__)
random.seed()
def uri(data):
    """Split a request URI into its components.

    Returns a dict carrying the raw string plus scheme/host/port/path/
    query/fragment.  Missing components are None, except the port which
    defaults to 80; path parameters are folded back into the path.
    """
    parts = urlparse.urlparse(data)
    if parts.path and parts.params:
        path = ";".join([parts.path, parts.params])
    elif parts.path:
        path = parts.path
    elif parts.params:
        # Don't think this can happen
        path = ";" + parts.path
    else:
        path = None
    return {
        "raw": data,
        "scheme": parts.scheme or None,
        "host": parts.netloc.rsplit(":", 1)[0] or None,
        "port": parts.port or 80,
        "path": path,
        "query": parts.query or None,
        "fragment": parts.fragment or None,
    }
class request(object):
    """A single parser test case loaded from data/requests/<name>.

    The raw request is read from disk (newlines stripped, literal
    "\\r\\n" escapes expanded to CRLF) and paired with the expected
    parse results; gen_cases() expands this into the cross-product of
    send/size/match strategies.
    """
    def __init__(self, name, expect):
        self.name = name
        self.fname = os.path.join(dirname, "data", "requests", name)
        with open(self.fname) as handle:
            self.data = handle.read()
        self.data = self.data.replace("\n", "").replace("\\r\\n", "\r\n")
        self.expect = expect
        if not isinstance(self.expect, list):
            self.expect = [self.expect]
    # Functions for sending data to the parser.
    # These functions mock out reading from a
    # socket or other data source that might
    # be used in real life.
    def send_all(self):
        # The whole request in a single chunk.
        yield self.data
    def send_lines(self):
        # One CRLF-terminated line at a time, then any trailing partial.
        lines = self.data
        pos = lines.find("\r\n")
        while pos > 0:
            yield lines[:pos+2]
            lines = lines[pos+2:]
            pos = lines.find("\r\n")
        if len(lines):
            yield lines
    def send_bytes(self):
        # One byte at a time.
        for d in self.data:
            yield d
    def send_random(self):
        # Random chunk sizes up to roughly a tenth of the request.
        # max(1, ...) keeps randint's range valid for requests shorter
        # than 10 bytes (the original could call randint(1, 0)); '//'
        # keeps integer division on both Python 2 and 3.
        maxs = max(1, len(self.data) // 10)
        read = 0
        while read < len(self.data):
            chunk = random.randint(1, maxs)
            yield self.data[read:read+chunk]
            read += chunk
    # These functions define the sizes that the
    # read functions will read with.
    def size_all(self):
        return -1
    def size_bytes(self):
        return 1
    def size_small_random(self):
        return random.randint(0, 2)
    def size_random(self):
        return random.randint(1, 4096)
    # Match a body against various ways of reading
    # a message. Pass in the request, expected body
    # and one of the size functions.
    def szread(self, func, sizes):
        """Read once via *func* with a size from *sizes*; enforce the cap."""
        sz = sizes()
        data = func(sz)
        if sz >= 0 and len(data) > sz:
            raise AssertionError("Read more than %d bytes: %s" % (sz, data))
        return data
    def match_read(self, req, body, sizes):
        data = self.szread(req.body.read, sizes)
        count = 1000  # retry budget for empty reads before declaring EOF
        while len(body):
            if body[:len(data)] != data:
                raise AssertionError("Invalid body data read: %r != %r" % (
                    data, body[:len(data)]))
            body = body[len(data):]
            data = self.szread(req.body.read, sizes)
            if not data:
                count -= 1
                if count <= 0:
                    raise AssertionError("Unexpected apparent EOF")
        if len(body):
            raise AssertionError("Failed to read entire body: %r" % body)
        elif len(data):
            raise AssertionError("Read beyond expected body: %r" % data)
        data = req.body.read(sizes())
        if data:
            raise AssertionError("Read after body finished: %r" % data)
    def match_readline(self, req, body, sizes):
        data = self.szread(req.body.readline, sizes)
        count = 1000  # retry budget for empty reads before declaring EOF
        while len(body):
            if body[:len(data)] != data:
                raise AssertionError("Invalid data read: %r" % data)
            if '\n' in data[:-1]:
                raise AssertionError("Embedded new line: %r" % data)
            body = body[len(data):]
            data = self.szread(req.body.readline, sizes)
            if not data:
                count -= 1
                if count <= 0:
                    raise AssertionError("Apparent unexpected EOF")
        if len(body):
            raise AssertionError("Failed to read entire body: %r" % body)
        elif len(data):
            raise AssertionError("Read beyond expected body: %r" % data)
        data = req.body.readline(sizes())
        if data:
            raise AssertionError("Read data after body finished: %r" % data)
    def match_readlines(self, req, body, sizes):
        """\
        This skips the sizes checks as we don't implement it.
        """
        data = req.body.readlines()
        for line in data:
            if '\n' in line[:-1]:
                raise AssertionError("Embedded new line: %r" % line)
            if line != body[:len(line)]:
                raise AssertionError("Invalid body data read: %r != %r" % (
                    line, body[:len(line)]))
            body = body[len(line):]
        data = req.body.readlines(sizes())
        if data:
            raise AssertionError("Read data after body finished: %r" % data)
    def match_iter(self, req, body, sizes):
        """\
        This skips sizes because it's not part of the iter api.
        """
        for line in req.body:
            if '\n' in line[:-1]:
                raise AssertionError("Embedded new line: %r" % line)
            if line != body[:len(line)]:
                raise AssertionError("Invalid body data read: %r != %r" % (
                    line, body[:len(line)]))
            body = body[len(line):]
        try:
            data = iter(req.body).next()
            raise AssertionError("Read data after body finished: %r" % data)
        except StopIteration:
            pass
    # Construct a series of test cases from the permutations of
    # send, size, and match functions.
    def gen_cases(self):
        """Expand this request into (test, sender, sizer, matcher) tuples
        for every meaningful permutation of strategies."""
        def get_funs(p):
            return [v for k, v in inspect.getmembers(self) if k.startswith(p)]
        senders = get_funs("send_")
        sizers = get_funs("size_")
        matchers = get_funs("match_")
        cfgs = [
            (mt, sz, sn)
            for mt in matchers
            for sz in sizers
            for sn in senders
        ]
        # Strip out match_readlines, match_iter for all but one sizer
        cfgs = [
            (mt, sz, sn)
            for (mt, sz, sn) in cfgs
            if mt in [self.match_readlines, self.match_iter]
            and sz != self.size_all
            or mt not in [self.match_readlines, self.match_iter]
        ]
        ret = []
        for (mt, sz, sn) in cfgs:
            # __name__ (rather than the Python-2-only func_name) works on
            # both Python 2 and 3.
            mtn = mt.__name__[6:]
            szn = sz.__name__[5:]
            snn = sn.__name__[5:]
            def test_req(sn, sz, mt):
                self.check(sn, sz, mt)
            desc = "%s: MT: %s SZ: %s SN: %s" % (self.name, mtn, szn, snn)
            test_req.description = desc
            ret.append((test_req, sn, sz, mt))
        return ret
    def check(self, sender, sizer, matcher):
        """Feed the request through the parser, comparing each parsed
        request (or raised error) against the expected results."""
        cases = self.expect[:]
        ended = False
        try:
            p = RequestParser(sender())
        except Exception as e:
            # Constructing the parser may itself be the expected failure.
            if not isinstance(cases[0], Exception):
                raise
            self.same_error(e, cases[0])
            eq(len(cases), 1)  # was: len(casese) -- a NameError typo
            # The parser was never built; there is nothing left to check.
            return
        while True:
            try:
                req = p.next()
            except StopIteration:
                eq(len(cases), 0)
                ended = True
                break
            except Exception as e:
                # A stray unconditional `raise` here used to make the
                # expected-error comparison below unreachable.
                if not isinstance(cases[0], Exception):
                    raise
                self.same_error(e, cases.pop(0))
            else:
                self.same(req, sizer, matcher, cases.pop(0))
        eq(len(cases), 0)
        eq(ended, True)
    def same(self, req, sizer, matcher, exp):
        # Dispatch on whether the expectation is an error or a request.
        if isinstance(req, Exception):
            self.same_error(req, exp)
        else:
            self.same_obj(req, sizer, matcher, exp)
    def same_error(self, req, exp):
        istype(req, Exception)
        istype(exp, Exception)
        istype(req, exp)
    def same_obj(self, req, sizer, matcher, exp):
        eq(req.method, exp["method"])
        eq(req.uri, exp["uri"]["raw"])
        eq(req.scheme, exp["uri"]["scheme"])
        #eq(req.host, exp["uri"]["host"]) #XXX: not sure if "uri" should contain the host
        eq(req.port, exp["uri"]["port"])
        eq(req.path, exp["uri"]["path"])
        eq(req.query, exp["uri"]["query"])
        eq(req.fragment, exp["uri"]["fragment"])
        eq(req.version, exp["version"])
        eq(sorted(req.headers), sorted(exp["headers"]))
        matcher(req, exp["body"], sizer)
        eq(req.trailers, exp.get("trailers", []))
# Tiny assertion helpers used by the request checks above; each raises
# AssertionError with a %r-formatted message on failure.
def eq(a, b):
    assert a == b, "%r != %r" % (a, b)
def ne(a, b):
    assert a != b, "%r == %r" % (a, b)
def lt(a, b):
    assert a < b, "%r >= %r" % (a, b)
def gt(a, b):
    assert a > b, "%r <= %r" % (a, b)
def isin(a, b):
    assert a in b, "%r is not in %r" % (a, b)
def isnotin(a, b):
    assert a not in b, "%r is in %r" % (a, b)
def has(a, b):
    assert hasattr(a, b), "%r has no attribute %r" % (a, b)
def hasnot(a, b):
    assert not hasattr(a, b), "%r has an attribute %r" % (a, b)
def istype(a, b):
    assert isinstance(a, b), "%r is not an instance of %r" % (a, b)
def raises(exctype, func, *args, **kwargs):
    # Assert that calling func(*args, **kwargs) raises exctype; otherwise
    # fail with an AssertionError naming the function.
    try:
        func(*args, **kwargs)
    except exctype, inst:
        pass
    else:
        func_name = getattr(func, "func_name", "<builtin_function>")
        fmt = "Function %s did not raise %s"
        raise AssertionError(fmt % (func_name, exctype.__name__))
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import tensorflow as tf
from official.resnet import imagenet_main
tf.logging.set_verbosity(tf.logging.ERROR)
_BATCH_SIZE = 32
_LABEL_CLASSES = 1001
class BaseTest(tf.test.TestCase):
  """Shape and EstimatorSpec sanity checks for the ImageNet ResNet model."""
  def tensor_shapes_helper(self, resnet_size, version, with_gpu=False):
    """Checks the tensor shapes after each phase of the ResNet model."""
    def reshape(shape):
      """Returns the expected dimensions depending on if a
      GPU is being used.
      """
      # If a GPU is used for the test, the shape is returned (already in NCHW
      # form). When GPU is not used, the shape is converted to NHWC.
      if with_gpu:
        return shape
      return shape[0], shape[2], shape[3], shape[1]
    graph = tf.Graph()
    with graph.as_default(), self.test_session(
        use_gpu=with_gpu, force_gpu=with_gpu):
      model = imagenet_main.ImagenetModel(
          resnet_size,
          data_format='channels_first' if with_gpu else 'channels_last',
          version=version)
      inputs = tf.random_uniform([1, 224, 224, 3])
      output = model(inputs, training=True)
      # Intermediate tensors are looked up by the names assigned inside
      # the model function, then compared against the expected shapes.
      initial_conv = graph.get_tensor_by_name('initial_conv:0')
      max_pool = graph.get_tensor_by_name('initial_max_pool:0')
      block_layer1 = graph.get_tensor_by_name('block_layer1:0')
      block_layer2 = graph.get_tensor_by_name('block_layer2:0')
      block_layer3 = graph.get_tensor_by_name('block_layer3:0')
      block_layer4 = graph.get_tensor_by_name('block_layer4:0')
      avg_pool = graph.get_tensor_by_name('final_avg_pool:0')
      dense = graph.get_tensor_by_name('final_dense:0')
      self.assertAllEqual(initial_conv.shape, reshape((1, 64, 112, 112)))
      self.assertAllEqual(max_pool.shape, reshape((1, 64, 56, 56)))
      # The number of channels after each block depends on whether we're
      # using the building_block or the bottleneck_block.
      if resnet_size < 50:
        self.assertAllEqual(block_layer1.shape, reshape((1, 64, 56, 56)))
        self.assertAllEqual(block_layer2.shape, reshape((1, 128, 28, 28)))
        self.assertAllEqual(block_layer3.shape, reshape((1, 256, 14, 14)))
        self.assertAllEqual(block_layer4.shape, reshape((1, 512, 7, 7)))
        self.assertAllEqual(avg_pool.shape, reshape((1, 512, 1, 1)))
      else:
        self.assertAllEqual(block_layer1.shape, reshape((1, 256, 56, 56)))
        self.assertAllEqual(block_layer2.shape, reshape((1, 512, 28, 28)))
        self.assertAllEqual(block_layer3.shape, reshape((1, 1024, 14, 14)))
        self.assertAllEqual(block_layer4.shape, reshape((1, 2048, 7, 7)))
        self.assertAllEqual(avg_pool.shape, reshape((1, 2048, 1, 1)))
      self.assertAllEqual(dense.shape, (1, _LABEL_CLASSES))
      self.assertAllEqual(output.shape, (1, _LABEL_CLASSES))
  # One shape test per (resnet_size, version) combination on CPU.
  def test_tensor_shapes_resnet_18_v1(self):
    self.tensor_shapes_helper(18, version=1)
  def test_tensor_shapes_resnet_18_v2(self):
    self.tensor_shapes_helper(18, version=2)
  def test_tensor_shapes_resnet_34_v1(self):
    self.tensor_shapes_helper(34, version=1)
  def test_tensor_shapes_resnet_34_v2(self):
    self.tensor_shapes_helper(34, version=2)
  def test_tensor_shapes_resnet_50_v1(self):
    self.tensor_shapes_helper(50, version=1)
  def test_tensor_shapes_resnet_50_v2(self):
    self.tensor_shapes_helper(50, version=2)
  def test_tensor_shapes_resnet_101_v1(self):
    self.tensor_shapes_helper(101, version=1)
  def test_tensor_shapes_resnet_101_v2(self):
    self.tensor_shapes_helper(101, version=2)
  def test_tensor_shapes_resnet_152_v1(self):
    self.tensor_shapes_helper(152, version=1)
  def test_tensor_shapes_resnet_152_v2(self):
    self.tensor_shapes_helper(152, version=2)
  def test_tensor_shapes_resnet_200_v1(self):
    self.tensor_shapes_helper(200, version=1)
  def test_tensor_shapes_resnet_200_v2(self):
    self.tensor_shapes_helper(200, version=2)
  # The same combinations again, in NCHW format, skipped without CUDA.
  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
  def test_tensor_shapes_resnet_18_with_gpu_v1(self):
    self.tensor_shapes_helper(18, version=1, with_gpu=True)
  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
  def test_tensor_shapes_resnet_18_with_gpu_v2(self):
    self.tensor_shapes_helper(18, version=2, with_gpu=True)
  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
  def test_tensor_shapes_resnet_34_with_gpu_v1(self):
    self.tensor_shapes_helper(34, version=1, with_gpu=True)
  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
  def test_tensor_shapes_resnet_34_with_gpu_v2(self):
    self.tensor_shapes_helper(34, version=2, with_gpu=True)
  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
  def test_tensor_shapes_resnet_50_with_gpu_v1(self):
    self.tensor_shapes_helper(50, version=1, with_gpu=True)
  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
  def test_tensor_shapes_resnet_50_with_gpu_v2(self):
    self.tensor_shapes_helper(50, version=2, with_gpu=True)
  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
  def test_tensor_shapes_resnet_101_with_gpu_v1(self):
    self.tensor_shapes_helper(101, version=1, with_gpu=True)
  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
  def test_tensor_shapes_resnet_101_with_gpu_v2(self):
    self.tensor_shapes_helper(101, version=2, with_gpu=True)
  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
  def test_tensor_shapes_resnet_152_with_gpu_v1(self):
    self.tensor_shapes_helper(152, version=1, with_gpu=True)
  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
  def test_tensor_shapes_resnet_152_with_gpu_v2(self):
    self.tensor_shapes_helper(152, version=2, with_gpu=True)
  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
  def test_tensor_shapes_resnet_200_with_gpu_v1(self):
    self.tensor_shapes_helper(200, version=1, with_gpu=True)
  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
  def test_tensor_shapes_resnet_200_with_gpu_v2(self):
    self.tensor_shapes_helper(200, version=2, with_gpu=True)
  def resnet_model_fn_helper(self, mode, version, multi_gpu=False):
    """Tests that the EstimatorSpec is given the appropriate arguments."""
    tf.train.create_global_step()
    input_fn = imagenet_main.get_synth_input_fn()
    dataset = input_fn(True, '', _BATCH_SIZE)
    iterator = dataset.make_one_shot_iterator()
    features, labels = iterator.get_next()
    spec = imagenet_main.imagenet_model_fn(
        features, labels, mode, {
            'resnet_size': 50,
            'data_format': 'channels_last',
            'batch_size': _BATCH_SIZE,
            'version': version,
            'multi_gpu': multi_gpu,
        })
    predictions = spec.predictions
    self.assertAllEqual(predictions['probabilities'].shape,
                        (_BATCH_SIZE, _LABEL_CLASSES))
    self.assertEqual(predictions['probabilities'].dtype, tf.float32)
    self.assertAllEqual(predictions['classes'].shape, (_BATCH_SIZE,))
    self.assertEqual(predictions['classes'].dtype, tf.int64)
    # Loss only exists outside of predict mode; metrics only in eval mode.
    if mode != tf.estimator.ModeKeys.PREDICT:
      loss = spec.loss
      self.assertAllEqual(loss.shape, ())
      self.assertEqual(loss.dtype, tf.float32)
    if mode == tf.estimator.ModeKeys.EVAL:
      eval_metric_ops = spec.eval_metric_ops
      self.assertAllEqual(eval_metric_ops['accuracy'][0].shape, ())
      self.assertAllEqual(eval_metric_ops['accuracy'][1].shape, ())
      self.assertEqual(eval_metric_ops['accuracy'][0].dtype, tf.float32)
      self.assertEqual(eval_metric_ops['accuracy'][1].dtype, tf.float32)
  def test_resnet_model_fn_train_mode_v1(self):
    self.resnet_model_fn_helper(tf.estimator.ModeKeys.TRAIN, version=1)
  def test_resnet_model_fn_train_mode_v2(self):
    self.resnet_model_fn_helper(tf.estimator.ModeKeys.TRAIN, version=2)
  def test_resnet_model_fn_train_mode_multi_gpu_v1(self):
    self.resnet_model_fn_helper(tf.estimator.ModeKeys.TRAIN, version=1,
                                multi_gpu=True)
  def test_resnet_model_fn_train_mode_multi_gpu_v2(self):
    self.resnet_model_fn_helper(tf.estimator.ModeKeys.TRAIN, version=2,
                                multi_gpu=True)
  def test_resnet_model_fn_eval_mode_v1(self):
    self.resnet_model_fn_helper(tf.estimator.ModeKeys.EVAL, version=1)
  def test_resnet_model_fn_eval_mode_v2(self):
    self.resnet_model_fn_helper(tf.estimator.ModeKeys.EVAL, version=2)
  def test_resnet_model_fn_predict_mode_v1(self):
    self.resnet_model_fn_helper(tf.estimator.ModeKeys.PREDICT, version=1)
  def test_resnet_model_fn_predict_mode_v2(self):
    self.resnet_model_fn_helper(tf.estimator.ModeKeys.PREDICT, version=2)
  def test_imagenetmodel_shape(self):
    # Non-default batch size and class count must flow through to the
    # output shape for both ResNet versions.
    batch_size = 135
    num_classes = 246
    for version in (1, 2):
      model = imagenet_main.ImagenetModel(50, data_format='channels_last',
                                          num_classes=num_classes, version=version)
      fake_input = tf.random_uniform([batch_size, 224, 224, 3])
      output = model(fake_input, training=True)
      self.assertAllEqual(output.shape, (batch_size, num_classes))
# Run the TensorFlow test runner when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| |
import os
import re
from ctypes import c_char_p
from django.core.validators import ipv4_re
from django.contrib.gis.geoip.libgeoip import GEOIP_SETTINGS
from django.contrib.gis.geoip.prototypes import (
GeoIP_open, GeoIP_delete, GeoIP_database_info,
GeoIP_lib_version, GeoIP_record_by_addr, GeoIP_record_by_name,
GeoIP_country_code_by_addr, GeoIP_country_code_by_name,
GeoIP_country_name_by_addr, GeoIP_country_name_by_name)
from django.utils import six
from django.utils.encoding import force_bytes
# Regular expressions for recognizing the GeoIP free database editions.
free_regex = re.compile(r'^GEO-\d{3}FREE')
lite_regex = re.compile(r'^GEO-\d{3}LITE')
#### GeoIP classes ####
class GeoIPException(Exception):
    "Raised for invalid GeoIP configuration, paths, or queries."
    pass
class GeoIP(object):
    """ctypes-backed wrapper over the MaxMind GeoIP C library providing
    country and city lookups by IP address or FQDN."""
    # The flags for GeoIP memory caching.
    # GEOIP_STANDARD - read database from filesystem, uses least memory.
    #
    # GEOIP_MEMORY_CACHE - load database into memory, faster performance
    # but uses more memory
    #
    # GEOIP_CHECK_CACHE - check for updated database. If database has been
    # updated, reload filehandle and/or memory cache. This option
    # is not thread safe.
    #
    # GEOIP_INDEX_CACHE - just cache the most frequently accessed index
    # portion of the database, resulting in faster lookups than
    # GEOIP_STANDARD, but less memory usage than GEOIP_MEMORY_CACHE -
    # useful for larger databases such as GeoIP Organization and
    # GeoIP City. Note, for GeoIP Country, Region and Netspeed
    # databases, GEOIP_INDEX_CACHE is equivalent to GEOIP_MEMORY_CACHE
    #
    # GEOIP_MMAP_CACHE - load database into mmap shared memory ( not available
    # on Windows).
    GEOIP_STANDARD = 0
    GEOIP_MEMORY_CACHE = 1
    GEOIP_CHECK_CACHE = 2
    GEOIP_INDEX_CACHE = 4
    GEOIP_MMAP_CACHE = 8
    # Valid values for the `cache` constructor argument (the flag values
    # above); the dict values are unused.
    cache_options = dict((opt, None) for opt in (0, 1, 2, 4, 8))
    # Paths to the city & country binary databases.
    _city_file = ''
    _country_file = ''
    # Initially, pointers to GeoIP file references are NULL.
    _city = None
    _country = None
    def __init__(self, path=None, cache=0, country=None, city=None):
        """
        Initializes the GeoIP object, no parameters are required to use default
        settings. Keyword arguments may be passed in to customize the locations
        of the GeoIP data sets.
        * path: Base directory to where GeoIP data is located or the full path
            to where the city or country data files (*.dat) are located.
            Assumes that both the city and country data sets are located in
            this directory; overrides the GEOIP_PATH settings attribute.
        * cache: The cache settings when opening up the GeoIP datasets,
            and may be an integer in (0, 1, 2, 4, 8) corresponding to
            the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE,
            GEOIP_INDEX_CACHE, and GEOIP_MMAP_CACHE, `GeoIPOptions` C API
            settings, respectively. Defaults to 0, meaning that the data is read
            from the disk.
        * country: The name of the GeoIP country data file. Defaults to
            'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute.
        * city: The name of the GeoIP city data file. Defaults to
            'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute.
        """
        # Checking the given cache option.
        if cache in self.cache_options:
            self._cache = cache
        else:
            raise GeoIPException('Invalid GeoIP caching option: %s' % cache)
        # Getting the GeoIP data path.
        if not path:
            path = GEOIP_SETTINGS.get('GEOIP_PATH', None)
            if not path:
                raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
        if not isinstance(path, six.string_types):
            raise TypeError('Invalid path type: %s' % type(path).__name__)
        if os.path.isdir(path):
            # Constructing the GeoIP database filenames using the settings
            # dictionary. If the database files for the GeoLite country
            # and/or city datasets exist, then try and open them.
            country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat'))
            if os.path.isfile(country_db):
                self._country = GeoIP_open(force_bytes(country_db), cache)
                self._country_file = country_db
            city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat'))
            if os.path.isfile(city_db):
                self._city = GeoIP_open(force_bytes(city_db), cache)
                self._city_file = city_db
        elif os.path.isfile(path):
            # Otherwise, some detective work will be needed to figure
            # out whether the given database path is for the GeoIP country
            # or city databases.
            ptr = GeoIP_open(force_bytes(path), cache)
            info = GeoIP_database_info(ptr)
            if lite_regex.match(info):
                # GeoLite City database detected.
                self._city = ptr
                self._city_file = path
            elif free_regex.match(info):
                # GeoIP Country database detected.
                self._country = ptr
                self._country_file = path
            else:
                raise GeoIPException('Unable to recognize database edition: %s' % info)
        else:
            raise GeoIPException('GeoIP path must be a valid file or directory.')
    def __del__(self):
        # Cleaning any GeoIP file handles lying around.
        # GeoIP_delete may already be None during interpreter shutdown.
        if GeoIP_delete is None:
            return
        if self._country:
            GeoIP_delete(self._country)
        if self._city:
            GeoIP_delete(self._city)
    def _check_query(self, query, country=False, city=False, city_or_country=False):
        "Helper routine for checking the query and database availability."
        # Making sure a string was passed in for the query.
        if not isinstance(query, six.string_types):
            raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
        # Extra checks for the existence of country and city databases.
        if city_or_country and not (self._country or self._city):
            raise GeoIPException('Invalid GeoIP country and city data files.')
        elif country and not self._country:
            raise GeoIPException('Invalid GeoIP country data file: %s' % self._country_file)
        elif city and not self._city:
            raise GeoIPException('Invalid GeoIP city data file: %s' % self._city_file)
        # Return the query string back to the caller. GeoIP only takes bytestrings.
        return force_bytes(query)
    def city(self, query):
        """
        Returns a dictionary of city information for the given IP address or
        Fully Qualified Domain Name (FQDN). Some information in the dictionary
        may be undefined (None).
        """
        enc_query = self._check_query(query, city=True)
        if ipv4_re.match(query):
            # If an IP address was passed in
            return GeoIP_record_by_addr(self._city, c_char_p(enc_query))
        else:
            # If a FQDN was passed in.
            return GeoIP_record_by_name(self._city, c_char_p(enc_query))
    def country_code(self, query):
        "Returns the country code for the given IP Address or FQDN."
        enc_query = self._check_query(query, city_or_country=True)
        if self._country:
            if ipv4_re.match(query):
                return GeoIP_country_code_by_addr(self._country, enc_query)
            else:
                return GeoIP_country_code_by_name(self._country, enc_query)
        else:
            # Fall back to the city database when no country db is loaded.
            return self.city(query)['country_code']
    def country_name(self, query):
        "Returns the country name for the given IP Address or FQDN."
        enc_query = self._check_query(query, city_or_country=True)
        if self._country:
            if ipv4_re.match(query):
                return GeoIP_country_name_by_addr(self._country, enc_query)
            else:
                return GeoIP_country_name_by_name(self._country, enc_query)
        else:
            # Fall back to the city database when no country db is loaded.
            return self.city(query)['country_name']
    def country(self, query):
        """
        Returns a dictionary with the country code and name when given an
        IP address or a Fully Qualified Domain Name (FQDN). For example, both
        '24.124.1.80' and 'djangoproject.com' are valid parameters.
        """
        # Returning the country code and name
        return {'country_code': self.country_code(query),
                'country_name': self.country_name(query),
                }
    #### Coordinate retrieval routines ####
    def coords(self, query, ordering=('longitude', 'latitude')):
        "Returns the coordinates (in the given ordering) for the query, or None."
        cdict = self.city(query)
        if cdict is None:
            return None
        else:
            return tuple(cdict[o] for o in ordering)
    def lon_lat(self, query):
        "Returns a tuple of the (longitude, latitude) for the given query."
        return self.coords(query)
    def lat_lon(self, query):
        "Returns a tuple of the (latitude, longitude) for the given query."
        return self.coords(query, ('latitude', 'longitude'))
    def geos(self, query):
        "Returns a GEOS Point object for the given query."
        ll = self.lon_lat(query)
        if ll:
            from django.contrib.gis.geos import Point
            return Point(ll, srid=4326)
        else:
            return None
    #### GeoIP Database Information Routines ####
    @property
    def country_info(self):
        "Returns information about the GeoIP country database."
        if self._country is None:
            ci = 'No GeoIP Country data in "%s"' % self._country_file
        else:
            ci = GeoIP_database_info(self._country)
        return ci
    @property
    def city_info(self):
        "Returns information about the GeoIP city database."
        if self._city is None:
            ci = 'No GeoIP City data in "%s"' % self._city_file
        else:
            ci = GeoIP_database_info(self._city)
        return ci
    @property
    def info(self):
        "Returns information about the GeoIP library and databases in use."
        info = ''
        if GeoIP_lib_version:
            info += 'GeoIP Library:\n\t%s\n' % GeoIP_lib_version()
        return info + 'Country:\n\t%s\nCity:\n\t%s' % (self.country_info, self.city_info)
    #### Methods for compatibility w/the GeoIP-Python API. ####
    @classmethod
    def open(cls, full_path, cache):
        "Alternate constructor mirroring the GeoIP-Python API."
        return GeoIP(full_path, cache)
    def _rec_by_arg(self, arg):
        # Prefer the city database (richer records) when available.
        if self._city:
            return self.city(arg)
        else:
            return self.country(arg)
    # Aliases for compatibility with the GeoIP-Python API.
    region_by_addr = city
    region_by_name = city
    record_by_addr = _rec_by_arg
    record_by_name = _rec_by_arg
    country_code_by_addr = country_code
    country_code_by_name = country_code
    country_name_by_addr = country_name
    country_name_by_name = country_name
| |
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import os
import pathlib
import warnings
import io
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy.io import fits
from astropy import units as u
from astropy.table import Table
from astropy.io.fits import printdiff
from astropy.io.fits.connect import REMOVE_KEYWORDS
from astropy.utils.exceptions import AstropyUserWarning
from . import FitsTestCase
class TestConvenience(FitsTestCase):
    def test_resource_warning(self):
        # Regression test: the convenience functions must not leak open
        # file handles (which would surface here as ResourceWarning).
        warnings.simplefilter('always', ResourceWarning)
        _ = fits.getdata(self.data('test0.fits'))
        _ = fits.getheader(self.data('test0.fits'))
    def test_fileobj_not_closed(self):
        """
        Tests that file-like objects are not closed after being passed
        to convenience functions.
        Regression test for https://github.com/astropy/astropy/issues/5063
        """
        f = open(self.data('test0.fits'), 'rb')
        _ = fits.getdata(f)
        assert not f.closed
        # The handle must remain usable between calls; rewind and re-read.
        f.seek(0)
        _ = fits.getheader(f)
        assert not f.closed
        f.close()  # Close it now
    def test_table_to_hdu(self):
        # Mixed-dtype table with one valid and one unparseable unit; the
        # bad unit must produce exactly one UnitsWarning.
        table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
                      names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
        table['a'].unit = 'm/s'
        table['b'].unit = 'not-a-unit'
        with pytest.warns(u.UnitsWarning, match="'not-a-unit' did not parse as"
                          " fits unit") as w:
            hdu = fits.table_to_hdu(table)
        assert len(w) == 1
        # Check that TUNITn cards appear in the correct order
        # (https://github.com/astropy/astropy/pull/5720)
        assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2')
        assert isinstance(hdu, fits.BinTableHDU)
        filename = self.temp('test_table_to_hdu.fits')
        hdu.writeto(filename, overwrite=True)
    def test_masked_table_to_hdu(self):
        # Masked int/str/complex/float columns; masked entries must be
        # replaced per the FITS standard (TNULL, '', NaN).
        i = np.ma.MaskedArray([1, 2, 3], mask=[True, False, False])
        s = np.ma.MaskedArray(['a', 'b', 'c'], mask=[False, True, True])
        c = np.ma.MaskedArray([2.3+1j, 4.5+0j, 6.7-1j], mask=[True, False, True])
        f = np.ma.MaskedArray([2.3, 4.5, 6.7], mask=[True, False, True])
        table = Table([i, s, c, f], names=['i', 's', 'c', 'f'])
        # Check that FITS standard is used in replacing masked values.
        hdu = fits.table_to_hdu(table)
        assert isinstance(hdu, fits.BinTableHDU)
        assert hdu.header['TNULL1'] == i.fill_value
        assert_array_equal(hdu.data['i'], i.filled())
        assert_array_equal(hdu.data['s'], s.filled(''))
        assert_array_equal(hdu.data['c'], c.filled(np.nan))
        assert_array_equal(hdu.data['c'].real, c.real.filled(np.nan))
        assert_array_equal(hdu.data['c'].imag, c.imag.filled(np.nan))
        assert_array_equal(hdu.data['c'], c.filled(complex(np.nan, np.nan)))
        assert_array_equal(hdu.data['f'], f.filled(np.nan))
        filename = self.temp('test_table_to_hdu.fits')
        hdu.writeto(filename, overwrite=True)
    def test_table_non_stringifyable_unit_to_hdu(self):
        # A unit with no FITS representation must warn (exactly once)
        # rather than fail.
        table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
                      names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
        table['a'].unit = u.core.IrreducibleUnit("test")
        with pytest.warns(AstropyUserWarning, match="The unit 'test' could not be saved") as w:
            fits.table_to_hdu(table)
            assert len(w) == 1
    def test_table_to_hdu_convert_comment_convention(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/6079
        """
        # The meta key 'comments' must be converted to header COMMENT
        # cards, not written as a (invalid) 'comments' keyword.
        table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
                      names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
        table.meta['comments'] = ['This', 'is', 'a', 'comment']
        hdu = fits.table_to_hdu(table)
        assert hdu.header.get('comment') == ['This', 'is', 'a', 'comment']
        with pytest.raises(ValueError):
            hdu.header.index('comments')
    def test_table_to_hdu_filter_reserved(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/9387

        Structural FITS keywords present in table.meta must be dropped with
        one warning each, while legitimate keywords (EXPTIME, OBSERVER here)
        are copied through.
        """
        diag = 'be ignored since it conflicts with a FITS reserved keyword'
        ins_cards = {'EXPTIME': 32.1, 'XTENSION': 'NEWTABLE',
                     'NAXIS': 1, 'NAXIS1': 3, 'NAXIS2': 9,
                     'PCOUNT': 42, 'OBSERVER': 'Adams'}
        table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
                      names=['a', 'b', 'c'], dtype=['i4', 'U1', 'f8'])
        table.meta.update(ins_cards)
        with pytest.warns(AstropyUserWarning,
                          match=rf'Meta-data keyword \w+ will {diag}') as w:
            hdu = fits.table_to_hdu(table)
        # This relies on the warnings being raised in the order of the
        # meta dict (note that the first and last card are legitimate keys)
        assert len(w) == len(ins_cards) - 2
        for i, key in enumerate(list(ins_cards)[1:-1]):
            assert f'Meta-data keyword {key}' in str(w[i].message)
        # The reserved keywords keep the values computed from the data, not
        # the values injected via table.meta.
        assert hdu.header.get('XTENSION') == 'BINTABLE'
        assert hdu.header.get('NAXIS') == 2
        assert hdu.header.get('NAXIS1') == 13
        assert hdu.header.get('NAXIS2') == 3
        assert hdu.header.get('PCOUNT') == 0
        np.testing.assert_almost_equal(hdu.header.get('EXPTIME'), 3.21e1)
    @pytest.mark.parametrize('card', REMOVE_KEYWORDS)
    def test_table_to_hdu_warn_reserved(self, card):
        """
        Test warning for each keyword in ..connect.REMOVE_KEYWORDS, 1 by 1.

        Each reserved keyword injected via table.meta must warn and keep the
        value computed by FITS, while an ordinary keyword (ORIGIN) survives.
        """
        diag = 'be ignored since it conflicts with a FITS reserved keyword'
        res_cards = {'XTENSION': 'BINTABLE', 'BITPIX': 8,
                     'NAXIS': 2, 'NAXIS1': 12, 'NAXIS2': 3,
                     'PCOUNT': 0, 'GCOUNT': 1, 'TFIELDS': 2, 'THEAP': None}
        ins_cards = {'XTENSION': 'TABLE', 'BITPIX': 16,
                     'NAXIS': 1, 'NAXIS1': 2, 'NAXIS2': 6,
                     'PCOUNT': 2, 'GCOUNT': 2, 'TFIELDS': 4, 'THEAP': 36}
        table = Table([[1.0, 2.0, 3.0], [2.3, 4.5, 6.7]],
                      names=['wavelength', 'flux'], dtype=['f8', 'f4'])
        table.meta['ORIGIN'] = 'Min.Silly Walks'
        table.meta[card] = ins_cards[card]
        # Sanity check: the injected value must differ from the real one.
        assert table.meta.get(card) != res_cards[card]
        with pytest.warns(AstropyUserWarning,
                          match=f'Meta-data keyword {card} will {diag}'):
            hdu = fits.table_to_hdu(table)
        assert hdu.header.get(card) == res_cards[card]
        assert hdu.header.get('ORIGIN') == 'Min.Silly Walks'
    def test_table_to_hdu_filter_incompatible(self):
        """
        Test removal of unsupported data types from header.

        Array- and dict-valued meta entries cannot become header cards and
        must be skipped with a warning; plain strings survive.
        """
        table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
                      names=['a', 'b', 'c'], dtype=['i4', 'U1', 'f8'])
        table.meta.update({'OBSDATE': '2001-05-26', 'RAMP': np.arange(5),
                           'TARGETS': {'PRIMARY': 1, 'SECONDAR': 3}})
        with pytest.warns(AstropyUserWarning, match=r'Attribute \S+ of type '
                          r'.+ cannot be added to FITS Header - skipping'):
            hdu = fits.table_to_hdu(table)
        assert hdu.header.get('OBSDATE') == '2001-05-26'
        assert 'RAMP' not in hdu.header
        assert 'TARGETS' not in hdu.header
    def test_table_writeto_header(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/5988

        A user-supplied Header passed to fits.writeto() together with table
        data must end up on the created table extension (ext=1).
        """
        data = np.zeros((5, ), dtype=[('x', float), ('y', int)])
        h_in = fits.Header()
        h_in['ANSWER'] = (42.0, 'LTU&E')
        filename = self.temp('tabhdr42.fits')
        fits.writeto(filename, data=data, header=h_in, overwrite=True)
        h_out = fits.getheader(filename, ext=1)
        assert h_out['ANSWER'] == 42
    def test_image_extension_update_header(self):
        """
        Test that _makehdu correctly includes the header. For example in the
        fits.update convenience function.
        """
        filename = self.temp('twoextension.fits')
        hdus = [fits.PrimaryHDU(np.zeros((10, 10))),
                fits.ImageHDU(np.zeros((10, 10)))]
        fits.HDUList(hdus).writeto(filename)
        # Update extension 1 with new data plus an explicit header; the
        # keyword from that header must be written through.
        fits.update(filename,
                    np.zeros((10, 10)),
                    header=fits.Header([('WHAT', 100)]),
                    ext=1)
        h_out = fits.getheader(filename, ext=1)
        assert h_out['WHAT'] == 100
    def test_printdiff(self):
        """
        Test that FITSDiff can run the different inputs without crashing.

        printdiff always returns None; the assertions only verify it does
        not raise for valid input combinations.
        """
        # Testing different string input options
        assert printdiff(self.data('arange.fits'),
                         self.data('blank.fits')) is None
        assert printdiff(self.data('arange.fits'),
                         self.data('blank.fits'), ext=0) is None
        assert printdiff(self.data('o4sp040b0_raw.fits'),
                         self.data('o4sp040b0_raw.fits'),
                         extname='sci') is None
        # This may seem weird, but check printdiff to see, need to test
        # incorrect second file
        with pytest.raises(OSError):
            printdiff('o4sp040b0_raw.fits', 'fakefile.fits', extname='sci')
        # Test HDU object inputs
        with fits.open(self.data('stddata.fits'), mode='readonly') as in1:
            with fits.open(self.data('checksum.fits'), mode='readonly') as in2:
                assert printdiff(in1[0], in2[0]) is None
                # ext is not accepted when HDU objects are passed directly.
                with pytest.raises(ValueError):
                    printdiff(in1[0], in2[0], ext=0)
                assert printdiff(in1, in2) is None
                # A third positional argument is not supported for HDULists.
                with pytest.raises(NotImplementedError):
                    printdiff(in1, in2, 0)
    def test_tabledump(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/6937

        tabledump must work both with and without an explicit datafile
        argument (the default output name is '<file>_1.txt').
        """
        # copy fits file to the temp directory
        self.copy_file('tb.fits')
        # test without datafile
        fits.tabledump(self.temp('tb.fits'))
        assert os.path.isfile(self.temp('tb_1.txt'))
        # test with datafile
        fits.tabledump(self.temp('tb.fits'), datafile=self.temp('test_tb.txt'))
        assert os.path.isfile(self.temp('test_tb.txt'))
    def test_append_filename(self):
        """
        Test fits.append with a filename argument.

        Each append creates one more HDU; the reopened file must contain all
        three data arrays in order.
        """
        data = np.arange(6)
        testfile = self.temp('test_append_1.fits')
        # Test case 1: creation of file
        fits.append(testfile, data=data, checksum=True)
        # Test case 2: append to existing file, with verify=True
        # Also test that additional keyword can be passed to fitsopen
        fits.append(testfile, data=data * 2, checksum=True, ignore_blank=True)
        # Test case 3: append to existing file, with verify=False
        fits.append(testfile, data=data * 3, checksum=True, verify=False)
        with fits.open(testfile, checksum=True) as hdu1:
            np.testing.assert_array_equal(hdu1[0].data, data)
            np.testing.assert_array_equal(hdu1[1].data, data * 2)
            np.testing.assert_array_equal(hdu1[2].data, data * 3)
    @pytest.mark.parametrize('mode', ['wb', 'wb+', 'ab', 'ab+'])
    def test_append_filehandle(self, tmpdir, mode):
        """
        Test fits.append with a file handle argument.

        Must not raise for any writable/appendable binary mode.
        """
        append_file = tmpdir.join('append.fits')
        with append_file.open(mode) as handle:
            fits.append(filename=handle, data=np.ones((4, 4)))
    def test_append_with_header(self):
        """
        Test fits.append with a fits Header, which triggers detection of the
        HDU class. Regression test for
        https://github.com/astropy/astropy/issues/8660
        """
        testfile = self.temp('test_append_1.fits')
        with fits.open(self.data('test0.fits')) as hdus:
            for hdu in hdus:
                fits.append(testfile, hdu.data, hdu.header, checksum=True)
        with fits.open(testfile, checksum=True) as hdus:
            # Every HDU from the source file must have been appended.
            assert len(hdus) == 5
    def test_pathlib(self):
        """Round-trip write/read using a pathlib.Path as the filename."""
        testfile = pathlib.Path(self.temp('test.fits'))
        data = np.arange(10)
        hdulist = fits.HDUList([fits.PrimaryHDU(data)])
        hdulist.writeto(testfile)
        with fits.open(testfile) as hdul:
            np.testing.assert_array_equal(hdul[0].data, data)
def test_getdata_ext_given(self):
prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
exthdu2 = fits.ImageHDU(data=2 * np.ones((5, 5), dtype=int))
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
for ext in [0, 1, 2]:
buf.seek(0)
data = fits.getdata(buf, ext=ext)
assert data[0, 0] == ext
    def test_getdata_ext_given_nodata(self):
        """Explicitly requesting an HDU that exists but has no data raises
        IndexError."""
        prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
        exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
        exthdu2 = fits.ImageHDU(data=None)
        hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
        buf = io.BytesIO()
        hdulist.writeto(buf)
        buf.seek(0)
        with pytest.raises(IndexError, match="No data in HDU #2."):
            fits.getdata(buf, ext=2)
def test_getdata_ext_not_given_with_data_in_primary(self):
prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
exthdu1 = fits.ImageHDU(data=None)
exthdu2 = fits.ImageHDU(data=None)
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
data = fits.getdata(buf)
assert data[0, 0] == 0
def test_getdata_ext_not_given_with_data_in_ext(self):
# tests fallback mechanism
prihdu = fits.PrimaryHDU(data=None)
exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
exthdu2 = fits.ImageHDU(data=None)
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
data = fits.getdata(buf)
assert data[0, 0] == 1
    def test_getdata_ext_not_given_nodata_any(self):
        """Without ``ext``, only the primary HDU and the FIRST extension are
        consulted; data in a later extension does not prevent the error."""
        # tests exception raised when there is no data in either
        # Primary HDU or first extension HDU
        prihdu = fits.PrimaryHDU(data=None)
        exthdu1 = fits.ImageHDU(data=None)
        exthdu2 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
        hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
        buf = io.BytesIO()
        hdulist.writeto(buf)
        buf.seek(0)
        with pytest.raises(
                IndexError,
                match="No data in either Primary or first extension HDUs."
        ):
            fits.getdata(buf)
    def test_getdata_ext_not_given_nodata_noext(self):
        """A single empty primary HDU with no extensions raises a dedicated
        IndexError message."""
        # tests exception raised when there is no data in the
        # Primary HDU and there are no extension HDUs
        prihdu = fits.PrimaryHDU(data=None)
        hdulist = fits.HDUList([prihdu])
        buf = io.BytesIO()
        hdulist.writeto(buf)
        buf.seek(0)
        with pytest.raises(
                IndexError,
                match="No data in Primary HDU and no extension HDU found."
        ):
            fits.getdata(buf)
| |
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import ast
import inspect
import itertools
import sys
import typing
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, Tuple, Type, get_type_hints
from pants.engine.goal import Goal
from pants.engine.objects import union
from pants.engine.selectors import Get
from pants.option.optionable import OptionableFactory
from pants.util.collections import assert_single_element
from pants.util.memo import memoized
from pants.util.meta import frozen_after_init
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
def side_effecting(cls):
    """Mark *cls* as a side-effecting type.

    Side-effecting types need to be handled specially with respect to rule
    caching semantics; the marker attribute is read back via ``getattr``
    during rule validation.
    """
    setattr(cls, "__side_effecting", True)
    return cls
class _RuleVisitor(ast.NodeVisitor):
    """AST visitor that collects every `Get` call found in an @rule body."""
    def __init__(self):
        super().__init__()
        self._collected: List[Get] = []
    @property
    def gets(self) -> List[Get]:
        return self._collected
    @staticmethod
    def _names_get(node: ast.AST) -> bool:
        """True when `node` is the bare name `Get`."""
        return isinstance(node, ast.Name) and node.id == Get.__name__
    def _looks_like_get(self, node: ast.AST) -> bool:
        """True for calls shaped like Get(...) or Get[X](...)."""
        if not isinstance(node, ast.Call):
            return False
        callee = node.func
        if self._names_get(callee):
            return True
        return isinstance(callee, ast.Subscript) and self._names_get(callee.value)
    def visit_Call(self, node: ast.Call) -> None:
        if self._looks_like_get(node):
            self._collected.append(Get.extract_constraints(node))
        # Keep descending so nested calls (e.g. MultiGet(Get(...))) are found.
        self.generic_visit(node)
@memoized
def subsystem_rule(optionable_factory: Type[OptionableFactory]) -> "TaskRule":
    """Returns a TaskRule that constructs an instance of the subsystem.

    Decorated with @memoized so repeated calls with the same factory return
    the same TaskRule instance.

    TODO: This API is slightly awkward for two reasons:
      1) We should consider whether Subsystems/Optionables should be constructed explicitly using
        `@rule`s, which would allow them to have non-option dependencies that would be explicit in
        their constructors (which would avoid the need for the `Subsystem.Factory` pattern).
      2) Optionable depending on TaskRule would create a cycle in the Python package graph.
    """
    return TaskRule(**optionable_factory.signature())
def _get_starting_indent(source):
"""Used to remove leading indentation from `source` so ast.parse() doesn't raise an
exception."""
if source.startswith(" "):
return sum(1 for _ in itertools.takewhile(lambda c: c in {" ", b" "}, source))
return 0
def _make_rule(
    return_type: Type,
    parameter_types: typing.Iterable[Type],
    *,
    cacheable: bool = True,
    name: Optional[str] = None,
) -> Callable[[Callable], Callable]:
    """A @decorator that declares that a particular static function may be used as a TaskRule.

    As a special case, if the output_type is a subclass of `Goal`, the `Goal.Options` for the `Goal`
    are registered as dependency Optionables.

    :param return_type: The return/output type for the Rule. This must be a concrete Python type.
    :param parameter_types: A sequence of types that matches the number and order of arguments to the
                            decorated function.
    :param cacheable: Whether the results of executing the Rule should be cached as keyed by all of
                      its inputs.
    :param name: Optional display name; for Goal rules it defaults to the Goal's name.
    """
    # @rule must not return a Goal and @goal_rule must: enforce both directions.
    has_goal_return_type = issubclass(return_type, Goal)
    if cacheable and has_goal_return_type:
        raise TypeError(
            "An `@rule` that returns a `Goal` must instead be declared with `@goal_rule`."
        )
    if not cacheable and not has_goal_return_type:
        raise TypeError("An `@goal_rule` must return a subclass of `engine.goal.Goal`.")
    is_goal_cls = has_goal_return_type
    def wrapper(func):
        if not inspect.isfunction(func):
            raise ValueError("The @rule decorator must be applied innermost of all decorators.")
        owning_module = sys.modules[func.__module__]
        source = inspect.getsource(func)
        # ast.parse() requires column-0 code, so strip any leading indent
        # from the decorated function's source before parsing.
        beginning_indent = _get_starting_indent(source)
        if beginning_indent:
            source = "\n".join(line[beginning_indent:] for line in source.split("\n"))
        module_ast = ast.parse(source)
        def resolve_type(name):
            # Resolve a name to a type: module top level first, then builtins.
            resolved = getattr(owning_module, name, None) or owning_module.__builtins__.get(
                name, None
            )
            if resolved is None:
                raise ValueError(
                    f"Could not resolve type `{name}` in top level of module {owning_module.__name__}"
                )
            elif not isinstance(resolved, type):
                raise ValueError(
                    f"Expected a `type` constructor for `{name}`, but got: {resolved} (type "
                    f"`{type(resolved).__name__}`)"
                )
            return resolved
        # Locate the (single) AST node for the decorated function itself.
        rule_func_node = assert_single_element(
            node
            for node in ast.iter_child_nodes(module_ast)
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef))
            and node.name == func.__name__
        )
        # NOTE(review): parents_table is built but never read in this
        # function -- possibly vestigial; confirm before removing.
        parents_table = {}
        for parent in ast.walk(rule_func_node):
            for child in ast.iter_child_nodes(parent):
                parents_table[child] = parent
        # Extract the Get calls from the rule body for the rule graph.
        rule_visitor = _RuleVisitor()
        rule_visitor.visit(rule_func_node)
        gets = FrozenOrderedSet(
            Get.create_statically_for_rule_graph(resolve_type(p), resolve_type(s))
            for p, s in rule_visitor.gets
        )
        # Register dependencies for @goal_rule/Goal.
        dependency_rules = (subsystem_rule(return_type.subsystem_cls),) if is_goal_cls else None
        # Set a default name for Goal classes if one is not explicitly provided
        if is_goal_cls and name is None:
            effective_name = return_type.name
        else:
            effective_name = name
        # Set our own custom `__line_number__` dunder so that the engine may visualize the line number.
        func.__line_number__ = func.__code__.co_firstlineno
        # Attach the TaskRule to the function; RuleIndex.create reads `.rule`.
        func.rule = TaskRule(
            return_type,
            tuple(parameter_types),
            func,
            input_gets=tuple(gets),
            dependency_rules=dependency_rules,
            cacheable=cacheable,
            name=effective_name,
        )
        return func
    return wrapper
class InvalidTypeAnnotation(TypeError):
    """Indicates an incorrect type annotation for an `@rule`.

    Base class for the more specific missing-annotation errors in this module.
    """
class UnrecognizedRuleArgument(TypeError):
    """Indicates an unrecognized keyword argument to a `@rule`.

    Raised by `rule_decorator` for kwargs outside PUBLIC_RULE_DECORATOR_ARGUMENTS.
    """
class MissingTypeAnnotation(TypeError):
    """Indicates a missing type annotation for an `@rule`."""
    # NOTE(review): extends TypeError directly, unlike the Missing*Annotation
    # classes below which extend InvalidTypeAnnotation -- confirm intentional.
class MissingReturnTypeAnnotation(InvalidTypeAnnotation):
    """Indicates a missing return type annotation for an `@rule`.

    Raised by `rule_decorator` when the decorated function has no return hint.
    """
class MissingParameterTypeAnnotation(InvalidTypeAnnotation):
    """Indicates a missing parameter type annotation for an `@rule`.

    Raised by `rule_decorator` when a parameter has no type hint.
    """
def _ensure_type_annotation(
    *, type_annotation: Optional[Type], name: str, raise_type: Type[InvalidTypeAnnotation],
) -> Type:
    """Validate that `type_annotation` is a concrete `type`.

    Raises an instance of `raise_type` when the annotation is absent or is
    not a plain `type` (e.g. a typing construct).
    """
    if type_annotation is None:
        raise raise_type(f"{name} is missing a type annotation.")
    if isinstance(type_annotation, type):
        return type_annotation
    raise raise_type(
        f"The annotation for {name} must be a type, got {type_annotation} of type {type(type_annotation)}."
    )
# Keyword arguments that users may pass to @rule/@goal_rule.
PUBLIC_RULE_DECORATOR_ARGUMENTS = {"name"}
# We don't want @rule-writers to use 'cacheable' as a kwarg directly, but rather
# set it implicitly based on whether the rule annotation is @rule or @goal_rule.
# So we leave it out of PUBLIC_RULE_DECORATOR_ARGUMENTS.
IMPLICIT_PRIVATE_RULE_DECORATOR_ARGUMENTS = {"cacheable"}
def rule_decorator(*args, **kwargs) -> Callable:
    """Validate a decorated function and register it as a TaskRule.

    Checks that exactly one function was decorated, that only recognized
    keyword arguments were supplied, and that the function is fully
    type-annotated, then delegates to `_make_rule`.

    :raises ValueError: if not given exactly one function.
    :raises UnrecognizedRuleArgument: on unknown keyword arguments.
    :raises MissingReturnTypeAnnotation / MissingParameterTypeAnnotation:
        when annotations are absent.
    """
    # BUGFIX: this must be `or`, not `and`. With `and`, calling the decorator
    # with zero arguments raised IndexError on args[0], and a single
    # non-function argument was silently accepted and failed later.
    if len(args) != 1 or not inspect.isfunction(args[0]):
        raise ValueError(
            "The @rule decorator expects no arguments and for the function it decorates to be "
            f"type-annotated. Given {args}."
        )
    if (
        len(
            set(kwargs)
            - PUBLIC_RULE_DECORATOR_ARGUMENTS
            - IMPLICIT_PRIVATE_RULE_DECORATOR_ARGUMENTS
        )
        != 0
    ):
        raise UnrecognizedRuleArgument(
            f"`@rule`s and `@goal_rule`s only accept the following keyword arguments: {PUBLIC_RULE_DECORATOR_ARGUMENTS}"
        )
    func = args[0]
    # `cacheable` is injected by rule()/goal_rule(), never by the user.
    cacheable: bool = kwargs["cacheable"]
    name: Optional[str] = kwargs.get("name")
    func_id = f"@rule {func.__module__}:{func.__name__}"
    type_hints = get_type_hints(func)
    return_type = _ensure_type_annotation(
        type_annotation=type_hints.get("return"),
        name=f"{func_id} return",
        raise_type=MissingReturnTypeAnnotation,
    )
    parameter_types = tuple(
        _ensure_type_annotation(
            type_annotation=type_hints.get(parameter),
            name=f"{func_id} parameter {parameter}",
            raise_type=MissingParameterTypeAnnotation,
        )
        for parameter in inspect.signature(func).parameters
    )
    validate_parameter_types(func_id, parameter_types, cacheable)
    return _make_rule(return_type, parameter_types, cacheable=cacheable, name=name)(func)
def validate_parameter_types(
    func_id: str, parameter_types: Tuple[Type, ...], cacheable: bool
) -> None:
    """Reject cacheable rules that take a side-effecting parameter type.

    Non-cacheable (goal) rules may take side-effecting types freely.
    """
    if not cacheable:
        return
    for ty in parameter_types:
        if getattr(ty, "__side_effecting", False):
            raise ValueError(
                f"Non-console `@rule` {func_id} has a side-effecting parameter: {ty}"
            )
def inner_rule(*args, **kwargs) -> Callable:
    """Dispatch between bare decoration (@rule) and parameterized decoration
    (@rule(name=...)), forwarding to `rule_decorator` in both cases."""
    directly_decorated = len(args) == 1 and inspect.isfunction(args[0])
    if directly_decorated:
        return rule_decorator(*args, **kwargs)
    def decorator(*inner_args):
        return rule_decorator(*inner_args, **kwargs)
    return decorator
def rule(*args, **kwargs) -> Callable:
    """Decorator declaring a cacheable rule (must NOT return a `Goal`)."""
    return inner_rule(*args, **kwargs, cacheable=True)
def goal_rule(*args, **kwargs) -> Callable:
    """Decorator declaring a non-cacheable rule that returns a `Goal`."""
    return inner_rule(*args, **kwargs, cacheable=False)
@dataclass(frozen=True)
class UnionRule:
    """Specify that an instance of `union_member` can be substituted wherever `union_base` is
    used."""
    union_base: Type
    union_member: Type
    def __post_init__(self) -> None:
        # Validate eagerly so a bad registration fails at construction time.
        if not union.is_instance(self.union_base):
            raise ValueError(
                f"union_base must be a type annotated with @union: was {self.union_base} "
                f"(type {type(self.union_base).__name__})"
            )
@dataclass(frozen=True)
class UnionMembership:
    """An index from union base types to their registered member types."""
    union_rules: Dict[Type, OrderedSet[Type]]
    def is_member(self, union_type, putative_member):
        """Whether `putative_member`'s type is registered under `union_type`."""
        if union_type not in self.union_rules:
            raise TypeError(f"Not a registered union type: {union_type}")
        return type(putative_member) in self.union_rules[union_type]
    def has_members(self, union_type: Type) -> bool:
        """Check whether the union has an implementation or not."""
        return bool(self.union_rules.get(union_type))
    def has_members_for_all(self, union_types: typing.Iterable[Type]) -> bool:
        """Check whether every union given has an implementation or not."""
        for union_type in union_types:
            if not self.has_members(union_type):
                return False
        return True
class Rule(ABC):
    """Rules declare how to produce products for the product graph.

    A rule describes what dependencies must be provided to produce a particular product. They also
    act as factories for constructing the nodes within the graph.
    """
    @property
    @abstractmethod
    def output_type(self):
        """An output `type` for the rule."""
    @property
    @abstractmethod
    def dependency_rules(self):
        """A tuple of @rules that are known to be necessary to run this rule.

        Note that installing @rules as flat lists is generally preferable, as Rules already
        implicitly form a loosely coupled RuleGraph: this facility exists only to assist with
        boilerplate removal.
        """
    @property
    @abstractmethod
    def dependency_optionables(self):
        """A tuple of Optionable classes that are known to be necessary to run this rule."""
        # Abstract methods may have bodies: this is the default for
        # implementations that super()-call it.
        return ()
@frozen_after_init
@dataclass(unsafe_hash=True)
class TaskRule(Rule):
    """A Rule that runs a task function when all of its input selectors are satisfied.

    NB: This API is experimental, and not meant for direct consumption. To create a `TaskRule` you
    should always prefer the `@rule` constructor, and in cases where that is too constraining
    (likely due to #4535) please bump or open a ticket to explain the usecase.
    """
    # Product type produced by this rule (exposed via the output_type property).
    _output_type: Type
    # Positional parameter types of `func`, in declaration order.
    input_selectors: Tuple[Type, ...]
    # Get constraints extracted from the rule body (see _make_rule).
    input_gets: Tuple
    func: Callable
    _dependency_rules: Tuple
    _dependency_optionables: Tuple
    cacheable: bool
    name: Optional[str]
    def __init__(
        self,
        output_type: Type,
        input_selectors: Tuple[Type, ...],
        func: Callable,
        input_gets: Tuple,
        dependency_rules: Optional[Tuple] = None,
        dependency_optionables: Optional[Tuple] = None,
        cacheable: bool = True,
        name: Optional[str] = None,
    ):
        self._output_type = output_type
        self.input_selectors = input_selectors
        self.input_gets = input_gets
        self.func = func # type: ignore[assignment] # cannot assign to a method
        # None collapses to an empty tuple for both dependency fields.
        self._dependency_rules = dependency_rules or ()
        self._dependency_optionables = dependency_optionables or ()
        self.cacheable = cacheable
        self.name = name
    def __str__(self):
        return "(name={}, {}, {!r}, {}, gets={}, opts={})".format(
            self.name or "<not defined>",
            self.output_type.__name__,
            self.input_selectors,
            self.func.__name__,
            self.input_gets,
            self.dependency_optionables,
        )
    @property
    def output_type(self):
        return self._output_type
    @property
    def dependency_rules(self):
        return self._dependency_rules
    @property
    def dependency_optionables(self):
        return self._dependency_optionables
@frozen_after_init
@dataclass(unsafe_hash=True)
class RootRule(Rule):
    """Represents a root input to an execution of a rule graph.

    Roots act roughly like parameters, in that in some cases the only source of a particular type
    might be when a value is provided as a root subject at the beginning of an execution.
    """
    _output_type: Type
    def __init__(self, output_type: Type) -> None:
        self._output_type = output_type
    @property
    def output_type(self):
        return self._output_type
    @property
    def dependency_rules(self):
        # Root rules have no rule dependencies.
        return tuple()
    @property
    def dependency_optionables(self):
        # Root rules have no Optionable dependencies.
        return tuple()
@dataclass(frozen=True)
class NormalizedRules:
    """The flattened result of RuleIndex.normalized_rules()."""
    rules: FrozenOrderedSet
    union_rules: Dict[Type, OrderedSet[Type]]
@dataclass(frozen=True)
class RuleIndex:
    """Holds a normalized index of Rules used to instantiate Nodes."""
    # Task rules keyed by their output type.
    rules: Dict
    roots: FrozenOrderedSet
    union_rules: Dict[Type, OrderedSet[Type]]
    @classmethod
    def create(cls, rule_entries, union_rules=None) -> "RuleIndex":
        """Creates a RuleIndex with tasks indexed by their output type.

        Accepts Rule instances, UnionRule instances, and callables that were
        decorated with @rule (their TaskRule is read from `.rule`).
        """
        serializable_rules: Dict = {}
        serializable_roots: OrderedSet = OrderedSet()
        union_rules = dict(union_rules or ())
        def add_task(product_type, rule):
            # TODO(#7311): make a defaultdict-like wrapper for OrderedDict if more widely used.
            if product_type not in serializable_rules:
                serializable_rules[product_type] = OrderedSet()
            serializable_rules[product_type].add(rule)
        def add_root_rule(root_rule):
            serializable_roots.add(root_rule)
        def add_rule(rule):
            if isinstance(rule, RootRule):
                add_root_rule(rule)
            else:
                add_task(rule.output_type, rule)
            # Recursively index declared dependency rules as well.
            for dep_rule in rule.dependency_rules:
                add_rule(dep_rule)
        def add_type_transition_rule(union_rule):
            # NB: This does not require that union bases be supplied to `def rules():`, as the union type
            # is never instantiated!
            union_base = union_rule.union_base
            assert union.is_instance(union_base)
            union_member = union_rule.union_member
            if union_base not in union_rules:
                union_rules[union_base] = OrderedSet()
            union_rules[union_base].add(union_member)
        for entry in rule_entries:
            if isinstance(entry, Rule):
                add_rule(entry)
            elif isinstance(entry, UnionRule):
                add_type_transition_rule(entry)
            elif hasattr(entry, "__call__"):
                # A function decorated with @rule carries its TaskRule on `.rule`.
                rule = getattr(entry, "rule", None)
                if rule is None:
                    raise TypeError(
                        "Expected callable {} to be decorated with @rule.".format(entry)
                    )
                add_rule(rule)
            else:
                raise TypeError(
                    """\
Rule entry {} had an unexpected type: {}. Rules either extend Rule or UnionRule, or are static \
functions decorated with @rule.""".format(
                        entry, type(entry)
                    )
                )
        return cls(serializable_rules, FrozenOrderedSet(serializable_roots), union_rules)
    def normalized_rules(self) -> NormalizedRules:
        """Return all task rules plus root rules as one flat ordered set."""
        rules = FrozenOrderedSet(
            (
                *itertools.chain.from_iterable(ruleset for ruleset in self.rules.values()),
                *self.roots,
            )
        )
        return NormalizedRules(rules, self.union_rules)
| |
# -*- coding: utf-8 -*-
# Copyright (2018) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Python libs
import collections
import configparser
import glob
import json
import logging
import logging.config
import os
# Modules own libs
from oneview_redfish_toolkit.api.errors import \
OneViewRedfishException
from oneview_redfish_toolkit.api.errors import \
OneViewRedfishResourceNotFoundException
from oneview_redfish_toolkit.api import schemas
from oneview_redfish_toolkit import connection
from oneview_redfish_toolkit import util
# OneView REST API version requested by this toolkit.
API_VERSION = 1200
# Names of dedicated loggers used elsewhere in the toolkit.
COUNTER_LOGGER_NAME = 'qtty'
PERFORMANCE_LOGGER_NAME = 'perf'
ONEVIEW_SDK_LOGGER_NAME = 'ovData'
# Module-level globals populated by load_config()/load_schemas():
#   globals()['config']
#   globals()['registry_dict']
#   globals()['stored_schemas']
def get_config():
    """Return the global ConfigParser instance stored by load_config()."""
    return globals()['config']
def get_oneview_config():
    """Return the 'oneview_config' section as a plain dict."""
    return dict(get_config().items('oneview_config'))
def get_oneview_multiple_ips():
    """Return the OneView IP addresses configured under 'oneview_config'.

    The 'ip' entry is a comma-separated list; surrounding whitespace on each
    entry is stripped.
    """
    section = dict(get_config().items('oneview_config'))
    return [ip.strip() for ip in section['ip'].split(",")]
def get_credentials():
    """Return the 'credentials' section as a plain dict."""
    return dict(get_config().items('credentials'))
def get_authentication_mode():
    """Return the 'authentication_mode' option from the 'redfish' section."""
    return get_config().get('redfish', 'authentication_mode')
def auth_mode_is_session():
    """True when the configured authentication mode is 'session'."""
    return get_authentication_mode() == 'session'
def auth_mode_is_conf():
    """True when the configured authentication mode is 'conf'."""
    return get_authentication_mode() == 'conf'
def get_cherrypy_config():
    """Return the 'cherrypy_config' section with integer-looking values
    (optionally negative) converted to int; other values stay strings."""
    def _coerce(raw):
        if raw.isdigit() or (raw.startswith('-') and raw[1:].isdigit()):
            return int(raw)
        return raw
    return {key: _coerce(val)
            for key, val in get_config().items('cherrypy_config')}
def get_registry_dict():
    """Return the registry dict stored by load_config()."""
    return globals()['registry_dict']
def get_stored_schemas():
    """Return the schemas dict populated by load_schemas()."""
    return globals()['stored_schemas']
def get_api_version():
    """Return the OneView REST API version used by this toolkit."""
    return API_VERSION
def get_composition_settings():
    """Return the 'redfish-composition' section as a plain dict."""
    return dict(get_config().items('redfish-composition'))
def configure_logging(log_file_path):
    """Configure the logging subsystem from a logging.conf file.

    The file defines the root logger's handlers (stream and/or file) and the
    log level. Standard levels: CRITICAL=50, ERROR=40, WARNING=30, INFO=20,
    DEBUG=10, NOTSET=0.

    Args:
        log_file_path: logging.conf path.
    Raises:
        Exception: if the logging.conf file does not exist.
    """
    if not os.path.isfile(log_file_path):
        raise Exception("Config file {} not found".format(log_file_path))
    logging.config.fileConfig(log_file_path)
def load_config(conf_file):
    """Load the redfish.conf file and initialize module globals.

    Parses the conf file into the `config` global, checks that every
    configured OneView IP is reachable, loads the registries into
    `registry_dict`, and loads the DMTF JSON schemas via load_schemas().

    Args:
        conf_file: string with the conf file name
    Returns:
        None
    Exception:
        OneViewRedfishException:
            - if any OneView is unreachable or registries/schemas fail to load
    """
    config = load_conf_file(conf_file)
    globals()['config'] = config
    # Config file read set global vars
    # Setting ov_config
    # ov_config = dict(config.items('oneview_config'))
    # ov_config['credentials'] = dict(config.items('credentials'))
    # ov_config['api_version'] = API_VERSION
    # globals()['ov_config'] = ov_config
    util.load_event_service_info()
    # Load schemas | Store schemas
    try:
        # Verify each configured OneView responds before loading anything.
        for ip_oneview in get_oneview_multiple_ips():
            connection.check_oneview_availability(ip_oneview)
        registry_dict = load_registry(
            get_registry_path(),
            schemas.REGISTRY)
        globals()['registry_dict'] = registry_dict
        load_schemas(get_schemas_path())
    except Exception as e:
        # Any failure above is surfaced as a single toolkit-level error.
        raise OneViewRedfishException(
            'Failed to connect to OneView: {}'.format(e)
        )
def load_conf_file(conf_file):
    """Load and parse a conf file.

    Args:
        conf_file: string with the conf file name
    Returns:
        ConfigParser object with conf_file configs
    Raises:
        OneViewRedfishResourceNotFoundException: if conf_file does not exist.
    """
    if not os.path.isfile(conf_file):
        raise OneViewRedfishResourceNotFoundException(
            "File {} not found.".format(conf_file)
        )
    config = configparser.ConfigParser()
    # Preserve option-name case (the default optionxform lower-cases keys).
    config.optionxform = str
    # NOTE: the previous `try: ... except Exception: raise` wrapper around
    # read() was a no-op and has been removed.
    config.read(conf_file)
    return config
def load_registry(registry_dir, registries):
    """Load all registries from `registry_dir`.

    Args:
        registry_dir: string with the directory to load registries from
        registries: dict with registry name as key and registry file_name
            as value. The key will also be the key in the returning dict.
    Returns:
        OrderedDict: A dict containing 'RegistryName': registry_obj
    Raises:
        OneViewRedfishResourceNotFoundException: if registry_dir is missing
            or not readable, or a registry file cannot be read/parsed.
    """
    if not os.path.isdir(registry_dir):
        raise OneViewRedfishResourceNotFoundException(
            "Directory {} not found.".format(registry_dir)
        )
    if not os.access(registry_dir, os.R_OK):
        # BUGFIX: this branch previously reported "not found" even though
        # the directory exists; the real problem is access.
        raise OneViewRedfishResourceNotFoundException(
            "Directory {} not accessible.".format(registry_dir)
        )
    registries_dict = collections.OrderedDict()
    for key in registries:
        # os.path.join instead of string concatenation keeps this portable.
        file_path = os.path.join(registry_dir, registries[key])
        try:
            with open(file_path) as f:
                registries_dict[key] = json.load(f)
        except Exception:
            raise OneViewRedfishResourceNotFoundException(
                "File {} not found.".format(registries[key])
            )
    return registries_dict
def load_schemas(schema_dir):
    """Load all DMTF JSON Schemas found in `schema_dir`.

    Stores the result in the module-global 'stored_schemas' dict, keyed by
    'http://redfish.dmtf.org/schemas/v1/<schema_file_name>'.

    Args:
        schema_dir: String with the directory to load schemas from.
    Raises:
        OneViewRedfishResourceNotFoundException: if no *.json files exist
            in schema_dir.
    """
    schema_paths = glob.glob(os.path.join(schema_dir, '*.json'))
    if not schema_paths:
        raise OneViewRedfishResourceNotFoundException(
            "JSON Schemas file not found."
        )
    stored_schemas = dict()
    for path in schema_paths:
        with open(path) as schema_file:
            json_schema = json.load(schema_file)
        # os.path.basename handles both '/' and '\\' separators, replacing
        # the previous os.name-dependent manual split.
        file_name = os.path.basename(path)
        stored_schemas["http://redfish.dmtf.org/schemas/v1/" + file_name] = \
            json_schema
    globals()['stored_schemas'] = stored_schemas
def get_registry_path():
    """Return the path of the 'registry' directory under the app path."""
    source = util.get_app_path()
    return os.path.join(source, "registry")
def get_schemas_path():
    """Return the path of the 'schemas' directory under the app path."""
    source = util.get_app_path()
    return os.path.join(source, "schemas")
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'LinodeDNSDriver'
]
from libcloud.utils.misc import merge_valid_keys, get_new_obj
from libcloud.common.linode import (API_ROOT, LinodeException,
LinodeConnection, LinodeResponse)
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
# Zone 'extra' keys that merge_valid_keys() will forward to the Linode
# domain.create / domain.update API calls.
VALID_ZONE_EXTRA_PARAMS = ['SOA_Email', 'Refresh_sec', 'Retry_sec',
                           'Expire_sec', 'status', 'master_ips']

# Record 'extra' keys forwarded to domain.resource.create / update.
VALID_RECORD_EXTRA_PARAMS = ['Priority', 'Weight', 'Port', 'Protocol',
                             'TTL_sec']
class LinodeDNSResponse(LinodeResponse):
    """
    Linode DNS API response.

    Translates generic "object not found" Linode errors (error code 5)
    into the richer libcloud DNS exceptions, using the (resource, id)
    context previously stored on the connection by the driver.
    """

    def _make_excp(self, error):
        exc = super(LinodeDNSResponse, self)._make_excp(error)

        # Anything other than a code-5 LinodeException passes through.
        if not (isinstance(exc, LinodeException) and exc.code == 5):
            return exc

        ctx = self.connection.context
        if ctx['resource'] == 'zone':
            return ZoneDoesNotExistError(value='',
                                         driver=self.connection.driver,
                                         zone_id=ctx['id'])
        if ctx['resource'] == 'record':
            return RecordDoesNotExistError(value='',
                                           driver=self.connection.driver,
                                           record_id=ctx['id'])
        return exc
class LinodeDNSConnection(LinodeConnection):
    # Use the DNS-aware response class so code-5 "not found" errors are
    # mapped to ZoneDoesNotExistError / RecordDoesNotExistError.
    responseCls = LinodeDNSResponse
class LinodeDNSDriver(DNSDriver):
    """
    DNS driver for the Linode Domain (DNS Manager) API.

    All operations go through the query-style Linode API (the
    'api_action' request parameter).  Before lookups the driver stores a
    (resource, id) context on the connection so that LinodeDNSResponse
    can turn "not found" API errors into ZoneDoesNotExistError /
    RecordDoesNotExistError.
    """
    type = Provider.LINODE
    name = 'Linode DNS'
    website = 'http://www.linode.com/'
    connectionCls = LinodeDNSConnection

    # libcloud RecordType constant -> Linode API 'Type' parameter value.
    RECORD_TYPE_MAP = {
        RecordType.NS: 'NS',
        RecordType.MX: 'MX',
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.TXT: 'TXT',
        RecordType.SRV: 'SRV',
    }

    def list_zones(self):
        """Return all domains on the account as a list of Zone objects."""
        params = {'api_action': 'domain.list'}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        zones = self._to_zones(data)
        return zones

    def list_records(self, zone):
        """Return all resource records belonging to ``zone``."""
        # NOTE(review): key casing ('DOMAINID') differs from get_zone's
        # 'DomainID' -- presumably the API accepts both; confirm.
        params = {'api_action': 'domain.resource.list', 'DOMAINID': zone.id}
        self.connection.set_context(context={'resource': 'zone',
                                             'id': zone.id})
        data = self.connection.request(API_ROOT, params=params).objects[0]
        records = self._to_records(items=data, zone=zone)
        return records

    def get_zone(self, zone_id):
        """
        Return the single Zone with id ``zone_id``.

        Raises ZoneDoesNotExistError when the API does not return exactly
        one matching domain.
        """
        params = {'api_action': 'domain.list', 'DomainID': zone_id}
        self.connection.set_context(context={'resource': 'zone',
                                             'id': zone_id})
        data = self.connection.request(API_ROOT, params=params).objects[0]
        zones = self._to_zones(data)

        if len(zones) != 1:
            raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone_id)

        return zones[0]

    def get_record(self, zone_id, record_id):
        """
        Return the single Record ``record_id`` inside zone ``zone_id``.

        The zone is fetched first (so a bad zone_id raises
        ZoneDoesNotExistError); a missing record raises
        RecordDoesNotExistError.
        """
        zone = self.get_zone(zone_id=zone_id)
        params = {'api_action': 'domain.resource.list', 'DomainID': zone_id,
                  'ResourceID': record_id}
        self.connection.set_context(context={'resource': 'record',
                                             'id': record_id})
        data = self.connection.request(API_ROOT, params=params).objects[0]
        records = self._to_records(items=data, zone=zone)

        if len(records) != 1:
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=record_id)

        return records[0]

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        API docs: http://www.linode.com/api/dns/domain.create

        ``extra`` keys listed in VALID_ZONE_EXTRA_PARAMS are merged into
        the request; the merged dict is stored on the returned Zone.
        """
        params = {'api_action': 'domain.create', 'Type': type,
                  'Domain': domain}

        if ttl:
            params['TTL_sec'] = ttl

        merged = merge_valid_keys(params=params,
                                  valid_keys=VALID_ZONE_EXTRA_PARAMS,
                                  extra=extra)
        data = self.connection.request(API_ROOT, params=params).objects[0]
        zone = Zone(id=data['DomainID'], domain=domain, type=type, ttl=ttl,
                    extra=merged, driver=self)
        return zone

    def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
        """
        Update an existing zone.

        API docs: http://www.linode.com/api/dns/domain.update

        Only the attributes passed as non-falsy are sent to the API.  The
        API response is discarded; an updated local copy of ``zone`` is
        built with get_new_obj() and returned.
        """
        params = {'api_action': 'domain.update', 'DomainID': zone.id}

        if type:
            params['Type'] = type

        if domain:
            params['Domain'] = domain

        if ttl:
            params['TTL_sec'] = ttl

        merged = merge_valid_keys(params=params,
                                  valid_keys=VALID_ZONE_EXTRA_PARAMS,
                                  extra=extra)
        self.connection.request(API_ROOT, params=params).objects[0]
        updated_zone = get_new_obj(obj=zone, klass=Zone,
                                   attributes={'domain': domain,
                                               'type': type, 'ttl': ttl,
                                               'extra': merged})
        return updated_zone

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a new record.

        API docs: http://www.linode.com/api/dns/domain.resource.create

        ``extra`` keys listed in VALID_RECORD_EXTRA_PARAMS are merged
        into the request; TTL_sec (if given) also becomes the Record ttl.
        """
        params = {'api_action': 'domain.resource.create', 'DomainID': zone.id,
                  'Name': name, 'Target': data,
                  'Type': self.RECORD_TYPE_MAP[type]}
        merged = merge_valid_keys(params=params,
                                  valid_keys=VALID_RECORD_EXTRA_PARAMS,
                                  extra=extra)
        result = self.connection.request(API_ROOT, params=params).objects[0]
        record = Record(id=result['ResourceID'], name=name, type=type,
                        data=data, extra=merged, zone=zone, driver=self,
                        ttl=merged.get('TTL_sec', None))
        return record

    def update_record(self, record, name=None, type=None, data=None,
                      extra=None):
        """
        Update an existing record.

        API docs: http://www.linode.com/api/dns/domain.resource.update

        As with update_zone, the API response is discarded and an updated
        local Record copy is returned.
        """
        params = {'api_action': 'domain.resource.update',
                  'ResourceID': record.id, 'DomainID': record.zone.id}

        if name:
            params['Name'] = name

        if data:
            params['Target'] = data

        # `type` may legitimately be a falsy RecordType constant, hence
        # the explicit None check (unlike name/data above).
        if type is not None:
            params['Type'] = self.RECORD_TYPE_MAP[type]

        merged = merge_valid_keys(params=params,
                                  valid_keys=VALID_RECORD_EXTRA_PARAMS,
                                  extra=extra)
        self.connection.request(API_ROOT, params=params).objects[0]
        updated_record = get_new_obj(obj=record, klass=Record,
                                     attributes={'name': name, 'data': data,
                                                 'type': type,
                                                 'extra': merged})
        return updated_record

    def delete_zone(self, zone):
        """Delete ``zone``; True when the API echoes back a DomainID."""
        params = {'api_action': 'domain.delete', 'DomainID': zone.id}
        self.connection.set_context(context={'resource': 'zone',
                                             'id': zone.id})
        data = self.connection.request(API_ROOT, params=params).objects[0]
        return 'DomainID' in data

    def delete_record(self, record):
        """Delete ``record``; True when the API echoes a ResourceID."""
        params = {'api_action': 'domain.resource.delete',
                  'DomainID': record.zone.id, 'ResourceID': record.id}
        self.connection.set_context(context={'resource': 'record',
                                             'id': record.id})
        data = self.connection.request(API_ROOT, params=params).objects[0]
        return 'ResourceID' in data

    def _to_zones(self, items):
        """
        Convert a list of items to the Zone objects.
        """
        zones = []

        for item in items:
            zones.append(self._to_zone(item))

        return zones

    def _to_zone(self, item):
        """
        Build an Zone object from the item dictionary.

        ``item`` is one entry of the domain.list response; the API uses
        upper-case keys (DOMAINID, DOMAIN, TYPE, TTL_SEC, ...).
        """
        extra = {'SOA_Email': item['SOA_EMAIL'], 'status': item['STATUS'],
                 'description': item['DESCRIPTION']}
        zone = Zone(id=item['DOMAINID'], domain=item['DOMAIN'],
                    type=item['TYPE'], ttl=item['TTL_SEC'], driver=self,
                    extra=extra)
        return zone

    def _to_records(self, items, zone=None):
        """
        Convert a list of items to the Record objects.
        """
        records = []

        for item in items:
            records.append(self._to_record(item=item, zone=zone))

        return records

    def _to_record(self, item, zone=None):
        """
        Build a Record object from the item dictionary.

        ``item`` is one entry of the domain.resource.list response; the
        TYPE string is mapped back to a RecordType via the base-class
        helper _string_to_record_type().
        """
        extra = {'protocol': item['PROTOCOL'], 'ttl_sec': item['TTL_SEC'],
                 'port': item['PORT'], 'weight': item['WEIGHT']}
        type = self._string_to_record_type(item['TYPE'])
        record = Record(id=item['RESOURCEID'], name=item['NAME'], type=type,
                        data=item['TARGET'], zone=zone, driver=self,
                        ttl=item['TTL_SEC'], extra=extra)
        return record
| |
# -*- coding: utf-8 -*-
#!/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_compute_meta_data_of_network.py
# Description:
#
# Author: Shuai Yuan
# E-mail: ysh329@sina.com
# Create: 2015-12-06 21:49:46
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import logging
import MySQLdb
import time
from operator import add
import networkx as nx
from Tkinter import _flatten
################################### PART2 CLASS && FUNCTION ###########################
class ComputeEdgeProperty(object):
    """
    Computes common-neighbor properties for the edges of several networks
    (bio/info/social, directed/undirected) stored in a MySQL database,
    and writes the results back into the connection table.

    Python 2 only: relies on tuple-unpacking lambdas, ``xrange`` and the
    old ``except Error, e`` syntax.
    """

    def __init__(self, database_name, pyspark_sc):
        """
        Open the MySQL connection and keep a reference to the SparkContext.

        :param database_name: name of the MySQL database to connect to.
        :param pyspark_sc: an already-created pyspark SparkContext.
        """
        # Wall-clock reference; __del__ logs a "run time" based on time.clock().
        self.start = time.clock()
        # Log to ./main.log (append) and mirror INFO+ messages to the console.
        logging.basicConfig(level = logging.INFO,
                            format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
                            datefmt = '%y-%m-%d %H:%M:%S',
                            filename = './main.log',
                            filemode = 'a')
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
        logging.info("START CLASS {class_name}.".format(class_name = ComputeEdgeProperty.__name__))

        # connect database
        # NOTE(review): credentials are hard-coded; connection failure is
        # only logged, so later use of self.con would raise AttributeError.
        try:
            self.con = MySQLdb.connect(host='localhost', user='root', passwd='931209', db = database_name, charset='utf8')
            logging.info("Success in connecting MySQL.")
        except MySQLdb.Error, e:
            logging.error("Fail in connecting MySQL.")
            logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))

        # spark configure
        try:
            self.sc = pyspark_sc
            logging.info("Config spark successfully.")
        except Exception as e:
            logging.error("Config spark failed.")
            logging.error(e)

    def __del__(self):
        """Close the MySQL connection and log the elapsed-time footer."""
        # close database
        try:
            self.con.close()
            logging.info("Success in quiting MySQL.")
        except MySQLdb.Error, e:
            logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
        logging.info("END CLASS {class_name}.".format(class_name = ComputeEdgeProperty.__name__))
        self.end = time.clock()
        # NOTE(review): this logs self.end, not self.end - self.start, so the
        # reported "run time" is the raw clock value -- confirm intent.
        logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = ComputeEdgeProperty.__name__, delta_time = self.end))

    def compute_common_degree_in_different_network(self, database_name, node_table_name, connection_table_name):
        """
        Compute, for each edge of a network, the neighbors common to its two
        endpoints, then UPDATE the connection table with the common-neighbor
        string, count and rate.

        Most per-network sections below are disabled (see the triple-quoted
        block); only the social/directed network is currently processed.

        :param database_name: MySQL database holding both tables.
        :param node_table_name: table with per-node degree strings.
        :param connection_table_name: table with the edges to update.
        """
        # sub-function
        def get_node_and_connection_data_from_database(self, database_name, node_table_name, connection_table_name):
            """Fetch (node, degree-strings) and (node1, node2) tuples for all networks."""
            cursor = self.con.cursor()
            sqls = ["SELECT network_type, is_directed, node,"
                    " degree_str, in_degree_str, out_degree_str"
                    " FROM {database}.{table}"\
                    .format(database = database_name,\
                            table = node_table_name)]
            sqls.append("SELECT network_type, is_directed, node1, node2"
                        " FROM {database}.{table}"\
                        .format(database = database_name,\
                                table = connection_table_name))
            for sql_idx in xrange(len(sqls)):
                sql = sqls[sql_idx]
                try:
                    cursor.execute(sql)
                    if sql_idx == 0:
                        # Normalize DB row types: str for text, int for ids/flags.
                        node_tuple_list = map(lambda (network_type,\
                                                      is_directed,\
                                                      node,\
                                                      degree_str,\
                                                      in_degree_str,\
                                                      out_degree_str): (str(network_type),\
                                                                        int(is_directed),\
                                                                        int(node),\
                                                                        str(degree_str),\
                                                                        str(in_degree_str),\
                                                                        str(out_degree_str)),\
                                              cursor.fetchall()\
                                              )
                        logging.info("len(node_tuple_list):{0}".format(len(node_tuple_list)))
                        logging.info("node_tuple_list[:3]:{0}".format(node_tuple_list[:3]))
                    elif sql_idx == 1:
                        connection_tuple_list = map(lambda (network_type,\
                                                            is_directed,\
                                                            node1,\
                                                            node2): (str(network_type),\
                                                                     int(is_directed),\
                                                                     int(node1),\
                                                                     int(node2)\
                                                                     ),\
                                                    cursor.fetchall()\
                                                    )
                        logging.info("len(connection_tuple_list):{0}".format(len(connection_tuple_list)))
                        logging.info("connection_tuple_list[:3]:{0}".format(connection_tuple_list[:3]))
                except MySQLdb.Error, e:
                    logging.error("failed to get node and connection data from database.")
                    logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
            cursor.close()
            return node_tuple_list, connection_tuple_list

        # sub-function
        def compute_common_degree_in_network(network_type, is_directed, node_tuple_list, connection_tuple_list):
            """
            For one (network_type, is_directed) network, return a list of
            ((node1, node2), common_str, common_num, common_rate) tuples.
            """
            # Restrict the global node/edge lists to the requested network.
            node_tuple_list = filter(lambda (network_type_,\
                                             is_directed_,\
                                             node,\
                                             degree_str,\
                                             in_degree_str,\
                                             out_degree_str): network_type_ == network_type and is_directed_ == is_directed,\
                                     node_tuple_list\
                                     )
            edge_tuple_list = filter(lambda (network_type_,\
                                             is_directed_,\
                                             node1,\
                                             node2): network_type_ == network_type and is_directed_ == is_directed,\
                                     connection_tuple_list\
                                     )
            edge_tuple_list_length = len(edge_tuple_list)
            success_compute = 0
            failure_compute = 0
            common_degree_str_list_in_network = []
            common_degree_num_list_in_network = []
            common_edge_tuple_list_in_network = []
            common_degree_rate_list_in_network = []
            for edge_idx in xrange(len(edge_tuple_list)):
                edge_tuple = edge_tuple_list[edge_idx]
                node1 = edge_tuple[2]
                node2 = edge_tuple[3]
                # Progress log every 1000 edges and on the last edge.
                if (edge_idx % 1000 == 0 and edge_idx > 998) or (edge_idx == edge_tuple_list_length-1):
                    logging.info("============== Computer common node of {edge_idx}th edge in {network_type}.{is_directed} network ==============".format(edge_idx = edge_idx, network_type = network_type, is_directed = is_directed))
                    logging.info("edge_index:{idx}, finish rate:{rate}".format(idx = edge_idx, rate = float(edge_idx+1)/edge_tuple_list_length))
                    logging.info("success_rate:{success_rate}".format(success_rate = success_compute / float(success_compute + failure_compute + 0.0001)))
                    logging.info("success_update:{success}, failure_update:{failure}".format(success = success_compute, failure = failure_compute))
                # Look up node1's degree-string row; skip the edge if missing.
                # NOTE(review): linear scan per edge -- O(edges * nodes); a
                # node -> row dict would make this O(edges).
                try:
                    node1_and_degree_str_list = filter(lambda (network_type,\
                                                               is_directed,\
                                                               node,\
                                                               degree_str,\
                                                               in_degree_str,\
                                                               out_degree_str): node == node1,\
                                                       node_tuple_list)[0]
                except Exception as e:
                    failure_compute = failure_compute + 1
                    logging.error(e)
                    continue
                if node1_and_degree_str_list[3] == "":
                    node1_degree_list = []
                else:
                    node1_degree_list = node1_and_degree_str_list[3].split("///")
                try:
                    node2_and_degree_str_list = filter(lambda (network_type,\
                                                               is_directed,\
                                                               node,\
                                                               degree_str,\
                                                               in_degree_str,\
                                                               out_degree_str): node == node2,\
                                                       node_tuple_list)[0]
                except Exception as e:
                    failure_compute = failure_compute + 1
                    logging.error(e)
                    continue
                if node2_and_degree_str_list[3] == "":
                    node2_degree_list = []
                else:
                    node2_degree_list = node2_and_degree_str_list[3].split("///")
                # Merge current result
                node1_degree_list = _flatten([node1_degree_list])
                node2_degree_list = _flatten([node2_degree_list])
                # Intersect neighbor lists, iterating the shorter one.
                if len(node2_degree_list) <= len(node1_degree_list):
                    common_degree_list = filter(lambda node: node in node1_degree_list, node2_degree_list)
                else:
                    common_degree_list = filter(lambda node: node in node2_degree_list, node1_degree_list)
                common_degree_str_list_in_network.append("///".join(map(str, common_degree_list)))
                common_degree_num_list_in_network.append(len(common_degree_list))
                common_edge_tuple_list_in_network.append((node1, node2))
                common_degree_rate_list_in_network.append(len(common_degree_list)/float(len(node1_degree_list)+len(node2_degree_list)))
                success_compute = success_compute + 1
            # Merge all results
            degree_data_tuple_list_in_network = map(lambda edge_tuple, degree_str, degree_num, degree_rate:\
                                                    (edge_tuple, degree_str, degree_num, degree_rate),\
                                                    common_edge_tuple_list_in_network,\
                                                    common_degree_str_list_in_network,\
                                                    common_degree_num_list_in_network,\
                                                    common_degree_rate_list_in_network\
                                                    )
            logging.info("len(degree_data_tuple_list_in_network):{0}".format(len(degree_data_tuple_list_in_network)))
            logging.info("degree_data_tuple_list_in_network[:3]:{0}".format(degree_data_tuple_list_in_network[:3]))
            return degree_data_tuple_list_in_network

        # sub-function
        def sql_generator(database_name, connection_table_name, network_type, is_directed, node1, node2, common_neighbor_str, common_neighbor_num, common_neighbor_rate):
            """Build the UPDATE statement for one edge's common-neighbor columns."""
            # NOTE(review): values are interpolated, not parameterized; safe
            # only because inputs come from our own tables.
            try:
                sql = """UPDATE {database_name}.{table_name}
                         SET common_neighbor_str = '{common_neighbor_str}',
                             common_neighbor_num = {common_neighbor_num},
                             common_neighbor_rate = {common_neighbor_rate}
                         WHERE network_type = '{network_type}' AND
                               is_directed = {is_directed} AND
                               node1 = {node1} AND
                               node2 = {node2}"""\
                    .format(database_name = database_name, table_name = connection_table_name,\
                            common_neighbor_str = common_neighbor_str, common_neighbor_num = common_neighbor_num, common_neighbor_rate = common_neighbor_rate,\
                            network_type = network_type, is_directed = is_directed, node1 = node1, node2 = node2)
            except Exception as e:
                logging.error("Failed to generate sql for edge{node1}-{node2} {type} network is_directed:{directed}."\
                              .format(node1 = node1,\
                                      node2 = node2,\
                                      type = network_type,\
                                      directed = is_directed)\
                              )
                logging.error(e)
            return sql

        # sub-function
        def execute_update_sql_for_database(self, network_type, is_directed, database_name, connection_table_table, sql_list):
            """Run each UPDATE in sql_list, committing per statement."""
            # NOTE(review): the `connection_table_table` parameter is unused;
            # the error log below reads `connection_table_name` from the
            # enclosing method's scope -- likely a naming typo, confirm.
            cursor = self.con.cursor()
            sql_list_length = len(sql_list)
            success_update = 0
            failure_update = 0
            for sql_idx in xrange(len(sql_list)):
                sql = sql_list[sql_idx]
                # Progress log every 1000 statements and on the last one.
                if (sql_idx % 1000 == 0 and sql_idx > 998) or (sql_idx == sql_list_length-1):
                    logging.info("============== update {idx}th sql for {network_type}.{is_directed} network ==============".format(idx = sql_idx, network_type = network_type, is_directed = is_directed))
                    logging.info("sql_index:{idx}, finish rate:{rate}".format(idx = sql_idx, rate = float(sql_idx+1)/sql_list_length))
                    logging.info("success_rate:{success_rate}".format(success_rate = success_update / float(success_update + failure_update + 0.0001)))
                    logging.info("success_update:{success}, failure_update:{failure}".format(success = success_update, failure = failure_update))
                try:
                    cursor.execute(sql)
                    self.con.commit()
                    success_update = success_update + 1
                except MySQLdb.Error, e:
                    self.con.rollback()
                    logging.error("Failed in update {database}.{table} in MySQL.".format(database = database_name, table = connection_table_name))
                    logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
                    logging.error(sql)
                    failure_update = failure_update + 1
                    continue

        # start
        # get data from database
        node_tuple_list, connection_tuple_list = get_node_and_connection_data_from_database(self = self,\
                                                                                            database_name = database_name,\
                                                                                            node_table_name = node_table_name,\
                                                                                            connection_table_name = connection_table_name)
        # Disabled code retained verbatim: bio/info/social-undirected
        # sections were run previously and are kept for reference.
        '''
        # bio un
        bio_undirected_common_node_list = compute_common_degree_in_network(network_type = "bio",\
                                                                           is_directed = 0,\
                                                                           node_tuple_list = node_tuple_list,\
                                                                           connection_tuple_list = connection_tuple_list)
        bio_undirected_update_sql_list = map(lambda ((node1,\
                                                      node2),\
                                                     common_neighbor_str,\
                                                     common_neighbor_num,\
                                                     common_neighbor_rate):\
                                             sql_generator(database_name = database_name,\
                                                           connection_table_name = connection_table_name,\
                                                           network_type = "bio",\
                                                           is_directed = 0,\
                                                           node1 = node1,\
                                                           node2 = node2,\
                                                           common_neighbor_str = common_neighbor_str,\
                                                           common_neighbor_num = common_neighbor_num,\
                                                           common_neighbor_rate = common_neighbor_rate\
                                                           ),\
                                             bio_undirected_common_node_list,\
                                             )
        logging.info("len(bio_undirected_update_sql_list:{0}".format(len(bio_undirected_update_sql_list)))
        logging.info("bio_undirected_update_sql_list[:3]:{0}".format(bio_undirected_update_sql_list[:3]))
        execute_update_sql_for_database(self = self,\
                                        network_type = "bio",\
                                        is_directed = 0,\
                                        database_name = database_name,\
                                        connection_table_table = connection_table_name,\
                                        sql_list = bio_undirected_update_sql_list)
        # bio di
        bio_directed_common_node_list = compute_common_degree_in_network(network_type = "bio",\
                                                                         is_directed = 1,\
                                                                         node_tuple_list = node_tuple_list,\
                                                                         connection_tuple_list = connection_tuple_list)
        bio_directed_update_sql_list = map(lambda ((node1,\
                                                    node2),\
                                                   common_neighbor_str,\
                                                   common_neighbor_num,\
                                                   common_neighbor_rate):\
                                           sql_generator(database_name = database_name,\
                                                         connection_table_name = connection_table_name,\
                                                         network_type = "bio",\
                                                         is_directed = 1,\
                                                         node1 = node1,\
                                                         node2 = node2,\
                                                         common_neighbor_str = common_neighbor_str,\
                                                         common_neighbor_num = common_neighbor_num,\
                                                         common_neighbor_rate = common_neighbor_rate\
                                                         ),\
                                           bio_directed_common_node_list,\
                                           )
        logging.info("len(bio_directed_update_sql_list:{0}".format(len(bio_directed_update_sql_list)))
        logging.info("bio_directed_update_sql_list[:3]:{0}".format(bio_directed_update_sql_list[:3]))
        execute_update_sql_for_database(self = self,\
                                        network_type = "bio",\
                                        is_directed = 1,\
                                        database_name = database_name,\
                                        connection_table_table = connection_table_name,\
                                        sql_list = bio_directed_update_sql_list)
        # info un
        info_undirected_common_node_list = compute_common_degree_in_network(network_type = "info",\
                                                                            is_directed = 0,\
                                                                            node_tuple_list = node_tuple_list,\
                                                                            connection_tuple_list = connection_tuple_list)
        info_undirected_update_sql_list = map(lambda ((node1,\
                                                       node2),\
                                                      common_neighbor_str,\
                                                      common_neighbor_num,\
                                                      common_neighbor_rate):\
                                              sql_generator(database_name = database_name,\
                                                            connection_table_name = connection_table_name,\
                                                            network_type = "info",\
                                                            is_directed = 0,\
                                                            node1 = node1,\
                                                            node2 = node2,\
                                                            common_neighbor_str = common_neighbor_str,\
                                                            common_neighbor_num = common_neighbor_num,\
                                                            common_neighbor_rate = common_neighbor_rate\
                                                            ),\
                                              info_undirected_common_node_list,\
                                              )
        logging.info("len(info_undirected_update_sql_list:{0}".format(len(info_undirected_update_sql_list)))
        logging.info("info_undirected_update_sql_list[:3]:{0}".format(info_undirected_update_sql_list[:3]))
        execute_update_sql_for_database(self = self,\
                                        network_type = "info",\
                                        is_directed = 0,\
                                        database_name = database_name,\
                                        connection_table_table = connection_table_name,\
                                        sql_list = info_undirected_update_sql_list)
        # info di
        info_directed_common_node_list = compute_common_degree_in_network(network_type = "info",\
                                                                          is_directed = 1,\
                                                                          node_tuple_list = node_tuple_list,\
                                                                          connection_tuple_list = connection_tuple_list)
        info_directed_update_sql_list = map(lambda ((node1,\
                                                     node2),\
                                                    common_neighbor_str,\
                                                    common_neighbor_num,\
                                                    common_neighbor_rate):\
                                            sql_generator(database_name = database_name,\
                                                          connection_table_name = connection_table_name,\
                                                          network_type = "info",\
                                                          is_directed = 1,\
                                                          node1 = node1,\
                                                          node2 = node2,\
                                                          common_neighbor_str = common_neighbor_str,\
                                                          common_neighbor_num = common_neighbor_num,\
                                                          common_neighbor_rate = common_neighbor_rate\
                                                          ),\
                                            info_directed_common_node_list,\
                                            )
        logging.info("len(info_directed_update_sql_list:{0}".format(len(info_directed_update_sql_list)))
        logging.info("info_directed_update_sql_list[:3]:{0}".format(info_directed_update_sql_list[:3]))
        execute_update_sql_for_database(self = self,\
                                        network_type = "info",\
                                        is_directed = 1,\
                                        database_name = database_name,\
                                        connection_table_table = connection_table_name,\
                                        sql_list = info_directed_update_sql_list)
        # social un
        social_undirected_common_node_list = compute_common_degree_in_network(network_type = "social",\
                                                                              is_directed = 0,\
                                                                              node_tuple_list = node_tuple_list,\
                                                                              connection_tuple_list = connection_tuple_list)
        social_undirected_update_sql_list = map(lambda ((node1,\
                                                         node2),\
                                                        common_neighbor_str,\
                                                        common_neighbor_num,\
                                                        common_neighbor_rate):\
                                                sql_generator(database_name = database_name,\
                                                              connection_table_name = connection_table_name,\
                                                              network_type = "social",\
                                                              is_directed = 0,\
                                                              node1 = node1,\
                                                              node2 = node2,\
                                                              common_neighbor_str = common_neighbor_str,\
                                                              common_neighbor_num = common_neighbor_num,\
                                                              common_neighbor_rate = common_neighbor_rate\
                                                              ),\
                                                social_undirected_common_node_list,\
                                                )
        logging.info("len(social_undirected_update_sql_list:{0}".format(len(social_undirected_update_sql_list)))
        logging.info("social_undirected_update_sql_list[:3]:{0}".format(social_undirected_update_sql_list[:3]))
        execute_update_sql_for_database(self = self,\
                                        network_type = "social",\
                                        is_directed = 0,\
                                        database_name = database_name,\
                                        connection_table_table = connection_table_name,\
                                        sql_list = social_undirected_update_sql_list)
        '''
        # social di
        social_directed_common_node_list = compute_common_degree_in_network(network_type = "social",\
                                                                            is_directed = 1,\
                                                                            node_tuple_list = node_tuple_list,\
                                                                            connection_tuple_list = connection_tuple_list)
        social_directed_update_sql_list = map(lambda ((node1,\
                                                       node2),\
                                                      common_neighbor_str,\
                                                      common_neighbor_num,\
                                                      common_neighbor_rate):\
                                              sql_generator(database_name = database_name,\
                                                            connection_table_name = connection_table_name,\
                                                            network_type = "social",\
                                                            is_directed = 1,\
                                                            node1 = node1,\
                                                            node2 = node2,\
                                                            common_neighbor_str = common_neighbor_str,\
                                                            common_neighbor_num = common_neighbor_num,\
                                                            common_neighbor_rate = common_neighbor_rate\
                                                            ),\
                                              social_directed_common_node_list,\
                                              )
        logging.info("len(social_directed_update_sql_list:{0}".format(len(social_directed_update_sql_list)))
        logging.info("social_directed_update_sql_list[:3]:{0}".format(social_directed_update_sql_list[:3]))
        execute_update_sql_for_database(self = self,\
                                        network_type = "social",\
                                        is_directed = 1,\
                                        database_name = database_name,\
                                        connection_table_table = connection_table_name,\
                                        sql_list = social_directed_update_sql_list)
################################### PART3 CLASS TEST ##################################
'''
# Initialization
database_name = "LinkPredictionDB"
connection_table_name = "connection_table"
node_table_name = "node_table"
from pyspark import SparkContext
pyspark_sc = SparkContext()
EdgeComputer = ComputeEdgeProperty(database_name = database_name,\
pyspark_sc = pyspark_sc)
EdgeComputer.compute_common_degree_in_different_network(database_name = database_name,\
node_table_name = node_table_name,\
connection_table_name = connection_table_name)
'''
| |
"""
:codeauthor: :email:`Daniel Wallace <dwallace@saltstack.com>`
"""
import os
import re
import shutil
import tempfile
import pytest
import salt.client.ssh.client
import salt.config
import salt.roster
import salt.utils.files
import salt.utils.path
import salt.utils.thin
import salt.utils.yaml
from salt.client import ssh
from tests.support.case import ShellCase
from tests.support.mock import MagicMock, call, patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
@skipIf(not salt.utils.path.which("ssh"), "No ssh binary found in path")
class SSHPasswordTests(ShellCase):
    """Behavior of salt-ssh when key deployment fails on authentication."""

    @pytest.mark.slow_test
    def test_password_failure(self):
        """
        Check password failures when trying to deploy keys
        """
        opts = salt.config.client_config(self.get_config_file_path("master"))
        opts["list_hosts"] = False
        opts["argv"] = ["test.ping"]
        opts["selected_target_option"] = "glob"
        opts["tgt"] = "localhost"
        opts["arg"] = []
        # Roster file in the test run's temporary config dir.
        roster = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "roster")
        # Canned handle_ssh result simulating a publickey auth failure.
        handle_ssh_ret = [
            {
                "localhost": {
                    "retcode": 255,
                    "stderr": "Permission denied (publickey).\r\n",
                    "stdout": "",
                }
            },
        ]
        expected = {"localhost": "Permission denied (publickey)"}
        display_output = MagicMock()
        with patch(
            "salt.roster.get_roster_file", MagicMock(return_value=roster)
        ), patch(
            "salt.client.ssh.SSH.handle_ssh", MagicMock(return_value=handle_ssh_ret)
        ), patch(
            "salt.client.ssh.SSH.key_deploy", MagicMock(return_value=expected)
        ), patch(
            "salt.output.display_output", display_output
        ):
            client = ssh.SSH(opts)
            ret = next(client.run_iter())
            # run() must exit (SystemExit) because key deployment failed,
            # after displaying the key_deploy result.
            with self.assertRaises(SystemExit):
                client.run()
            display_output.assert_called_once_with(expected, "nested", opts)
            self.assertIs(ret, handle_ssh_ret[0])
@skipIf(not salt.utils.path.which("ssh"), "No ssh binary found in path")
class SSHReturnEventTests(ShellCase):
    """Return events fired by salt-ssh must carry the 'fun' field."""

    def test_not_missing_fun_calling_wfuncs(self):
        """Wheel-function returns include 'fun' both in the local return
        and in every fired event."""
        opts = salt.config.client_config(self.get_config_file_path("master"))
        opts["list_hosts"] = False
        opts["argv"] = ["state.show_highstate"]
        opts["selected_target_option"] = "glob"
        opts["tgt"] = "localhost"
        opts["arg"] = []
        roster = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "roster")
        # Empty per-host result: the 'fun' key must be added by run_iter.
        handle_ssh_ret = [
            {"localhost": {}},
        ]
        expected = {"localhost": {}}
        display_output = MagicMock()
        with patch(
            "salt.roster.get_roster_file", MagicMock(return_value=roster)
        ), patch(
            "salt.client.ssh.SSH.handle_ssh", MagicMock(return_value=handle_ssh_ret)
        ), patch(
            "salt.client.ssh.SSH.key_deploy", MagicMock(return_value=expected)
        ), patch(
            "salt.output.display_output", display_output
        ):
            client = ssh.SSH(opts)
            client.event = MagicMock()
            ret = next(client.run_iter())
            assert "localhost" in ret
            assert "fun" in ret["localhost"]
            client.run()
            display_output.assert_called_once_with(expected, "nested", opts)
            self.assertIs(ret, handle_ssh_ret[0])
            # One event per run path; each payload must include 'fun'.
            assert len(client.event.fire_event.call_args_list) == 2
            assert "fun" in client.event.fire_event.call_args_list[0][0][0]
            assert "fun" in client.event.fire_event.call_args_list[1][0][0]
class SSHRosterDefaults(TestCase):
    """`roster_defaults` from master config merge into flat-roster targets."""

    def setUp(self):
        # Flat roster with two targets; neither declares a user, so
        # roster_defaults must supply it.
        self.roster = """
        localhost:
          host: 127.0.0.1
          port: 2827
        self:
          host: 0.0.0.0
          port: 42
        """

    def test_roster_defaults_flat(self):
        """
        Test Roster Defaults on the flat roster
        """
        tempdir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)

        expected = {
            "self": {"host": "0.0.0.0", "user": "daniel", "port": 42},
            "localhost": {"host": "127.0.0.1", "user": "daniel", "port": 2827},
        }

        try:
            root_dir = os.path.join(tempdir, "foo", "bar")
            os.makedirs(root_dir)
            fpath = os.path.join(root_dir, "config")
            with salt.utils.files.fopen(fpath, "w") as fp_:
                fp_.write(
                    """
                    roster_defaults:
                      user: daniel
                    """
                )
            opts = salt.config.master_config(fpath)
            with patch(
                "salt.roster.get_roster_file", MagicMock(return_value=self.roster)
            ):
                with patch(
                    "salt.template.compile_template",
                    MagicMock(return_value=salt.utils.yaml.safe_load(self.roster)),
                ):
                    roster = salt.roster.Roster(opts=opts)
                    self.assertEqual(roster.targets("*", "glob"), expected)
        finally:
            # Always remove the temporary config tree.
            if os.path.isdir(tempdir):
                shutil.rmtree(tempdir)
class SSHSingleTests(TestCase):
    def setUp(self):
        """Build the common opts/target fixtures for ssh.Single tests."""
        self.tmp_cachedir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
        # Minimal salt-ssh command line: deploy an authorized key for root.
        self.argv = [
            "ssh.set_auth_key",
            "root",
            "hobn+amNAXSBTiOXEqlBjGB...rsa root@master",
        ]
        self.opts = {
            "argv": self.argv,
            "__role": "master",
            "cachedir": self.tmp_cachedir,
            "extension_modules": os.path.join(self.tmp_cachedir, "extmods"),
        }
        # Per-host target parameters passed as **kwargs to ssh.Single.
        self.target = {
            "passwd": "abc123",
            "ssh_options": None,
            "sudo": False,
            "identities_only": False,
            "host": "login1",
            "user": "root",
            "timeout": 65,
            "remote_port_forwards": None,
            "sudo_user": "",
            "port": "22",
            "priv": "/etc/salt/pki/master/ssh/salt-ssh.rsa",
        }
def test_single_opts(self):
"""Sanity check for ssh.Single options"""
single = ssh.Single(
self.opts,
self.opts["argv"],
"localhost",
mods={},
fsclient=None,
thin=salt.utils.thin.thin_path(self.opts["cachedir"]),
mine=False,
**self.target
)
self.assertEqual(single.shell._ssh_opts(), "")
self.assertEqual(
single.shell._cmd_str("date +%s"),
"ssh login1 "
"-o KbdInteractiveAuthentication=no -o "
"PasswordAuthentication=yes -o ConnectTimeout=65 -o Port=22 "
"-o IdentityFile=/etc/salt/pki/master/ssh/salt-ssh.rsa "
"-o User=root date +%s",
)
    def test_run_with_pre_flight(self):
        """
        test Single.run() when ssh_pre_flight is set
        and script successfully runs
        """
        target = self.target.copy()
        target["ssh_pre_flight"] = os.path.join(RUNTIME_VARS.TMP, "script.sh")
        single = ssh.Single(
            self.opts,
            self.opts["argv"],
            "localhost",
            mods={},
            fsclient=None,
            thin=salt.utils.thin.thin_path(self.opts["cachedir"]),
            mine=False,
            **target
        )

        cmd_ret = ("Success", "", 0)
        mock_flight = MagicMock(return_value=cmd_ret)
        mock_cmd = MagicMock(return_value=cmd_ret)
        patch_flight = patch("salt.client.ssh.Single.run_ssh_pre_flight", mock_flight)
        patch_cmd = patch("salt.client.ssh.Single.cmd_block", mock_cmd)
        patch_exec_cmd = patch(
            "salt.client.ssh.shell.Shell.exec_cmd", return_value=("", "", 1)
        )
        # os.path.exists(pre-flight script) -> True: script will be run.
        patch_os = patch("os.path.exists", side_effect=[True])

        with patch_os, patch_flight, patch_cmd, patch_exec_cmd:
            ret = single.run()
            # Pre-flight ran, then the normal command path continued.
            mock_cmd.assert_called()
            mock_flight.assert_called()
            assert ret == cmd_ret
    def test_run_with_pre_flight_stderr(self):
        """
        test Single.run() when ssh_pre_flight is set
        and script errors when run
        """
        target = self.target.copy()
        target["ssh_pre_flight"] = os.path.join(RUNTIME_VARS.TMP, "script.sh")
        single = ssh.Single(
            self.opts,
            self.opts["argv"],
            "localhost",
            mods={},
            fsclient=None,
            thin=salt.utils.thin.thin_path(self.opts["cachedir"]),
            mine=False,
            **target
        )

        # Pre-flight fails: nonzero retcode with stderr output.
        cmd_ret = ("", "Error running script", 1)
        mock_flight = MagicMock(return_value=cmd_ret)
        mock_cmd = MagicMock(return_value=cmd_ret)
        patch_flight = patch("salt.client.ssh.Single.run_ssh_pre_flight", mock_flight)
        patch_cmd = patch("salt.client.ssh.Single.cmd_block", mock_cmd)
        patch_exec_cmd = patch(
            "salt.client.ssh.shell.Shell.exec_cmd", return_value=("", "", 1)
        )
        patch_os = patch("os.path.exists", side_effect=[True])

        with patch_os, patch_flight, patch_cmd, patch_exec_cmd:
            ret = single.run()
            # A failed pre-flight aborts the run: cmd_block is never reached
            # and the failure tuple is returned as-is.
            mock_cmd.assert_not_called()
            mock_flight.assert_called()
            assert ret == cmd_ret
    def test_run_with_pre_flight_script_doesnot_exist(self):
        """
        test Single.run() when ssh_pre_flight is set
        and the script does not exist
        """
        target = self.target.copy()
        target["ssh_pre_flight"] = os.path.join(RUNTIME_VARS.TMP, "script.sh")
        single = ssh.Single(
            self.opts,
            self.opts["argv"],
            "localhost",
            mods={},
            fsclient=None,
            thin=salt.utils.thin.thin_path(self.opts["cachedir"]),
            mine=False,
            **target
        )

        cmd_ret = ("Success", "", 0)
        mock_flight = MagicMock(return_value=cmd_ret)
        mock_cmd = MagicMock(return_value=cmd_ret)
        patch_flight = patch("salt.client.ssh.Single.run_ssh_pre_flight", mock_flight)
        patch_cmd = patch("salt.client.ssh.Single.cmd_block", mock_cmd)
        patch_exec_cmd = patch(
            "salt.client.ssh.shell.Shell.exec_cmd", return_value=("", "", 1)
        )
        # os.path.exists(pre-flight script) -> False: script is skipped.
        patch_os = patch("os.path.exists", side_effect=[False])

        with patch_os, patch_flight, patch_cmd, patch_exec_cmd:
            ret = single.run()
            # Missing script: pre-flight is skipped, normal run proceeds.
            mock_cmd.assert_called()
            mock_flight.assert_not_called()
            assert ret == cmd_ret
    def test_run_with_pre_flight_thin_dir_exists(self):
        """
        test Single.run() when ssh_pre_flight is set
        and thin_dir already exists
        """
        target = self.target.copy()
        target["ssh_pre_flight"] = os.path.join(RUNTIME_VARS.TMP, "script.sh")
        single = ssh.Single(
            self.opts,
            self.opts["argv"],
            "localhost",
            mods={},
            fsclient=None,
            thin=salt.utils.thin.thin_path(self.opts["cachedir"]),
            mine=False,
            **target
        )
        cmd_ret = ("", "", 0)
        mock_flight = MagicMock(return_value=cmd_ret)
        # The same mock backs both exec_cmd and cmd_block; only the
        # cmd_block path is expected to fire.
        mock_cmd = MagicMock(return_value=cmd_ret)
        patch_flight = patch("salt.client.ssh.Single.run_ssh_pre_flight", mock_flight)
        patch_cmd = patch("salt.client.ssh.shell.Shell.exec_cmd", mock_cmd)
        patch_cmd_block = patch("salt.client.ssh.Single.cmd_block", mock_cmd)
        # os.path.exists -> True: thin_dir is already deployed, so the
        # pre-flight script must NOT be run again.
        patch_os = patch("os.path.exists", return_value=True)
        with patch_os, patch_flight, patch_cmd, patch_cmd_block:
            ret = single.run()
            mock_cmd.assert_called()
            mock_flight.assert_not_called()
            assert ret == cmd_ret
def test_execute_script(self):
"""
test Single.execute_script()
"""
single = ssh.Single(
self.opts,
self.opts["argv"],
"localhost",
mods={},
fsclient=None,
thin=salt.utils.thin.thin_path(self.opts["cachedir"]),
mine=False,
winrm=False,
**self.target
)
exp_ret = ("Success", "", 0)
mock_cmd = MagicMock(return_value=exp_ret)
patch_cmd = patch("salt.client.ssh.shell.Shell.exec_cmd", mock_cmd)
script = os.path.join(RUNTIME_VARS.TMP, "script.sh")
with patch_cmd:
ret = single.execute_script(script=script)
assert ret == exp_ret
assert mock_cmd.call_count == 2
assert [
call("/bin/sh '{}'".format(script)),
call("rm '{}'".format(script)),
] == mock_cmd.call_args_list
    def test_shim_cmd(self):
        """
        test Single.shim_cmd()
        """
        single = ssh.Single(
            self.opts,
            self.opts["argv"],
            "localhost",
            mods={},
            fsclient=None,
            thin=salt.utils.thin.thin_path(self.opts["cachedir"]),
            mine=False,
            winrm=False,
            tty=True,
            **self.target
        )
        exp_ret = ("Success", "", 0)
        mock_cmd = MagicMock(return_value=exp_ret)
        patch_cmd = patch("salt.client.ssh.shell.Shell.exec_cmd", mock_cmd)
        patch_send = patch("salt.client.ssh.shell.Shell.send", return_value=("", "", 0))
        # Pin os.urandom so the randomly-named temp file is deterministic:
        # b"5\xd9l\xca\xc2\xff".hex() == "35d96ccac2ff", hence the
        # ".35d96ccac2ff.py" name asserted below.
        patch_rand = patch("os.urandom", return_value=b"5\xd9l\xca\xc2\xff")
        with patch_cmd, patch_rand, patch_send:
            ret = single.shim_cmd(cmd_str="echo test")
            assert ret == exp_ret
            # Shim file is executed, then removed.
            assert [
                call("/bin/sh '.35d96ccac2ff.py'"),
                call("rm '.35d96ccac2ff.py'"),
            ] == mock_cmd.call_args_list
    def test_run_ssh_pre_flight(self):
        """
        test Single.run_ssh_pre_flight
        """
        target = self.target.copy()
        target["ssh_pre_flight"] = os.path.join(RUNTIME_VARS.TMP, "script.sh")
        single = ssh.Single(
            self.opts,
            self.opts["argv"],
            "localhost",
            mods={},
            fsclient=None,
            thin=salt.utils.thin.thin_path(self.opts["cachedir"]),
            mine=False,
            winrm=False,
            tty=True,
            **target
        )
        exp_ret = ("Success", "", 0)
        mock_cmd = MagicMock(return_value=exp_ret)
        patch_cmd = patch("salt.client.ssh.shell.Shell.exec_cmd", mock_cmd)
        patch_send = patch("salt.client.ssh.shell.Shell.send", return_value=exp_ret)
        # The pre-flight script is copied to the remote tempdir under its
        # basename before being executed and removed.
        exp_tmp = os.path.join(
            tempfile.gettempdir(), os.path.basename(target["ssh_pre_flight"])
        )
        with patch_cmd, patch_send:
            ret = single.run_ssh_pre_flight()
            assert ret == exp_ret
            assert [
                call("/bin/sh '{}'".format(exp_tmp)),
                call("rm '{}'".format(exp_tmp)),
            ] == mock_cmd.call_args_list
@skipIf(salt.utils.platform.is_windows(), "SSH_PY_SHIM not set on windows")
def test_cmd_run_set_path(self):
"""
test when set_path is set
"""
target = self.target
target["set_path"] = "$PATH:/tmp/path/"
single = ssh.Single(
self.opts,
self.opts["argv"],
"localhost",
mods={},
fsclient=None,
thin=salt.utils.thin.thin_path(self.opts["cachedir"]),
mine=False,
**self.target
)
ret = single._cmd_str()
assert re.search("\\" + target["set_path"], ret)
@skipIf(salt.utils.platform.is_windows(), "SSH_PY_SHIM not set on windows")
def test_cmd_run_not_set_path(self):
"""
test when set_path is not set
"""
target = self.target
single = ssh.Single(
self.opts,
self.opts["argv"],
"localhost",
mods={},
fsclient=None,
thin=salt.utils.thin.thin_path(self.opts["cachedir"]),
mine=False,
**self.target
)
ret = single._cmd_str()
assert re.search('SET_PATH=""', ret)
@skipIf(not salt.utils.path.which("ssh"), "No ssh binary found in path")
class SSHTests(ShellCase):
    """
    Tests for salt.client.ssh.SSH target handling:
    _expand_target(), _update_targets(), parse_tgt and
    SSHClient._prep_ssh() option passing.
    """
    def setUp(self):
        # Minimal roster used by the expand_target tests; it is fed to the
        # client via a patched compile_template, not read from disk.
        self.roster = """
        localhost:
            host: 127.0.0.1
            port: 2827
        """
        self.opts = salt.config.client_config(self.get_config_file_path("master"))
        self.opts["selected_target_option"] = "glob"
    def test_expand_target_ip_address(self):
        """
        test expand_target when target is root@<ip address>
        """
        host = "127.0.0.1"
        user = "test-user@"
        opts = self.opts
        opts["tgt"] = user + host
        with patch(
            "salt.utils.network.is_reachable_host", MagicMock(return_value=False)
        ):
            client = ssh.SSH(opts)
        assert opts["tgt"] == user + host
        with patch(
            "salt.roster.get_roster_file", MagicMock(return_value="/etc/salt/roster")
        ), patch(
            "salt.client.ssh.compile_template",
            MagicMock(return_value=salt.utils.yaml.safe_load(self.roster)),
        ):
            client._expand_target()
        # The user@ prefix is stripped once the host is resolved.
        assert opts["tgt"] == host
    def test_expand_target_no_host(self):
        """
        test expand_target when host is not included in the rosterdata
        """
        host = "127.0.0.1"
        user = "test-user@"
        opts = self.opts
        opts["tgt"] = user + host
        # Roster entry maps a name to a plain string (no host key).
        roster = """
        localhost: 127.0.0.1
        """
        roster_file = os.path.join(RUNTIME_VARS.TMP, "test_roster_no_host")
        with salt.utils.files.fopen(roster_file, "w") as fp:
            salt.utils.yaml.safe_dump(salt.utils.yaml.safe_load(roster), fp)
        with patch(
            "salt.utils.network.is_reachable_host", MagicMock(return_value=False)
        ):
            client = ssh.SSH(opts)
        assert opts["tgt"] == user + host
        with patch("salt.roster.get_roster_file", MagicMock(return_value=roster_file)):
            client._expand_target()
        assert opts["tgt"] == host
    def test_expand_target_dns(self):
        """
        test expand_target when target is root@<dns>
        """
        host = "localhost"
        user = "test-user@"
        opts = self.opts
        opts["tgt"] = user + host
        with patch(
            "salt.utils.network.is_reachable_host", MagicMock(return_value=False)
        ):
            client = ssh.SSH(opts)
        assert opts["tgt"] == user + host
        with patch(
            "salt.roster.get_roster_file", MagicMock(return_value="/etc/salt/roster")
        ), patch(
            "salt.client.ssh.compile_template",
            MagicMock(return_value=salt.utils.yaml.safe_load(self.roster)),
        ):
            client._expand_target()
        assert opts["tgt"] == host
    def test_expand_target_no_user(self):
        """
        test expand_target when no user defined
        """
        host = "127.0.0.1"
        opts = self.opts
        opts["tgt"] = host
        with patch(
            "salt.utils.network.is_reachable_host", MagicMock(return_value=False)
        ):
            client = ssh.SSH(opts)
        assert opts["tgt"] == host
        with patch(
            "salt.roster.get_roster_file", MagicMock(return_value="/etc/salt/roster")
        ), patch(
            "salt.client.ssh.compile_template",
            MagicMock(return_value=salt.utils.yaml.safe_load(self.roster)),
        ):
            client._expand_target()
        # Without a user@ prefix the tgt is left untouched.
        assert opts["tgt"] == host
    def test_update_targets_ip_address(self):
        """
        test update_targets when host is ip address
        """
        host = "127.0.0.1"
        user = "test-user@"
        opts = self.opts
        opts["tgt"] = user + host
        with patch(
            "salt.utils.network.is_reachable_host", MagicMock(return_value=False)
        ):
            client = ssh.SSH(opts)
        assert opts["tgt"] == user + host
        client._update_targets()
        assert opts["tgt"] == host
        # The user portion moves into the per-target config.
        assert client.targets[host]["user"] == user.split("@")[0]
    def test_update_targets_dns(self):
        """
        test update_targets when host is dns
        """
        host = "localhost"
        user = "test-user@"
        opts = self.opts
        opts["tgt"] = user + host
        with patch(
            "salt.utils.network.is_reachable_host", MagicMock(return_value=False)
        ):
            client = ssh.SSH(opts)
        assert opts["tgt"] == user + host
        client._update_targets()
        assert opts["tgt"] == host
        assert client.targets[host]["user"] == user.split("@")[0]
    def test_update_targets_no_user(self):
        """
        test update_targets when no user defined
        """
        host = "127.0.0.1"
        opts = self.opts
        opts["tgt"] = host
        with patch(
            "salt.utils.network.is_reachable_host", MagicMock(return_value=False)
        ):
            client = ssh.SSH(opts)
        assert opts["tgt"] == host
        client._update_targets()
        assert opts["tgt"] == host
    def test_update_expand_target_dns(self):
        """
        test update_targets and expand_target when host is dns
        """
        host = "localhost"
        user = "test-user@"
        opts = self.opts
        opts["tgt"] = user + host
        with patch(
            "salt.utils.network.is_reachable_host", MagicMock(return_value=False)
        ):
            client = ssh.SSH(opts)
        assert opts["tgt"] == user + host
        with patch(
            "salt.roster.get_roster_file", MagicMock(return_value="/etc/salt/roster")
        ), patch(
            "salt.client.ssh.compile_template",
            MagicMock(return_value=salt.utils.yaml.safe_load(self.roster)),
        ):
            client._expand_target()
        client._update_targets()
        assert opts["tgt"] == host
        assert client.targets[host]["user"] == user.split("@")[0]
    def test_parse_tgt(self):
        """
        test parse_tgt when user and host set on
        the ssh cli tgt
        """
        host = "localhost"
        user = "test-user@"
        opts = self.opts
        opts["tgt"] = user + host
        with patch(
            "salt.utils.network.is_reachable_host", MagicMock(return_value=False)
        ):
            # ssh_cli_tgt must be populated by SSH() from the raw tgt.
            assert not self.opts.get("ssh_cli_tgt")
            client = ssh.SSH(opts)
            assert client.parse_tgt["hostname"] == host
            assert client.parse_tgt["user"] == user.split("@")[0]
            assert self.opts.get("ssh_cli_tgt") == user + host
    def test_parse_tgt_no_user(self):
        """
        test parse_tgt when only the host set on
        the ssh cli tgt
        """
        host = "localhost"
        opts = self.opts
        opts["ssh_user"] = "ssh-usr"
        opts["tgt"] = host
        with patch(
            "salt.utils.network.is_reachable_host", MagicMock(return_value=False)
        ):
            assert not self.opts.get("ssh_cli_tgt")
            client = ssh.SSH(opts)
            assert client.parse_tgt["hostname"] == host
            # Falls back to the configured ssh_user.
            assert client.parse_tgt["user"] == opts["ssh_user"]
            assert self.opts.get("ssh_cli_tgt") == host
    def test_extra_filerefs(self):
        """
        test "extra_filerefs" are not excluded from kwargs
        when preparing the SSH opts
        """
        opts = {
            "eauth": "auto",
            "username": "test",
            "password": "test",
            "client": "ssh",
            "tgt": "localhost",
            "fun": "test.ping",
            "ssh_port": 22,
            "extra_filerefs": "salt://foobar",
        }
        roster = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "roster")
        client = salt.client.ssh.client.SSHClient(
            mopts=self.opts, disable_custom_roster=True
        )
        with patch("salt.roster.get_roster_file", MagicMock(return_value=roster)):
            ssh_obj = client._prep_ssh(**opts)
            assert ssh_obj.opts.get("extra_filerefs", None) == "salt://foobar"
| |
# -*- coding: utf-8 -*-
import json
from os import path
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from s3.s3filter import S3DateFilter, S3LocationFilter, S3OptionsFilter, S3TextFilter, S3FilterForm
from s3.s3utils import S3CustomController
THEME = "DRMP"
# =============================================================================
class index(S3CustomController):
    """
        Custom Home Page
        - image carousel plus the latest 4 future Events and the latest
          4 Alerts, rendered with the shared CMS post layout
    """
    def __call__(self):
        response = current.response
        output = {}
        #output["title"] = response.title = current.deployment_settings.get_system_name()
        s3 = response.s3
        # Image Carousel
        s3.jquery_ready.append('''$('#myCarousel').carousel()''')
        # Latest 4 Events and Alerts
        from s3.s3query import FS
        s3db = current.s3db
        layout = s3.render_posts
        list_id = "news_datalist"
        limit = 4
        list_fields = ["series_id",
                       "location_id",
                       "date",
                       "body",
                       "created_by",
                       "created_by$organisation_id",
                       "document.file",
                       "event_post.event_id",
                       ]
        resource = s3db.resource("cms_post")
        resource.add_filter(FS("series_id$name") == "Event")
        # Only show Future Events
        resource.add_filter(resource.table.date >= current.request.now)
        # Order with next Event first
        orderby = "date"
        output["events"] = latest_records(resource, layout, list_id, limit, list_fields, orderby)
        # Fresh resource for the Alerts list (filters accumulate otherwise)
        resource = s3db.resource("cms_post")
        resource.add_filter(FS("series_id$name") == "Alert")
        # Order with most recent Alert first
        orderby = "date desc"
        output["alerts"] = latest_records(resource, layout, list_id, limit, list_fields, orderby)
        self._view(THEME, "index.html")
        return output
# =============================================================================
class contact(S3CustomController):
    """ Custom "Contact Us" page """
    def __call__(self):
        """ Select the themed view and return the localised title """
        self._view(THEME, "contact.html")
        return {"title": current.T("Contact Us")}
# =============================================================================
class glossary(S3CustomController):
    """ Custom Glossary page """
    def __call__(self):
        """ Select the themed view and return the localised title """
        self._view(THEME, "glossary.html")
        return {"title": current.T("Glossary")}
# =============================================================================
class links(S3CustomController):
    """ Custom Links page """
    def __call__(self):
        """ Select the themed view and return the localised title """
        self._view(THEME, "links.html")
        return {"title": current.T("Links")}
# =============================================================================
class datalist():
    """ Alternate URL for News Feed page """
    def __call__(self):
        # Delegate to the shared newsfeed renderer defined below.
        return _newsfeed()
# =============================================================================
class datalist_dl_post():
    """ AJAX URL for CMS Posts (for News Feed page) """
    def __call__(self):
        # _newsfeed() detects "datalist_dl_post" in request.args and
        # switches to the DataList-pagination code path.
        return _newsfeed()
# =============================================================================
class datalist_dl_filter():
    """ AJAX URL for CMS Posts Filter Form (for News Feed page) """
    def __call__(self):
        # _newsfeed() detects "datalist_dl_filter" in request.args and
        # switches to the filter-options code path.
        return _newsfeed()
# =============================================================================
class login():
    """ Custom Login page """
    def __call__(self):
        # NOTE(review): _login() is not defined in this part of the module;
        # confirm it is provided elsewhere in this controllers file.
        return _login()
# =============================================================================
class newsfeed():
    """ Newsfeed page """
    def __call__(self):
        # Delegate to the shared newsfeed renderer defined below.
        return _newsfeed()
# =============================================================================
class validate():
    """ Alternate URL for News Feed page """
    def __call__(self):
        # _newsfeed() detects "validate.json" in request.args and handles
        # inline component validation requests.
        return _newsfeed()
# =============================================================================
def _newsfeed():
    """
        Custom Page
        - Filterable DataList of CMS Posts & a DataList of Events

        Serves several request flavours from the same entry point,
        distinguished by request.args: normal page render, DataList
        pagination/deletion AJAX, filter-form option AJAX, inline
        validation and subscription lookups.
    """
    #if not current.auth.is_logged_in():
    #    current.auth.permission.fail()
    T = current.T
    s3db = current.s3db
    request = current.request
    response = current.response
    s3 = response.s3
    # Ensure that filtered views translate into options which update the Widget
    get_vars = request.get_vars
    if "~.series_id$name" in get_vars:
        # Translate a series-name filter into a series-id filter
        series_name = get_vars["~.series_id$name"]
        table = s3db.cms_series
        series = current.db(table.name == series_name).select(table.id,
                                                              limitby=(0, 1)).first()
        if series:
            series_id = str(series.id)
            get_vars.pop("~.series_id$name")
            get_vars["~.series_id__belongs"] = series_id
    current.deployment_settings.customise_controller("cms_post")
    list_layout = s3.render_posts
    filter_widgets = [S3TextFilter(["body"],
                                   label="",
                                   _class="filter-search",
                                   #_placeholder=T("Search").upper(),
                                   ),
                      S3OptionsFilter("series_id",
                                      label=T("Filter by Type"),
                                      represent="%(name)s",
                                      widget="multiselect",
                                      hidden=True,
                                      ),
                      S3LocationFilter("location_id",
                                       label=T("Filter by Location"),
                                       levels=("L1", "L2", "L3"),
                                       widget="multiselect",
                                       hidden=True,
                                       ),
                      S3OptionsFilter("created_by$organisation_id",
                                      label=T("Filter by Organization"),
                                      # Can't use this for integers, use field.represent instead
                                      #represent="%(name)s",
                                      widget="multiselect",
                                      hidden=True,
                                      ),
                      S3DateFilter("created_on",
                                   label=T("Filter by Date"),
                                   hide_time=True,
                                   hidden=True,
                                   ),
                      ]
    s3db.configure("cms_post",
                   # We use a custom Advanced widget
                   filter_advanced = False,
                   filter_formstyle = filter_formstyle,
                   filter_submit = (T("SEARCH"), "btn btn-primary"),
                   filter_widgets = filter_widgets,
                   list_layout = list_layout,
                   # Create form comes via AJAX in a Modal
                   insertable = False,
                   notify_fields = [(T("Type"), "series_id"),
                                    (T("Date"), "date"),
                                    (T("Location"), "location_id"),
                                    (T("Description"), "body"),
                                    ],
                   notify_template = "notify_post",
                   )
    s3.dl_pagelength = 6 # 5 forces an AJAX call
    # Temporarily rewrite request.args for the REST controller; restored
    # after the call below.
    old_args = request.args
    if "datalist_dl_post" in old_args:
        # DataList pagination or Ajax-deletion request
        request.args = ["datalist_f"]
        ajax = "list"
    elif "datalist_dl_filter" in old_args:
        # FilterForm options update request
        request.args = ["filter"]
        ajax = "filter"
    elif "validate.json" in old_args:
        # Inline component validation request
        request.args = []
        ajax = True
    elif current.auth.permission.format == "msg":
        # Subscription lookup request
        request.args = []
        ajax = True
    else:
        # Default
        request.args = ["datalist_f"]
        ajax = None
    def prep(r):
        # Force the representation that matches the AJAX mode
        if ajax == "list":
            r.representation = "dl"
        elif ajax == "filter":
            r.representation = "json"
        return True
    s3.prep = prep
    output = current.rest_controller("cms", "post",
                                     list_ajaxurl = URL(f="index",
                                                        args="datalist_dl_post"),
                                     filter_ajax_url = URL(f="index",
                                                           args="datalist_dl_filter",
                                                           vars={}),
                                     )
    request.args = old_args
    if ajax == "list":
        # Don't override view if this is an Ajax-deletion request
        if not "delete" in request.get_vars:
            response.view = "plain.html"
    elif not ajax:
        # Set Title & View after REST Controller, in order to override
        output["title"] = T("News Feed")
        view = path.join(request.folder, "modules", "templates",
                         THEME, "views", "newsfeed.html")
        try:
            # Pass view as file not str to work in compiled mode
            response.view = open(view, "rb")
        except IOError:
            from gluon.http import HTTP
            raise HTTP(404, "Unable to open Custom View: %s" % view)
        s3.js_global.append('''i18n.adv_search="%s"''' % T("Advanced Search"))
        s3.scripts.append("/%s/static/themes/%s/js/newsfeed.js" % (request.application, THEME))
    # Latest 5 Disasters
    resource = s3db.resource("event_event")
    layout = render_events
    list_id = "event_datalist"
    limit = 5
    orderby = "start_date desc"
    list_fields = ["name",
                   "event_type_id$name",
                   "start_date",
                   "closed",
                   ]
    output["disasters"] = latest_records(resource, layout, list_id, limit, list_fields, orderby)
    return output
# =============================================================================
def latest_records(resource, layout, list_id, limit, list_fields, orderby):
    """
        Display a dataList of the latest records for a resource

        @param resource: the (pre-filtered) S3Resource to list
        @param layout: the dataList item renderer
        @param list_id: HTML id for the list element
        @param limit: maximum number of records to show
        @param list_fields: field selectors to extract
        @param orderby: SQL orderby expression (string)
        @return: rendered HTML for the list, or an "empty"/"no match" DIV

        @todo: remove this wrapper
    """
    #orderby = resource.table[orderby]
    datalist, numrows, ids = resource.datalist(fields=list_fields,
                                               start=None,
                                               limit=limit,
                                               list_id=list_id,
                                               orderby=orderby,
                                               layout=layout)
    if numrows == 0:
        # Empty table or just no match?
        from s3.s3crud import S3CRUD
        table = resource.table
        if "deleted" in table:
            available_records = current.db(table.deleted != True)
        else:
            available_records = current.db(table._id > 0)
        if available_records.select(table._id,
                                    limitby=(0, 1)).first():
            # Records exist, the filter just excluded them all
            msg = DIV(S3CRUD.crud_string(resource.tablename,
                                         "msg_no_match"),
                      _class="empty")
        else:
            msg = DIV(S3CRUD.crud_string(resource.tablename,
                                         "msg_list_empty"),
                      _class="empty")
        data = msg
    else:
        # Render the list
        data = datalist.html()
    return data
# -----------------------------------------------------------------------------
def filter_formstyle(row_id, label, widget, comment, hidden=False):
    """
        Custom Formstyle for FilterForm

        @param row_id: HTML id for the row
        @param label: the label
        @param widget: the form widget
        @param comment: the comment (unused)
        @param hidden: whether the row should initially be hidden or not
        @return: a DIV wrapping label (if any) and widget
    """
    # Hidden rows carry the "advanced hide" class and are toggled by JS
    css_class = "advanced hide" if hidden else ""
    components = [label, widget] if label else [widget]
    return DIV(*components, _id=row_id, _class=css_class)
# -----------------------------------------------------------------------------
def render_events(list_id, item_id, resource, rfields, record):
    """
        Custom dataList item renderer for 'Disasters' on the News Feed page

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render (unused here)
        @param record: the record as dict
        @return: DIV with the rendered item
    """
    record_id = record["event_event.id"]
    item_class = "thumbnail"
    # Raw (unrepresented) values for boolean checks
    raw = record._row
    name = record["event_event.name"]
    date = record["event_event.start_date"]
    closed = raw["event_event.closed"]
    event_type = record["event_event_type.name"]
    if closed:
        # Closed events get no edit bar
        edit_bar = DIV()
    else:
        item_class = "%s disaster" % item_class
        permit = current.auth.s3_has_permission
        table = resource.table
        if permit("update", table, record_id=record_id):
            # Edit opens the update form in a modal, then refreshes the list
            edit_btn = A(I(" ", _class="icon icon-edit"),
                         _href=URL(c="event", f="event",
                                   args=[record_id, "update.popup"],
                                   vars={"refresh": list_id,
                                         "record": record_id}),
                         _class="s3_modal",
                         _title=current.response.s3.crud_strings.event_event.title_update,
                         )
        else:
            edit_btn = ""
        if permit("delete", table, record_id=record_id):
            delete_btn = A(I(" ", _class="icon icon-remove-sign"),
                           _class="dl-item-delete",
                           )
        else:
            delete_btn = ""
        edit_bar = DIV(edit_btn,
                       delete_btn,
                       _class="edit-bar fright",
                       )
    # Render the item
    # NOTE(review): the icon path assumes a static image exists per event
    # type name -- confirm against the static/img/event folder.
    item = DIV(DIV(A(IMG(_class="media-object",
                         _src=URL(c="static",
                                  f="img",
                                  args=["event", "%s.png" % event_type]),
                         ),
                     _class="pull-left",
                     _href="#",
                     ),
                   edit_bar,
                   DIV(A(H5(name,
                            _class="media-heading"),
                         SPAN(date,
                              _class="date-title",
                              ),
                         _href=URL(c="event", f="event",
                                   args=[record_id, "profile"]),
                         ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# =============================================================================
class subscriptions(S3CustomController):
    """ Custom page to manage subscriptions """
    # -------------------------------------------------------------------------
    def __call__(self):
        """ Main entry point, configuration """
        T = current.T
        # Must be logged in
        auth = current.auth
        if not auth.s3_logged_in():
            auth.permission.fail()
        # Available resources
        resources = [dict(resource="cms_post",
                          url="default/index/newsfeed",
                          label=T("Updates")),
                     ]
        # Filter widgets
        # @note: subscription manager has no resource context, so
        #        must configure fixed options or lookup resources
        #        for filter widgets which need it.
        filters = [S3OptionsFilter("series_id",
                                   label = T("Subscribe to"),
                                   represent = "%(name)s",
                                   resource = "cms_post",
                                   _name = "type-filter",
                                   ),
                   S3LocationFilter("location_id",
                                    label = T("Location(s)"),
                                    levels = ("L1",),
                                    resource = "cms_post",
                                    _name = "location-filter",
                                    ),
                   #S3OptionsFilter("created_by$organisation_id",
                   #                label = T("Filter by Organization"),
                   #                represent = s3db.org_organisation_represent,
                   #                #represent = "%(name)s",
                   #                resource = "cms_post",
                   #                _name = "organisation-filter",
                   #                ),
                   ]
        # Title and view
        title = T("Notification Settings")
        self._view(THEME, "subscriptions.html")
        # Form
        form = self._manage_subscriptions(resources, filters)
        return dict(title=title, form=form)
    # -------------------------------------------------------------------------
    @staticmethod
    def _options(fieldname):
        """
            Lookup the full set of options for a Filter Widget
            - for Subscriptions we don't want to see just the options available in current data

            @param fieldname: "series_id" or "location_id"
            @return: dict {id: name} for series, list of ids for locations

            NOTE(review): options is unbound (NameError) for any other
            fieldname -- confirm callers only pass these two selectors.
        """
        db = current.db
        if fieldname == "series_id":
            table = current.s3db.cms_series
            rows = db(table.deleted == False).select(table.id,
                                                     table.name)
            options = {}
            for row in rows:
                options[row.id] = row.name
        elif fieldname == "location_id":
            table = current.s3db.gis_location
            query = (table.deleted == False) & \
                    (table.level == "L1")
            # IDs converted inside widget's _options() function
            rows = db(query).select(table.id)
            options = [row.id for row in rows]
        return options
    # -------------------------------------------------------------------------
    def _manage_subscriptions(self, resources, filters):
        """
            Custom form to manage subscriptions

            @param resources: available resources config
            @param filters: filter widgets
            @return: the subscription FORM (also processes submissions)
        """
        from gluon.sqlhtml import SQLFORM
        from gluon.validators import IS_IN_SET
        from s3.s3widgets import S3GroupedOptionsWidget
        # L10n
        T = current.T
        labels = Storage(
            RESOURCES = T("Subscribe To"),
            NOTIFY_ON = T("Notify On"),
            FREQUENCY = T("Frequency"),
            NOTIFY_BY = T("Notify By"),
            MORE = T("More Options"),
            LESS = T("Less Options"),
        )
        messages = Storage(
            ERROR = T("Error: could not update notification settings"),
            SUCCESS = T("Notification settings updated"),
        )
        # Get current subscription settings resp. form defaults
        subscription = self._get_subscription()
        # Formstyle bootstrap
        formstyle = SQLFORM.formstyles.bootstrap
        # Initialize form
        # The hidden field is filled client-side with the serialized filters
        form = FORM(_id="subscription-form",
                    hidden={"subscription-filters": ""})
        # Deactivated: resource selector
        #options = []
        #selected_resources = set()
        #subscribed = subscription["resources"]
        #for idx, rconfig in enumerate(resources):
            #options.append((idx, rconfig["label"]))
            #if subscribed:
                #for s in subscribed:
                    #if s.resource == rconfig["resource"] and \
                       #s.url == rconfig["url"]:
                        #selected_resources.add(idx)
        #dummy = Storage(name="resources", requires = IS_IN_SET(options))
        #selector = S3GroupedOptionsWidget(cols=2)
        #row = ("resource_selector__row",
               #"%s:" % labels.RESOURCES,
               #selector(dummy,
                        #list(selected_resources),
                        #_id="resource_selector"),
               #"")
        #fieldset = formstyle(form, [row])
        #form.append(fieldset)
        # Filters
        filter_form = S3FilterForm(filters, clear=False)
        fieldset = FIELDSET(filter_form.fields(None,
                                               subscription["get_vars"]),
                            _id="subscription-filter-form")
        form.append(fieldset)
        # Notification options
        rows = []
        stable = current.s3db.pr_subscription
        selector = S3GroupedOptionsWidget(cols=1)
        rows.append(("trigger_selector__row",
                     "%s:" % labels.NOTIFY_ON,
                     selector(stable.notify_on,
                              subscription["notify_on"],
                              _id="trigger_selector"),
                     ""))
        switch = S3GroupedOptionsWidget(cols=1, multiple=False, sort=False)
        rows.append(("frequency_selector__row",
                     "%s:" % labels.FREQUENCY,
                     switch(stable.frequency,
                            subscription["frequency"],
                            _id="frequency_selector"),
                     ""))
        # Deactivated: method selector
        #rows.append(("method_selector__row",
                     #"%s:" % labels.NOTIFY_BY,
                     #selector(stable.method,
                              #subscription["method"],
                              #_id="method_selector"),
                     #""))
        fieldset = formstyle(form, rows)
        # More/Less toggle for the notification options
        fieldset.insert(0,
                        DIV(SPAN([I(_class="icon-reorder"), labels.MORE],
                                 _class="toggle-text",
                                 _style="display:none"),
                            SPAN([I(_class="icon-reorder"), labels.LESS],
                                 _class="toggle-text"),
                            _id="notification-options",
                            _class="control-group"))
        form.append(fieldset)
        # Submit button
        row = ("submit__row", "",
               INPUT(_type="submit", _value="Update Settings"), "")
        fieldset = formstyle(form, [row])
        form.append(fieldset)
        # Script (to extract filters on submit and toggle options visibility)
        script = URL(c="static", f="scripts", args=["S3", "s3.subscriptions.js"])
        response = current.response
        response.s3.scripts.append(script)
        # Accept form
        if form.accepts(current.request.post_vars,
                        current.session,
                        formname="subscription",
                        keepvalues=True):
            formvars = form.vars
            # Normalise single values to lists (None stays None)
            listify = lambda x: None if not x else x if type(x) is list else [x]
            # Fixed resource selection:
            subscription["subscribe"] = [resources[0]]
            # Alternatively, with resource selector:
            #subscribe = listify(formvars.resources)
            #if subscribe:
                #subscription["subscribe"] = \
                        #[r for idx, r in enumerate(resources)
                           #if str(idx) in subscribe]
            subscription["filters"] = form.request_vars \
                                          .get("subscription-filters", None)
            subscription["notify_on"] = listify(formvars.notify_on)
            subscription["frequency"] = formvars.frequency
            # Fixed method:
            subscription["method"] = ["EMAIL"]
            # Alternatively, with method selector:
            #subscription["method"] = listify(formvars.method)
            success = self._update_subscription(subscription)
            if success:
                response.confirmation = messages.SUCCESS
            else:
                response.error = messages.ERROR
        return form
    # -------------------------------------------------------------------------
    def _get_subscription(self):
        """
            Get current subscription settings

            @return: dict with id, filter_id, get_vars, resources,
                     notify_on, frequency and method (form defaults if
                     the user has no subscription yet)
        """
        db = current.db
        s3db = current.s3db
        pe_id = current.auth.user.pe_id
        stable = s3db.pr_subscription
        ftable = s3db.pr_filter
        query = (stable.pe_id == pe_id) & \
                (stable.deleted != True)
        left = ftable.on(ftable.id == stable.filter_id)
        row = db(query).select(stable.id,
                               stable.notify_on,
                               stable.frequency,
                               #stable.method,
                               ftable.id,
                               ftable.query,
                               left=left,
                               limitby=(0, 1)).first()
        output = {"pe_id": pe_id}
        get_vars = {}
        if row:
            # Existing settings
            s = getattr(row, "pr_subscription")
            f = getattr(row, "pr_filter")
            rtable = s3db.pr_subscription_resource
            query = (rtable.subscription_id == s.id) & \
                    (rtable.deleted != True)
            rows = db(query).select(rtable.id,
                                    rtable.resource,
                                    rtable.url,
                                    rtable.last_check_time,
                                    rtable.next_check_time)
            if f.query:
                # Rebuild GET vars from the stored [selector, value] pairs,
                # collecting repeated selectors into lists
                filters = json.loads(f.query)
                for k, v in filters:
                    if v is None:
                        continue
                    if k in get_vars:
                        if type(get_vars[k]) is list:
                            get_vars[k].append(v)
                        else:
                            get_vars[k] = [get_vars[k], v]
                    else:
                        get_vars[k] = v
            output.update({"id": s.id,
                           "filter_id": f.id,
                           "get_vars" : get_vars,
                           "resources": rows,
                           "notify_on": s.notify_on,
                           "frequency": s.frequency,
                           "method": ["EMAIL"] #s.method,
                           })
        else:
            # Form defaults
            output.update({"id": None,
                           "filter_id": None,
                           "get_vars" : get_vars,
                           "resources": None,
                           "notify_on": stable.notify_on.default,
                           "frequency": stable.frequency.default,
                           "method": ["EMAIL"] #stable.method.default
                           })
        return output
    # -------------------------------------------------------------------------
    def _update_subscription(self, subscription):
        """
            Update subscription settings

            @param subscription: dict as produced by _get_subscription()
                                 plus "subscribe", "filters" keys
            @return: the updated subscription dict, or None on DB failure
        """
        db = current.db
        s3db = current.s3db
        pe_id = subscription["pe_id"]
        # Save filters
        filter_id = subscription["filter_id"]
        filters = subscription.get("filters")
        if filters:
            ftable = s3db.pr_filter
            if not filter_id:
                success = ftable.insert(pe_id=pe_id, query=filters)
                filter_id = success
            else:
                success = db(ftable.id == filter_id).update(query=filters)
            if not success:
                return None
        # Save subscription settings
        stable = s3db.pr_subscription
        subscription_id = subscription["id"]
        frequency = subscription["frequency"]
        if not subscription_id:
            success = stable.insert(pe_id=pe_id,
                                    filter_id=filter_id,
                                    notify_on=subscription["notify_on"],
                                    frequency=frequency,
                                    method=subscription["method"])
            subscription_id = success
        else:
            success = db(stable.id == subscription_id).update(
                            pe_id=pe_id,
                            filter_id=filter_id,
                            notify_on=subscription["notify_on"],
                            frequency=frequency,
                            method=subscription["method"])
        if not success:
            return None
        # Save subscriptions
        rtable = s3db.pr_subscription_resource
        subscribe = subscription.get("subscribe")
        if subscribe:
            from datetime import datetime, timedelta
            now = datetime.utcnow()
            resources = subscription["resources"]
            # Index existing (resource, url) pairs and their check times
            subscribed = {}
            timestamps = {}
            if resources:
                for r in resources:
                    subscribed[(r.resource, r.url)] = r.id
                    timestamps[r.id] = (r.last_check_time,
                                        r.next_check_time)
            intervals = s3db.pr_subscription_check_intervals
            interval = timedelta(minutes=intervals.get(frequency, 0))
            keep = set()
            fk = '''{"subscription_id": %s}''' % subscription_id
            for new in subscribe:
                resource, url = new["resource"], new["url"]
                if (resource, url) not in subscribed:
                    # Restore subscription if previously unsubscribed, else
                    # insert new record
                    unsubscribed = {"deleted": True,
                                    "deleted_fk": fk,
                                    "resource": resource,
                                    "url": url}
                    rtable.update_or_insert(_key=unsubscribed,
                                            deleted=False,
                                            deleted_fk=None,
                                            subscription_id=subscription_id,
                                            resource=resource,
                                            url=url,
                                            last_check_time=now,
                                            next_check_time=None)
                else:
                    # Keep it
                    record_id = subscribed[(resource, url)]
                    last_check_time, next_check_time = timestamps[record_id]
                    data = {}
                    if not last_check_time:
                        # Someone has tampered with the timestamps, so
                        # we need to reset them and start over
                        last_check_time = now
                        data["last_check_time"] = last_check_time
                    due = last_check_time + interval
                    if next_check_time != due:
                        # Time interval has changed
                        data["next_check_time"] = due
                    if data:
                        db(rtable.id == record_id).update(**data)
                    keep.add(record_id)
            # Unsubscribe all others
            unsubscribe = set(subscribed.values()) - keep
            db(rtable.id.belongs(unsubscribe)).update(deleted=True,
                                                      deleted_fk=fk,
                                                      subscription_id=None)
        # Update subscription
        subscription["id"] = subscription_id
        subscription["filter_id"] = filter_id
        return subscription
# END =========================================================================
| |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute stats, infer schema, and validate stats for chicago taxi example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.python.lib.io import file_io
import tensorflow_data_validation as tfdv
from tensorflow_metadata.proto.v0 import statistics_pb2
import apache_beam as beam
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.testing.load_tests.load_test_metrics_utils import MeasureTime
from apache_beam.testing.load_tests.load_test_metrics_utils import MetricsReader
from google.protobuf import text_format
from trainer import taxi
def infer_schema(stats_path, schema_path):
    """Infer a schema from previously computed statistics.

    Args:
      stats_path: Location of the stats used to infer the schema.
      schema_path: Location where the inferred schema is materialized.
    """
    print('Infering schema from statistics.')
    stats = tfdv.load_statistics(stats_path)
    schema = tfdv.infer_schema(stats, infer_feature_shape=False)
    # Serialize once and reuse for both the console echo and the file write.
    schema_text = text_format.MessageToString(schema)
    print(schema_text)
    print('Writing schema to output path.')
    file_io.write_string_to_file(schema_path, schema_text)
def validate_stats(stats_path, schema_path, anomalies_path):
    """Validate computed statistics against a schema and write out anomalies.

    Args:
      stats_path: Location of the stats used to infer the schema.
      schema_path: Location of the schema to be used for validation.
      anomalies_path: Location where the detected anomalies are materialized.
    """
    print('Validating schema against the computed statistics.')
    schema = taxi.read_schema(schema_path)
    stats = tfdv.load_statistics(stats_path)
    anomalies = tfdv.validate_statistics(stats, schema)
    print('Detected following anomalies:')
    # Serialize once; the same text is echoed and persisted.
    anomalies_text = text_format.MessageToString(anomalies)
    print(anomalies_text)
    print('Writing anomalies to anomalies path.')
    file_io.write_string_to_file(anomalies_path, anomalies_text)
def compute_stats(input_handle,
                  stats_path,
                  max_rows=None,
                  for_eval=False,
                  pipeline_args=None,
                  publish_to_bq=None,
                  metrics_dataset=None,
                  metrics_table=None,
                  project=None):
    """Computes statistics on the input data.

    Args:
      input_handle: BigQuery table name to process specified as DATASET.TABLE or
        path to csv file with input data.
      stats_path: Directory in which stats are materialized.
      max_rows: Number of rows to query from BigQuery
      for_eval: Query for eval set rows from BigQuery
      pipeline_args: additional DataflowRunner or DirectRunner args passed to the
        beam pipeline.
      publish_to_bq: If truthy, publish Beam pipeline metrics to BigQuery.
      metrics_dataset: BigQuery dataset receiving the published metrics.
      metrics_table: BigQuery table for the metrics; also used as the Beam
        metrics namespace.
      project: GCP project used for metrics reporting.
    """
    # The metrics namespace doubles as the destination metrics table name.
    namespace = metrics_table
    pipeline = beam.Pipeline(argv=pipeline_args)
    metrics_monitor = None
    if publish_to_bq:
        metrics_monitor = MetricsReader(
            project_name=project,
            bq_table=metrics_table,
            bq_dataset=metrics_dataset,
            filters=MetricsFilter().with_namespace(namespace),
        )
    # NOTE(review): despite the docstring, only the BigQuery input path is
    # implemented here -- there is no csv branch.
    query = taxi.make_sql(
        table_name=input_handle, max_rows=max_rows, for_eval=for_eval)
    raw_data = (
        pipeline
        | 'ReadBigQuery' >> beam.io.Read(
            beam.io.BigQuerySource(query=query, use_standard_sql=True))
        | 'Measure time: Start' >> beam.ParDo(MeasureTime(namespace))
        # Drop None-valued columns and wrap each remaining value in a numpy
        # array -- the input format TFDV's GenerateStatistics expects.
        | 'ConvertToTFDVInput' >> beam.Map(
            lambda x: {key: np.asarray([x[key]])
                       for key in x if x[key] is not None}))
    _ = (
        raw_data
        | 'GenerateStatistics' >> tfdv.GenerateStatistics()
        | 'Measure time: End' >> beam.ParDo(MeasureTime(namespace))
        # shard_name_template='' forces a single unsharded output file.
        | 'WriteStatsOutput' >> beam.io.WriteToTFRecord(
            stats_path,
            shard_name_template='',
            coder=beam.coders.ProtoCoder(
                statistics_pb2.DatasetFeatureStatisticsList)))
    result = pipeline.run()
    result.wait_until_finish()
    if metrics_monitor:
        metrics_monitor.publish_metrics(result)
def main():
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
help=('Input BigQuery table to process specified as: '
'DATASET.TABLE or path to csv file with input data.'))
parser.add_argument(
'--stats_path',
help='Location for the computed stats to be materialized.')
parser.add_argument(
'--for_eval',
help='Query for eval set rows from BigQuery',
action='store_true')
parser.add_argument(
'--max_rows',
help='Number of rows to query from BigQuery',
default=None,
type=int)
parser.add_argument(
'--schema_path',
help='Location for the computed schema is located.',
default=None,
type=str)
parser.add_argument(
'--infer_schema',
help='If specified, also infers a schema based on the computed stats.',
action='store_true')
parser.add_argument(
'--validate_stats',
help='If specified, also validates the stats against the schema.',
action='store_true')
parser.add_argument(
'--anomalies_path',
help='Location for detected anomalies are materialized.',
default=None,
type=str)
parser.add_argument(
'--publish_to_big_query',
help='Whether to publish to BQ',
default=None,
type=bool)
parser.add_argument(
'--metrics_dataset',
help='BQ dataset',
default=None,
type=str)
parser.add_argument(
'--metrics_table',
help='BQ table',
default=None,
type=str)
parser.add_argument(
'--metric_reporting_project',
help='BQ table project',
default=None,
type=str)
known_args, pipeline_args = parser.parse_known_args()
compute_stats(
input_handle=known_args.input,
stats_path=known_args.stats_path,
max_rows=known_args.max_rows,
for_eval=known_args.for_eval,
pipeline_args=pipeline_args,
publish_to_bq=known_args.publish_to_big_query,
metrics_dataset=known_args.metrics_dataset,
metrics_table=known_args.metrics_table,
project=known_args.metric_reporting_project)
print('Stats computation done.')
if known_args.infer_schema:
infer_schema(
stats_path=known_args.stats_path, schema_path=known_args.schema_path)
if known_args.validate_stats:
validate_stats(
stats_path=known_args.stats_path,
schema_path=known_args.schema_path,
anomalies_path=known_args.anomalies_path)
if __name__ == '__main__':
main()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Configuration of the worker"""
import os
from typing import Dict, List
import kubernetes.client.models as k8s
from airflow.configuration import conf
from airflow.kubernetes.k8s_model import append_to_pod
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.kubernetes.secret import Secret
from airflow.utils.log.logging_mixin import LoggingMixin
class WorkerConfiguration(LoggingMixin):
    """
    Contains Kubernetes Airflow Worker configuration logic

    :param kube_config: the kubernetes configuration from airflow.cfg
    :type kube_config: airflow.executors.kubernetes_executor.KubeConfig
    """

    # Well-known volume names; _get_volumes() and _get_volume_mounts()
    # must agree on these keys.
    dags_volume_name = 'airflow-dags'
    logs_volume_name = 'airflow-logs'
    git_sync_ssh_secret_volume_name = 'git-sync-ssh-key'
    git_ssh_key_secret_key = 'gitSshKey'
    git_sync_ssh_known_hosts_volume_name = 'git-sync-known-hosts'

    def __init__(self, kube_config):
        # Cache worker-side paths; they may differ from the scheduler's view.
        self.kube_config = kube_config
        self.worker_airflow_home = self.kube_config.airflow_home
        self.worker_airflow_dags = self.kube_config.dags_folder
        self.worker_airflow_logs = self.kube_config.base_log_folder
        super().__init__()

    def _get_init_containers(self) -> List[k8s.V1Container]:
        """When using git to retrieve the DAGs, use the GitSync Init Container"""
        # If we're using volume claims to mount the dags, no init container is needed
        if self.kube_config.dags_volume_claim or \
                self.kube_config.dags_volume_host or self.kube_config.dags_in_image:
            return []
        # Otherwise, define a git-sync init container
        init_environment = [k8s.V1EnvVar(
            name='GIT_SYNC_REPO',
            value=self.kube_config.git_repo
        ), k8s.V1EnvVar(
            name='GIT_SYNC_BRANCH',
            value=self.kube_config.git_branch
        ), k8s.V1EnvVar(
            name='GIT_SYNC_ROOT',
            value=self.kube_config.git_sync_root
        ), k8s.V1EnvVar(
            name='GIT_SYNC_DEST',
            value=self.kube_config.git_sync_dest
        ), k8s.V1EnvVar(
            name='GIT_SYNC_REV',
            value=self.kube_config.git_sync_rev
        ), k8s.V1EnvVar(
            name='GIT_SYNC_DEPTH',
            value='1'
        ), k8s.V1EnvVar(
            name='GIT_SYNC_ONE_TIME',
            value='true'
        )]
        if self.kube_config.git_user:
            init_environment.append(k8s.V1EnvVar(
                name='GIT_SYNC_USERNAME',
                value=self.kube_config.git_user
            ))
        if self.kube_config.git_password:
            init_environment.append(k8s.V1EnvVar(
                name='GIT_SYNC_PASSWORD',
                value=self.kube_config.git_password
            ))
        # The synced repo lands in the shared dags volume, writable for git-sync.
        volume_mounts = [k8s.V1VolumeMount(
            mount_path=self.kube_config.git_sync_root,
            name=self.dags_volume_name,
            read_only=False
        )]
        # NOTE(review): if git_user/git_password were also set above, these
        # secret-backed entries duplicate the same env var names -- confirm
        # which one git-sync honors.
        if self.kube_config.git_sync_credentials_secret:
            init_environment.extend([
                k8s.V1EnvVar(
                    name='GIT_SYNC_USERNAME',
                    value_from=k8s.V1EnvVarSource(
                        secret_key_ref=k8s.V1SecretKeySelector(
                            name=self.kube_config.git_sync_credentials_secret,
                            key='GIT_SYNC_USERNAME')
                    )
                ),
                k8s.V1EnvVar(
                    name='GIT_SYNC_PASSWORD',
                    value_from=k8s.V1EnvVarSource(
                        secret_key_ref=k8s.V1SecretKeySelector(
                            name=self.kube_config.git_sync_credentials_secret,
                            key='GIT_SYNC_PASSWORD')
                    )
                )
            ])
        if self.kube_config.git_ssh_key_secret_name:
            volume_mounts.append(k8s.V1VolumeMount(
                name=self.git_sync_ssh_secret_volume_name,
                mount_path='/etc/git-secret/ssh',
                sub_path='ssh'
            ))
            init_environment.extend([
                k8s.V1EnvVar(
                    name='GIT_SSH_KEY_FILE',
                    value='/etc/git-secret/ssh'
                ),
                k8s.V1EnvVar(
                    name='GIT_SYNC_SSH',
                    value='true'
                )
            ])
        if self.kube_config.git_ssh_known_hosts_configmap_name:
            volume_mounts.append(k8s.V1VolumeMount(
                name=self.git_sync_ssh_known_hosts_volume_name,
                mount_path='/etc/git-secret/known_hosts',
                sub_path='known_hosts'
            ))
            init_environment.extend([k8s.V1EnvVar(
                name='GIT_KNOWN_HOSTS',
                value='true'
            ), k8s.V1EnvVar(
                name='GIT_SSH_KNOWN_HOSTS_FILE',
                value='/etc/git-secret/known_hosts'
            )])
        else:
            # No known_hosts configmap: tell git-sync to skip host verification.
            init_environment.append(k8s.V1EnvVar(
                name='GIT_KNOWN_HOSTS',
                value='false'
            ))
        init_containers = k8s.V1Container(
            name=self.kube_config.git_sync_init_container_name,
            image=self.kube_config.git_sync_container,
            env=init_environment,
            volume_mounts=volume_mounts
        )
        if self.kube_config.git_sync_run_as_user != "":
            init_containers.security_context = k8s.V1SecurityContext(
                run_as_user=self.kube_config.git_sync_run_as_user
            )  # git-sync user
        return [init_containers]

    def _get_environment(self) -> Dict[str, str]:
        """Defines any necessary environment variables for the pod executor"""
        env = {}
        for env_var_name, env_var_val in self.kube_config.kube_env_vars.items():
            env[env_var_name] = env_var_val
        # Each worker pod runs its task with a LocalExecutor internally.
        env["AIRFLOW__CORE__EXECUTOR"] = "LocalExecutor"
        if self.kube_config.airflow_configmap:
            env['AIRFLOW_HOME'] = self.worker_airflow_home
            env['AIRFLOW__CORE__DAGS_FOLDER'] = self.worker_airflow_dags
        # Only inject the DB connection string when it cannot come from the
        # configmap or from a Kubernetes secret.
        if (not self.kube_config.airflow_configmap and
                'AIRFLOW__CORE__SQL_ALCHEMY_CONN' not in self.kube_config.kube_secrets):
            env['AIRFLOW__CORE__SQL_ALCHEMY_CONN'] = conf.get("core", "SQL_ALCHEMY_CONN")
        if self.kube_config.git_dags_folder_mount_point:
            # /root/airflow/dags/repo/dags
            dag_volume_mount_path = os.path.join(
                self.kube_config.git_dags_folder_mount_point,
                self.kube_config.git_sync_dest,  # repo
                self.kube_config.git_subpath  # dags
            )
            env['AIRFLOW__CORE__DAGS_FOLDER'] = dag_volume_mount_path
        return env

    def _get_env_from(self) -> List[k8s.V1EnvFromSource]:
        """Extracts any configmapRefs to envFrom"""
        # Both settings are comma-separated lists of configmap/secret names.
        env_from = []
        if self.kube_config.env_from_configmap_ref:
            for config_map_ref in self.kube_config.env_from_configmap_ref.split(','):
                env_from.append(
                    k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(config_map_ref))
                )
        if self.kube_config.env_from_secret_ref:
            for secret_ref in self.kube_config.env_from_secret_ref.split(','):
                env_from.append(
                    k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(secret_ref))
                )
        return env_from

    def _get_secrets(self) -> List[Secret]:
        """Defines any necessary secrets for the pod executor"""
        worker_secrets = []
        # kube_secrets maps ENV_VAR_NAME -> "<secret-object>=<secret-key>".
        for env_var_name, obj_key_pair in self.kube_config.kube_secrets.items():
            k8s_secret_obj, k8s_secret_key = obj_key_pair.split('=')
            worker_secrets.append(
                Secret('env', env_var_name, k8s_secret_obj, k8s_secret_key)
            )
        if self.kube_config.env_from_secret_ref:
            for secret_ref in self.kube_config.env_from_secret_ref.split(','):
                worker_secrets.append(
                    Secret('env', None, secret_ref)
                )
        return worker_secrets

    def _get_security_context(self) -> k8s.V1PodSecurityContext:
        """Defines the security context"""
        security_context = k8s.V1PodSecurityContext()
        if self.kube_config.worker_run_as_user != "":
            security_context.run_as_user = self.kube_config.worker_run_as_user
        if self.kube_config.worker_fs_group != "":
            security_context.fs_group = self.kube_config.worker_fs_group
        # set fs_group to 65533 if not explicitly specified and using git ssh keypair auth
        if self.kube_config.git_ssh_key_secret_name and security_context.fs_group is None:
            security_context.fs_group = 65533
        return security_context

    def _get_labels(self, kube_executor_labels, labels) -> Dict[str, str]:
        # Merge config labels, executor labels and per-task labels;
        # later sources win on key collisions.
        copy = self.kube_config.kube_labels.copy()
        copy.update(kube_executor_labels)
        copy.update(labels)
        return copy

    def _get_volume_mounts(self) -> List[k8s.V1VolumeMount]:
        """Builds the volume mounts matching the volumes from _get_volumes()."""
        volume_mounts = {
            self.dags_volume_name: k8s.V1VolumeMount(
                name=self.dags_volume_name,
                mount_path=self.generate_dag_volume_mount_path(),
                read_only=True,
            ),
            self.logs_volume_name: k8s.V1VolumeMount(
                name=self.logs_volume_name,
                mount_path=self.worker_airflow_logs,
            )
        }
        if self.kube_config.dags_volume_subpath:
            volume_mounts[self.dags_volume_name].sub_path = self.kube_config.dags_volume_subpath
        if self.kube_config.logs_volume_subpath:
            volume_mounts[self.logs_volume_name].sub_path = self.kube_config.logs_volume_subpath
        if self.kube_config.dags_in_image:
            # DAGs are baked into the worker image; no dags mount needed.
            del volume_mounts[self.dags_volume_name]
        # Mount the airflow.cfg file via a configmap the user has specified
        if self.kube_config.airflow_configmap:
            config_volume_name = 'airflow-config'
            config_path = '{}/airflow.cfg'.format(self.worker_airflow_home)
            volume_mounts[config_volume_name] = k8s.V1VolumeMount(
                name=config_volume_name,
                mount_path=config_path,
                sub_path='airflow.cfg',
                read_only=True
            )
        # Mount the airflow_local_settings.py file via a configmap the user has specified
        if self.kube_config.airflow_local_settings_configmap:
            config_volume_name = 'airflow-local-settings'
            config_path = '{}/config/airflow_local_settings.py'.format(self.worker_airflow_home)
            # NOTE(review): the mount's `name` is the literal 'airflow-config',
            # not `config_volume_name` -- it references the volume keyed
            # 'airflow-config' in _get_volumes(); confirm this is intended.
            volume_mounts[config_volume_name] = k8s.V1VolumeMount(
                name='airflow-config',
                mount_path=config_path,
                sub_path='airflow_local_settings.py',
                read_only=True
            )
        return list(volume_mounts.values())

    def _get_volumes(self) -> List[k8s.V1Volume]:
        """Builds the pod volumes (dags, logs, git/ssh secrets, configmaps)."""
        def _construct_volume(name, claim, host) -> k8s.V1Volume:
            # Backing store preference: PVC, then hostPath, finally emptyDir.
            volume = k8s.V1Volume(name=name)
            if claim:
                volume.persistent_volume_claim = k8s.V1PersistentVolumeClaimVolumeSource(
                    claim_name=claim
                )
            elif host:
                volume.host_path = k8s.V1HostPathVolumeSource(
                    path=host,
                    type=''
                )
            else:
                volume.empty_dir = {}
            return volume
        volumes = {
            self.dags_volume_name: _construct_volume(
                self.dags_volume_name,
                self.kube_config.dags_volume_claim,
                self.kube_config.dags_volume_host
            ),
            self.logs_volume_name: _construct_volume(
                self.logs_volume_name,
                self.kube_config.logs_volume_claim,
                self.kube_config.logs_volume_host
            )
        }
        if self.kube_config.dags_in_image:
            del volumes[self.dags_volume_name]
        # Get the SSH key from secrets as a volume
        if self.kube_config.git_ssh_key_secret_name:
            volumes[self.git_sync_ssh_secret_volume_name] = k8s.V1Volume(
                name=self.git_sync_ssh_secret_volume_name,
                secret=k8s.V1SecretVolumeSource(
                    secret_name=self.kube_config.git_ssh_key_secret_name,
                    items=[k8s.V1KeyToPath(
                        key=self.git_ssh_key_secret_key,
                        path='ssh',
                        mode=0o440
                    )]
                )
            )
        if self.kube_config.git_ssh_known_hosts_configmap_name:
            volumes[self.git_sync_ssh_known_hosts_volume_name] = k8s.V1Volume(
                name=self.git_sync_ssh_known_hosts_volume_name,
                config_map=k8s.V1ConfigMapVolumeSource(
                    name=self.kube_config.git_ssh_known_hosts_configmap_name,
                    default_mode=0o440
                )
            )
        # Mount the airflow_local_settings.py file via a configmap the user has specified
        # NOTE(review): this branch and the airflow.cfg branch below share the
        # 'airflow-config' dict key, so when both configmaps are configured the
        # airflow.cfg volume replaces the local-settings one -- verify intended.
        if self.kube_config.airflow_local_settings_configmap:
            config_volume_name = 'airflow-config'
            volumes[config_volume_name] = k8s.V1Volume(
                name=config_volume_name,
                config_map=k8s.V1ConfigMapVolumeSource(
                    name=self.kube_config.airflow_local_settings_configmap
                )
            )
        # Mount the airflow.cfg file via a configmap the user has specified
        if self.kube_config.airflow_configmap:
            config_volume_name = 'airflow-config'
            volumes[config_volume_name] = k8s.V1Volume(
                name=config_volume_name,
                config_map=k8s.V1ConfigMapVolumeSource(
                    name=self.kube_config.airflow_configmap
                )
            )
        return list(volumes.values())

    def generate_dag_volume_mount_path(self) -> str:
        """Generate path for DAG volume"""
        # Claim/hostPath-backed dags live at the configured dags folder;
        # otherwise the git-sync mount point is used.
        if self.kube_config.dags_volume_claim or self.kube_config.dags_volume_host:
            return self.worker_airflow_dags
        return self.kube_config.git_dags_folder_mount_point

    def as_pod(self) -> k8s.V1Pod:
        """Creates POD."""
        # A user-supplied pod template file overrides everything else.
        if self.kube_config.pod_template_file:
            return PodGenerator(pod_template_file=self.kube_config.pod_template_file).gen_pod()
        pod = PodGenerator(
            image=self.kube_config.kube_image,
            image_pull_policy=self.kube_config.kube_image_pull_policy or 'IfNotPresent',
            image_pull_secrets=self.kube_config.image_pull_secrets,
            volumes=self._get_volumes(),
            volume_mounts=self._get_volume_mounts(),
            init_containers=self._get_init_containers(),
            annotations=self.kube_config.kube_annotations,
            affinity=self.kube_config.kube_affinity,
            tolerations=self.kube_config.kube_tolerations,
            envs=self._get_environment(),
            node_selectors=self.kube_config.kube_node_selectors,
            service_account_name=self.kube_config.worker_service_account_name or 'default',
            restart_policy='Never'
        ).gen_pod()
        # env_from may be None on a freshly generated pod; normalize first.
        pod.spec.containers[0].env_from = pod.spec.containers[0].env_from or []
        pod.spec.containers[0].env_from.extend(self._get_env_from())
        pod.spec.security_context = self._get_security_context()
        return append_to_pod(pod, self._get_secrets())
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
import mock
import six
from openstack import exceptions
from openstack.orchestration.v1 import _proxy
from openstack.orchestration.v1 import resource
from openstack.orchestration.v1 import software_config as sc
from openstack.orchestration.v1 import software_deployment as sd
from openstack.orchestration.v1 import stack
from openstack.orchestration.v1 import stack_environment
from openstack.orchestration.v1 import stack_files
from openstack.orchestration.v1 import stack_template
from openstack.orchestration.v1 import template
from openstack.tests.unit import test_proxy_base
class TestOrchestrationProxy(test_proxy_base.TestProxyBase):
    """Unit tests for the orchestration v1 Proxy CRUD and action methods.

    The verify_* helpers come from test_proxy_base.TestProxyBase; they mock
    the generic Proxy._create/_find/_get/... plumbing and check the proxy
    method forwards the expected args.
    """

    def setUp(self):
        super(TestOrchestrationProxy, self).setUp()
        self.proxy = _proxy.Proxy(self.session)

    def test_create_stack(self):
        self.verify_create(self.proxy.create_stack, stack.Stack)

    def test_create_stack_preview(self):
        # Extra kwargs must be passed through untouched alongside preview=True.
        method_kwargs = {"preview": True, "x": 1, "y": 2, "z": 3}
        self.verify_create(self.proxy.create_stack, stack.Stack,
                           method_kwargs=method_kwargs)

    def test_find_stack(self):
        self.verify_find(self.proxy.find_stack, stack.Stack,
                         expected_kwargs={'resolve_outputs': True})
        # A more explicit verification, kept for reference:
        # mock_method="openstack.proxy.Proxy._find"
        # test_method=self.proxy.find_stack
        # method_kwargs = {
        #     'resolve_outputs': False,
        #     'ignore_missing': False
        # }
        # method_args=["name_or_id"]
        # self._verify2(mock_method, test_method,
        #               method_args=method_args,
        #               method_kwargs=method_kwargs,
        #               expected_args=[stack.Stack, "name_or_id"],
        #               expected_kwargs=method_kwargs,
        #               expected_result="result")
        #
        # method_kwargs = {
        #     'resolve_outputs': True,
        #     'ignore_missing': True
        # }
        # self._verify2(mock_method, test_method,
        #               method_args=method_args,
        #               method_kwargs=method_kwargs,
        #               expected_args=[stack.Stack, "name_or_id"],
        #               expected_kwargs=method_kwargs,
        #               expected_result="result")

    def test_stacks(self):
        self.verify_list(self.proxy.stacks, stack.Stack)

    def test_get_stack(self):
        self.verify_get(self.proxy.get_stack, stack.Stack,
                        method_kwargs={'resolve_outputs': False},
                        expected_kwargs={'resolve_outputs': False})
        self.verify_get_overrided(
            self.proxy, stack.Stack,
            'openstack.orchestration.v1.stack.Stack')

    def test_update_stack(self):
        self._verify2('openstack.orchestration.v1.stack.Stack.update',
                      self.proxy.update_stack,
                      expected_result='result',
                      method_args=['stack'],
                      method_kwargs={'preview': False},
                      expected_args=[self.proxy, False])

    def test_update_stack_preview(self):
        self._verify2('openstack.orchestration.v1.stack.Stack.update',
                      self.proxy.update_stack,
                      expected_result='result',
                      method_args=['stack'],
                      method_kwargs={'preview': True},
                      expected_args=[self.proxy, True])

    def test_abandon_stack(self):
        self._verify2('openstack.orchestration.v1.stack.Stack.abandon',
                      self.proxy.abandon_stack,
                      expected_result='result',
                      method_args=['stack'],
                      expected_args=[self.proxy])

    def test_delete_stack(self):
        self.verify_delete(self.proxy.delete_stack, stack.Stack, False)

    def test_delete_stack_ignore(self):
        self.verify_delete(self.proxy.delete_stack, stack.Stack, True)

    @mock.patch.object(stack.Stack, 'check')
    def test_check_stack_with_stack_object(self, mock_check):
        stk = stack.Stack(id='FAKE_ID')
        res = self.proxy.check_stack(stk)
        self.assertIsNone(res)
        mock_check.assert_called_once_with(self.proxy)

    @mock.patch.object(stack.Stack, 'existing')
    def test_check_stack_with_stack_ID(self, mock_stack):
        # A plain ID should be promoted to a Stack via Stack.existing().
        stk = mock.Mock()
        mock_stack.return_value = stk
        res = self.proxy.check_stack('FAKE_ID')
        self.assertIsNone(res)
        mock_stack.assert_called_once_with(id='FAKE_ID')
        stk.check.assert_called_once_with(self.proxy)

    @mock.patch.object(stack.Stack, 'find')
    def test_get_stack_environment_with_stack_identity(self, mock_find):
        stack_id = '1234'
        stack_name = 'test_stack'
        stk = stack.Stack(id=stack_id, name=stack_name)
        mock_find.return_value = stk
        self._verify2('openstack.proxy.Proxy._get',
                      self.proxy.get_stack_environment,
                      method_args=['IDENTITY'],
                      expected_args=[stack_environment.StackEnvironment],
                      expected_kwargs={'requires_id': False,
                                       'stack_name': stack_name,
                                       'stack_id': stack_id})
        mock_find.assert_called_once_with(mock.ANY, 'IDENTITY',
                                          ignore_missing=False)

    def test_get_stack_environment_with_stack_object(self):
        # Passing a Stack object must skip the find() round-trip.
        stack_id = '1234'
        stack_name = 'test_stack'
        stk = stack.Stack(id=stack_id, name=stack_name)
        self._verify2('openstack.proxy.Proxy._get',
                      self.proxy.get_stack_environment,
                      method_args=[stk],
                      expected_args=[stack_environment.StackEnvironment],
                      expected_kwargs={'requires_id': False,
                                       'stack_name': stack_name,
                                       'stack_id': stack_id})

    @mock.patch.object(stack_files.StackFiles, 'fetch')
    @mock.patch.object(stack.Stack, 'find')
    def test_get_stack_files_with_stack_identity(self, mock_find, mock_fetch):
        stack_id = '1234'
        stack_name = 'test_stack'
        stk = stack.Stack(id=stack_id, name=stack_name)
        mock_find.return_value = stk
        mock_fetch.return_value = {'file': 'content'}
        res = self.proxy.get_stack_files('IDENTITY')
        self.assertEqual({'file': 'content'}, res)
        mock_find.assert_called_once_with(mock.ANY, 'IDENTITY',
                                          ignore_missing=False)
        mock_fetch.assert_called_once_with(self.proxy)

    @mock.patch.object(stack_files.StackFiles, 'fetch')
    def test_get_stack_files_with_stack_object(self, mock_fetch):
        stack_id = '1234'
        stack_name = 'test_stack'
        stk = stack.Stack(id=stack_id, name=stack_name)
        mock_fetch.return_value = {'file': 'content'}
        res = self.proxy.get_stack_files(stk)
        self.assertEqual({'file': 'content'}, res)
        mock_fetch.assert_called_once_with(self.proxy)

    @mock.patch.object(stack.Stack, 'find')
    def test_get_stack_template_with_stack_identity(self, mock_find):
        stack_id = '1234'
        stack_name = 'test_stack'
        stk = stack.Stack(id=stack_id, name=stack_name)
        mock_find.return_value = stk
        self._verify2('openstack.proxy.Proxy._get',
                      self.proxy.get_stack_template,
                      method_args=['IDENTITY'],
                      expected_args=[stack_template.StackTemplate],
                      expected_kwargs={'requires_id': False,
                                       'stack_name': stack_name,
                                       'stack_id': stack_id})
        mock_find.assert_called_once_with(mock.ANY, 'IDENTITY',
                                          ignore_missing=False)

    def test_get_stack_template_with_stack_object(self):
        stack_id = '1234'
        stack_name = 'test_stack'
        stk = stack.Stack(id=stack_id, name=stack_name)
        self._verify2('openstack.proxy.Proxy._get',
                      self.proxy.get_stack_template,
                      method_args=[stk],
                      expected_args=[stack_template.StackTemplate],
                      expected_kwargs={'requires_id': False,
                                       'stack_name': stack_name,
                                       'stack_id': stack_id})

    @mock.patch.object(stack.Stack, 'find')
    def test_resources_with_stack_object(self, mock_find):
        stack_id = '1234'
        stack_name = 'test_stack'
        stk = stack.Stack(id=stack_id, name=stack_name)
        self.verify_list(self.proxy.resources, resource.Resource,
                         method_args=[stk],
                         expected_kwargs={'stack_name': stack_name,
                                          'stack_id': stack_id})
        # No lookup should happen when a Stack object was supplied.
        self.assertEqual(0, mock_find.call_count)

    @mock.patch.object(stack.Stack, 'find')
    def test_resources_with_stack_name(self, mock_find):
        stack_id = '1234'
        stack_name = 'test_stack'
        stk = stack.Stack(id=stack_id, name=stack_name)
        mock_find.return_value = stk
        self.verify_list(self.proxy.resources, resource.Resource,
                         method_args=[stack_id],
                         expected_kwargs={'stack_name': stack_name,
                                          'stack_id': stack_id})
        mock_find.assert_called_once_with(mock.ANY, stack_id,
                                          ignore_missing=False)

    @mock.patch.object(stack.Stack, 'find')
    @mock.patch.object(resource.Resource, 'list')
    def test_resources_stack_not_found(self, mock_list, mock_find):
        stack_name = 'test_stack'
        mock_find.side_effect = exceptions.ResourceNotFound(
            'No stack found for test_stack')
        ex = self.assertRaises(exceptions.ResourceNotFound,
                               self.proxy.resources, stack_name)
        self.assertEqual('No stack found for test_stack', six.text_type(ex))

    def test_create_software_config(self):
        self.verify_create(self.proxy.create_software_config,
                           sc.SoftwareConfig)

    def test_software_configs(self):
        self.verify_list(self.proxy.software_configs, sc.SoftwareConfig)

    def test_get_software_config(self):
        self.verify_get(self.proxy.get_software_config, sc.SoftwareConfig)

    def test_delete_software_config(self):
        self.verify_delete(self.proxy.delete_software_config,
                           sc.SoftwareConfig, True)
        self.verify_delete(self.proxy.delete_software_config,
                           sc.SoftwareConfig, False)

    def test_create_software_deployment(self):
        self.verify_create(self.proxy.create_software_deployment,
                           sd.SoftwareDeployment)

    def test_software_deployments(self):
        self.verify_list(self.proxy.software_deployments,
                         sd.SoftwareDeployment)

    def test_get_software_deployment(self):
        self.verify_get(self.proxy.get_software_deployment,
                        sd.SoftwareDeployment)

    def test_update_software_deployment(self):
        self.verify_update(self.proxy.update_software_deployment,
                           sd.SoftwareDeployment)

    def test_delete_software_deployment(self):
        self.verify_delete(self.proxy.delete_software_deployment,
                           sd.SoftwareDeployment, True)
        self.verify_delete(self.proxy.delete_software_deployment,
                           sd.SoftwareDeployment, False)

    @mock.patch.object(template.Template, 'validate')
    def test_validate_template(self, mock_validate):
        tmpl = mock.Mock()
        env = mock.Mock()
        tmpl_url = 'A_URI'
        ignore_errors = 'a_string'
        res = self.proxy.validate_template(tmpl, env, tmpl_url, ignore_errors)
        mock_validate.assert_called_once_with(
            self.proxy, tmpl, environment=env, template_url=tmpl_url,
            ignore_errors=ignore_errors)
        self.assertEqual(mock_validate.return_value, res)

    def test_validate_template_no_env(self):
        tmpl = "openstack/tests/unit/orchestration/v1/hello_world.yaml"
        res = self.proxy.read_env_and_templates(tmpl)
        self.assertIsInstance(res, dict)
        self.assertIsInstance(res["files"], dict)

    def test_validate_template_invalid_request(self):
        err = self.assertRaises(exceptions.InvalidRequest,
                                self.proxy.validate_template,
                                None, template_url=None)
        self.assertEqual("'template_url' must be specified when template is "
                         "None", six.text_type(err))
class TestExtractName(TestOrchestrationProxy):
    """Scenario tests for Proxy._extract_name URL-to-name-parts mapping."""

    # testscenarios expands each (label, attrs) pair into its own test case,
    # injecting `url` and `parts` as instance attributes.
    scenarios = [
        ('stacks', dict(url='/stacks', parts=['stacks'])),
        ('name_id', dict(url='/stacks/name/id', parts=['stack'])),
        ('identity', dict(url='/stacks/id', parts=['stack'])),
        ('preview', dict(url='/stacks/name/preview',
                         parts=['stack', 'preview'])),
        ('stack_act', dict(url='/stacks/name/id/preview',
                           parts=['stack', 'preview'])),
        ('stack_subres', dict(url='/stacks/name/id/resources',
                              parts=['stack', 'resources'])),
        ('stack_subres_id', dict(url='/stacks/name/id/resources/id',
                                 parts=['stack', 'resource'])),
        ('stack_subres_id_act',
         dict(url='/stacks/name/id/resources/id/action',
              parts=['stack', 'resource', 'action'])),
        ('event',
         dict(url='/stacks/ignore/ignore/resources/ignore/events/id',
              parts=['stack', 'resource', 'event'])),
        ('sd_metadata', dict(url='/software_deployments/metadata/ignore',
                             parts=['software_deployment', 'metadata']))
    ]

    def test_extract_name(self):
        results = self.proxy._extract_name(self.url)
        self.assertEqual(self.parts, results)
| |
#!/usr/bin/env python
"""
Written by Stefan Badelt (stef@tbi.univie.ac.at)
Department of Theoretical Chemistry, University of Vienna
http://www.tbi.univie.ac.at
vim-config = set: ts=2 et sw=2 sts=2
"""
import re
import sys
import argparse
import string
import math
import RNA
import ribolands as ril
def aptamer_energy(seq, ss, verb=False,
                   # Default Theophylline
                   apt='GAUACCAG' + '&' + 'CCCUUGGCAGC',
                   poc='(...((((' + '&' + ')...)))...)',
                   bfe=-8.86):  # at 25*C; -9.22 at 37*C
    """
    Check if a sequence/structure pair contains the
    ligand binding pocket (apt/poc). If so, return the
    binding free energy (bfe), otherwise return 0.
    Multiple pockets will return bfe multiple times!
    TODO: allow hairpin pockets (e.g. tetracycline)
    """
    # NOTE(review): `verb` is currently unused (only referenced in the
    # commented-out debug print below).
    # Split the aptamer sequence and pocket structure into 5'/3' halves.
    [aptL, aptR] = apt.split('&')
    [pocL, pocR] = poc.split('&')
    patL = re.compile(aptL)
    patR = re.compile(aptR)
    sites = 0
    for mL in patL.finditer(seq):
        # Structure under the left sequence match must equal the left
        # half of the pocket structure.
        if pocL != ss[mL.start():mL.end()]:
            continue
        for mR in patR.finditer(seq):
            # The right half must lie 3' of (after) the left half.
            if mR.start() < mL.end():
                continue
            if pocR != ss[mR.start():mR.end()]:
                continue
            # Now make sure that these are really base-pairs
            # (assumes ptable[i] gives the pairing partner of position i,
            # 0-based -- confirm against ribolands.make_pair_table)
            ptable = ril.make_pair_table(ss, base=0)
            if mL.start() == ptable[mR.end() - 1] and \
                    mL.end() - 1 == ptable[mR.start()] and \
                    mR.start() == ptable[mL.end() - 1] and \
                    mR.end() - 1 == ptable[mL.start()]:
                # if verb :
                #   print >> sys.stderr, "{:s} {:6.2f}".format(ss, bfe)
                sites += 1
    # Zero sites yields 0; each additional site adds bfe once more.
    return bfe * sites
def check_symmetry(sym, seq):
    """ Check if a cofolded sequence is a homo-dimer """
    # Monomers (no '&' cut point) never get a symmetry correction.
    if seq.find('&') == -1:
        return False
    [s1, s2] = seq.split('&')
    if sym:
        return (s1 == s2)
    elif (s1 == s2):
        # NOTE: Python 2 print-statement syntax -- this module targets Py2.
        print >> sys.stderr, "spatch.py: Ignoring symmetry correction for homo-dimer (see Option -s)!"
        return False
    # NOTE(review): hetero-dimer with sym=False falls through to an
    # implicit None return; callers treat that as falsy.
def is_symmetric(ss):
    """ See if a homo-dimer secondary structure has rotational symmetry
    Test e.g. with "CCCGGCCGGG&CCCGGCCGGG" suboptimals
    Of the following secondary structures:
    1 .(.(..(.(.&.).)..).).
    2 .(.(..(.).&.(.)..).).
    3 .(.(..(.).&.).)..(.).
    4 .(.)..(.).&.(.)..(.).
    5 .(.(..(.)..(.(.&.).)..(.)..).).
    6 .(.(..(.).(.(.&.).)..(.).).).
    7 .(.(..(.).(.(.&.).).(.)..).).
    only 1 and 5 are considered as rotational symmetric,
    only 6 and 7 are filtered by the fast exclusion of rotational symmetry
    :return: True/False
    """
    [s1, s2] = ss.split('&')
    """ fast way to exclude rotational symmetry """
    # Collapse both bracket types to 'x'; both halves must have the same
    # pairing footprint and that footprint must itself be a palindrome.
    # (string.maketrans is the Python 2 str API.)
    t1 = s1.translate(string.maketrans("()", "xx"))
    t2 = s2.translate(string.maketrans("()", "xx"))
    if t1 != t2 or t1 != t1[::-1]:
        return False
    """ slow way to ensure rotational symmetry """
    # Build a pair table for the first strand only; opens left on the
    # stack afterwards pair across the '&' cut point.
    stack = []
    pt = [0] * len(s1)
    for j, char in enumerate(s1):
        if char == '(':
            stack.append(j)
        elif char == ')':
            i = stack.pop()
            pt[i] = j
            pt[j] = i
    if not stack:
        """ Got a fake-dimer, no '&' crossing base-pairs """
        return False
    else:
        # Mark inter-strand pairs with None.
        for i in stack:
            pt[i] = None
    # Encode positions as: '|' inter-strand pair, '.' unpaired (pt value 0),
    # 'x' intra-strand pair; rotational symmetry <=> palindromic encoding.
    # NOTE(review): a pair involving position 0 also has pt value 0 and
    # would encode as '.' -- confirm that case cannot arise here.
    palin = ''
    for i in pt:
        if i is None:
            palin += '|'
        elif i == 0:
            palin += '.'
        else:
            palin += 'x'
    return (palin == palin[::-1])
def cofold_noLP_energy(ss, seq, noLC=False):
    """ Correct cofold energies returned from RNAsubopt.

    With *noLC*, structures that have a lonely pair directly adjacent to
    the '&' cut point are rejected (returns None). Otherwise the energy of
    the concatenated sequence is re-evaluated with the proper cut point.
    """
    if noLC:
        # These four motifs are lonely-pair patterns around the cut point.
        for motif in ('.(&', '&).', '&(.', '.)&'):
            if motif in ss:
                return None
    if seq:
        saved_cut = RNA.cvar.cut_point
        [r1, r2] = seq.split('&')
        [s1, s2] = ss.split('&')
        # Tell the energy evaluator where the two strands are joined.
        RNA.cvar.cut_point = len(r1) + 1
        energy = RNA.energy_of_structure(r1 + r2, s1 + s2, 0)
        RNA.cvar.cut_point = saved_cut
        return energy
def is_true_dimer(ss):
    """ return True if there is at least one
        base pair crossing the '&' character
    """
    # Brackets in the first strand are unbalanced exactly when at least one
    # '(' is closed on the other side of the '&'.
    first_strand = ss.partition('&')[0]
    return first_strand.count('(') != first_strand.count(')')
def main():
""" A collection of utils to modify the output of RNAsubopt
TODO: tetracycline binding pockets
TODO: logarithmic multiloops?
TODO: add temperature
"""
parser = argparse.ArgumentParser()
parser.add_argument("--theophylline",
help="Add an energy term (-8.86 kcal/mol) to strucutes with a \
theophylline binding pocket",
action="store_true")
parser.add_argument("-d", "--dimers",
help="Chose to print only true dimers, i.e structures with at least one \
base pair crossing the '&' character",
action="store_true")
parser.add_argument("-s", "--symmetry",
help="Add an entropic symmetry correction penalty to symmetric homo-dimers \
to compensate for their two-fold rotational symmetry",
action="store_true")
parser.add_argument("--fix_cofold_noLP",
help="Remove lonely pairs around the '&' character and correct the energy",
action="store_true")
""" Sthg to consider in the future
parser.add_argument("-T","--temperature",
help="Set the temperature for symmetry correction",
type=float,
default=37.0)
"""
parser.add_argument("-v", "--verbose",
help="Verbose output",
action="store_true")
args = parser.parse_args()
RT = 0.61632077549999997
seq = ''
""" Main loop, parse and modify RNAsubopt """
for e, line in enumerate(sys.stdin):
if e == 0:
[seq, mfe, enr] = line.strip().split()
args.symmetry = check_symmetry(args.symmetry, seq)
print line.strip()
continue
""" Read Structure and Energy """
[ss, en] = line.strip().split()
if args.theophylline:
en = float(en) + aptamer_energy(seq, ss,
verb=args.verbose, bfe=-9.32)
if args.fix_cofold_noLP:
en = cofold_noLP_energy(ss, seq, True)
if en is None:
continue
if args.dimers and not is_true_dimer(ss):
""" Structure is not a real dimer """
continue
# if args.symmetry and is_symmetric(ss) : # how it should be
if args.symmetry and is_true_dimer(ss): # consistent with RNAcofold
""" In order to be consistent with partition function calculations:
the structures need a symmetry correction in case they are true dimers
is it a problem that they are counted twice if they are not true dimers?
=> in both cases, the correction factor -RT*ln(2) should do it!
"""
en = float(en) + RT * math.log(2)
print "%s %6.2f" % (ss, float(en))
return
# Script entry point: filter/annotate an RNAsubopt stream from stdin.
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2019 KuraLabs S.R.L
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Google Test
===========
This source parses the JUnit-like results XML file generated by
`Google Test <https://github.com/google/googletest>`_.
**Data collected:**
.. code-block:: json
{
"failures": 1,
"disabled": 1,
"errors": 1,
"tests": 1,
"time": 10.555,
"timestamp": "2017-09-13T00:51:51",
"properties": {
"<propname1>": "<propvalue1>"
},
"suites": {
"<suitename1>": {
"cases": {
"<casename1>": {
"status": "<PASS|FAIL|SKIP>",
"time": 0.05,
"properties": {
"<propname1>": "<propvalue1>"
}
},
"<casename2>": {
"status": "<PASS|FAIL|SKIP>",
"time": 0.05,
"properties": {
"<propname1>": "<propvalue1>"
}
}
},
"properties": {
"<propname1>": "<propvalue1>"
},
"failures": 1,
"passed": 1,
"disabled": 1,
"errors": 1,
"tests": 1,
"time": 0.456
}
}
}
In addition to the previous data structure, if status is ``FAIL`` an additional
key ``failures`` will be available with a list of failures found:
.. code-block:: python3
{
# ...
'failures': [
'/home/kuralabs/googletest-example/tests/test2.cpp:12\\n'
'Expected: 0\\n'
'To be equal to: 1',
]
}
**Dependencies:**
.. code-block:: sh
pip3 install flowbber[gtest]
**Usage:**
.. code-block:: toml
[[sources]]
type = "gtest"
id = "..."
[sources.config]
xmlpath = "tests.xml"
.. code-block:: json
{
"sources": [
{
"type": "gtest",
"id": "...",
"config": {
"xmlpath": "tests.xml"
}
}
]
}
xmlpath
-------
Path to the JUnit-like XML results ``tests.xml`` file to be parsed.
- **Default**: ``N/A``
- **Optional**: ``False``
- **Schema**:
.. code-block:: python3
{
'type': 'string',
'empty': False,
}
- **Secret**: ``False``
""" # noqa
from pathlib import Path
from xml.etree import ElementTree
from collections import OrderedDict
from flowbber.components import Source
from flowbber.logging import get_logger
log = get_logger(__name__)
def trycast(value):
    """
    Attempt numeric conversion of *value*: integer first, then float.
    The original string is returned when neither conversion succeeds.
    """
    try:
        return int(value)
    except ValueError:
        pass
    try:
        return float(value)
    except ValueError:
        return value
def element_to_dict(element, spec):
    """
    Transform a XML element into a dictionary with its properties identified.

    *spec* is an iterable of ``(attribute, cast)`` pairs; any remaining
    attributes (other than ``name``) are collected under ``'properties'``.
    Returns a ``(name, data)`` tuple. Note: ``name`` is popped from the
    element's attributes.
    """
    name = element.attrib.pop('name')

    data = {}
    for key, cast in spec:
        data[key] = cast(element.attrib[key])

    extras = set(element.attrib) - set(data)
    if extras:
        data['properties'] = {
            key: trycast(value)
            for key, value in element.attrib.items()
            if key in extras
        }

    return name, data
class GTestSource(Source):
    """Source that parses a Google Test (JUnit-like) results XML file."""

    def declare_config(self, config):
        """Declare the required, non-empty ``xmlpath`` option."""
        config.add_option(
            'xmlpath',
            schema={
                'type': 'string',
                'empty': False,
            },
        )

    def collect(self):
        """Parse the XML file and return the collected data structure.

        :raises FileNotFoundError: if ``xmlpath`` does not exist.
        :raises AssertionError: on malformed XML elements.
        """
        # Check if file exists
        infile = Path(self.config.xmlpath.value)
        if not infile.is_file():
            raise FileNotFoundError(
                'No such file {}'.format(infile)
            )

        tree = ElementTree.parse(str(infile))
        root = tree.getroot()
        assert root.tag == 'testsuites', 'Malformed XML root element'

        # Create top level suites object
        _, data = element_to_dict(root, [
            ('tests', int),
            ('failures', int),
            ('disabled', int),
            ('errors', int),
            ('timestamp', str),
            ('time', float),
        ])
        data['passed'] = 0

        testsuites = OrderedDict()
        data['suites'] = testsuites

        # Add test suites
        for child in root:
            assert child.tag == 'testsuite', \
                'Malformed XML child element'

            suitename, testsuite = element_to_dict(child, [
                ('tests', int),
                ('failures', int),
                ('disabled', int),
                ('errors', int),
                ('time', float),
            ])
            testsuites[suitename] = testsuite

            testcases = OrderedDict()
            testsuite['cases'] = testcases

            # Count passed
            testsuite['passed'] = 0

            # Add test case
            for subchild in child:
                assert subchild.tag == 'testcase', \
                    'Malformed XML subchild element'

                # Pop classname, as it is redundant from testsuite name
                del subchild.attrib['classname']

                casename, testcase = element_to_dict(subchild, [
                    ('status', str),
                    ('time', float),
                ])

                # Fetch properties: the properties are no longer attributes
                # in the testcase. After the release of gtest v1.8.1 they
                # are saved in the format <property name='' value''> inside
                # <properties> under each testcase.
                propertiesnode = subchild.find('properties')

                # NOTE: truth-testing an Element tests for *children*, not
                # for existence — an empty <properties/> node is falsy — so
                # compare against None explicitly.
                if propertiesnode is not None:
                    # We are dealing with a 1.8.1+ XML format
                    properties = testcase.setdefault('properties', {})
                    if properties:
                        log.warning(
                            'File {} has old style (pre-1.8.1) '
                            'properties ({}) and new style properties '
                            '(post 1.8.1)'.format(
                                infile, ', '.join(
                                    map(str, properties.keys())
                                ),
                            )
                        )

                    for propertynode in propertiesnode:
                        assert propertynode.tag == 'property', \
                            'Malformed XML properties element'

                        attributes = propertynode.attrib
                        assert (
                            'name' in attributes and 'value' in attributes
                        ), 'Malformed XML property element'

                        name = attributes['name']
                        value = trycast(attributes['value'])

                        if name in properties:
                            log.warning(
                                'Overriding property '
                                '"{}" from "{}" to "{}"'.format(
                                    name, properties[name], value,
                                )
                            )

                        properties[name] = value

                # Fetch failures
                failures = [
                    failure.text for failure in subchild
                    if failure.tag == 'failure'
                ]

                # Change the status
                if failures:
                    testcase['failures'] = failures
                    testcase['status'] = 'FAIL'
                elif casename.startswith('DISABLED_'):
                    casename = casename[len('DISABLED_'):]
                    testcase['status'] = 'SKIP'
                else:
                    testcase['status'] = 'PASS'
                    testsuite['passed'] += 1

                testcases[casename] = testcase

            data['passed'] += testsuite['passed']

        return data
__all__ = ['GTestSource']
| |
# Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to consistency groups.
"""
import functools
from oslo.config import cfg
from cinder.db import base
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
import cinder.policy
from cinder import quota
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder.volume import api as volume_api
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_types
CONF = cfg.CONF
CONF.import_opt('storage_availability_zone', 'cinder.volume.manager')
LOG = logging.getLogger(__name__)
CGQUOTAS = quota.CGQUOTAS
def wrap_check_policy(func):
    """Check policy corresponding to the wrapped methods prior to execution.

    The wrapped function must take (self, context, consistencygroup) as its
    first three arguments.
    """
    @functools.wraps(func)
    def wrapper(self, context, target_obj, *args, **kwargs):
        check_policy(context, func.__name__, target_obj)
        return func(self, context, target_obj, *args, **kwargs)

    return wrapper
def check_policy(context, action, target_obj=None):
    """Enforce the ``consistencygroup:<action>`` policy for *context*."""
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }
    if target_obj:
        target.update(target_obj)
    cinder.policy.enforce(context, 'consistencygroup:%s' % action, target)
class API(base.Base):
    """API for interacting with the volume manager for consistency groups."""

    def __init__(self, db_driver=None):
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.volume_rpcapi = volume_rpcapi.VolumeAPI()
        # Lazily-populated cache of known availability zone names.
        self.availability_zone_names = ()
        self.volume_api = volume_api.API()
        super(API, self).__init__(db_driver)

    def _valid_availability_zone(self, availability_zone):
        """Return True if *availability_zone* is known to the volume service."""
        if availability_zone in self.availability_zone_names:
            return True
        if CONF.storage_availability_zone == availability_zone:
            return True
        # Cache miss: refresh the zone list from the volume API and re-check.
        azs = self.volume_api.list_availability_zones()
        self.availability_zone_names = [az['name'] for az in azs]
        return availability_zone in self.availability_zone_names

    def _extract_availability_zone(self, availability_zone):
        """Resolve *availability_zone*, falling back to configured defaults.

        :raises: exception.InvalidInput if the zone is unknown.
        """
        if availability_zone is None:
            if CONF.default_availability_zone:
                availability_zone = CONF.default_availability_zone
            else:
                # For backwards compatibility use the storage_availability_zone
                availability_zone = CONF.storage_availability_zone

        valid = self._valid_availability_zone(availability_zone)
        if not valid:
            msg = _("Availability zone '%s' is invalid") % (availability_zone)
            LOG.warn(msg)
            raise exception.InvalidInput(reason=msg)

        return availability_zone

    def create(self, context, name, description,
               cg_volume_types=None, availability_zone=None):
        """Create a consistency group and cast the request to the scheduler.

        :param cg_volume_types: comma-separated volume type names or ids;
            the default volume type is used when none resolve.
        """
        check_policy(context, 'create')

        volume_type_list = None
        if cg_volume_types:
            volume_type_list = cg_volume_types.split(',')

        req_volume_types = []
        if volume_type_list:
            req_volume_types = (self.db.volume_types_get_by_name_or_id(
                context, volume_type_list))

        if not req_volume_types:
            volume_type = volume_types.get_default_volume_type()
            req_volume_types.append(volume_type)

        req_volume_type_ids = ""
        for voltype in req_volume_types:
            if voltype:
                req_volume_type_ids = (
                    req_volume_type_ids + voltype.get('id') + ",")
        if len(req_volume_type_ids) == 0:
            req_volume_type_ids = None

        availability_zone = self._extract_availability_zone(availability_zone)

        options = {'user_id': context.user_id,
                   'project_id': context.project_id,
                   'availability_zone': availability_zone,
                   'status': "creating",
                   'name': name,
                   'description': description,
                   'volume_type_id': req_volume_type_ids}

        group = None
        try:
            group = self.db.consistencygroup_create(context, options)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error occurred when creating consistency group"
                            " %s."), name)

        request_spec_list = []
        filter_properties_list = []
        for req_volume_type in req_volume_types:
            request_spec = {'volume_type': req_volume_type.copy(),
                            'consistencygroup_id': group['id']}
            filter_properties = {}
            request_spec_list.append(request_spec)
            filter_properties_list.append(filter_properties)

        # Update quota for consistencygroups
        self.update_quota(context, group['id'])

        self._cast_create_consistencygroup(context, group['id'],
                                           request_spec_list,
                                           filter_properties_list)

        return group

    def _cast_create_consistencygroup(self, context, group_id,
                                      request_spec_list,
                                      filter_properties_list):
        """Build per-volume request specs and cast them to the scheduler.

        Destroys the consistency group row on failure before re-raising.
        """
        try:
            for request_spec in request_spec_list:
                volume_type = request_spec.get('volume_type', None)
                volume_type_id = None
                if volume_type:
                    volume_type_id = volume_type.get('id', None)

                specs = {}
                if volume_type_id:
                    qos_specs = volume_types.get_volume_type_qos_specs(
                        volume_type_id)
                    specs = qos_specs['qos_specs']
                if not specs:
                    # to make sure we don't pass empty dict
                    specs = None

                volume_properties = {
                    'size': 0,  # Need to populate size for the scheduler
                    'user_id': context.user_id,
                    'project_id': context.project_id,
                    'status': 'creating',
                    'attach_status': 'detached',
                    'encryption_key_id': request_spec.get('encryption_key_id',
                                                          None),
                    'display_description': request_spec.get('description',
                                                            None),
                    'display_name': request_spec.get('name', None),
                    'volume_type_id': volume_type_id,
                }

                request_spec['volume_properties'] = volume_properties
                request_spec['qos_specs'] = specs

        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self.db.consistencygroup_destroy(context, group_id)
                finally:
                    LOG.error(_("Error occurred when building "
                                "request spec list for consistency group "
                                "%s."), group_id)

        # Cast to the scheduler and let it handle whatever is needed
        # to select the target host for this group.
        self.scheduler_rpcapi.create_consistencygroup(
            context,
            CONF.volume_topic,
            group_id,
            request_spec_list=request_spec_list,
            filter_properties_list=filter_properties_list)

    def update_quota(self, context, group_id):
        """Reserve and commit quota for one new consistency group."""
        reserve_opts = {'consistencygroups': 1}
        try:
            reservations = CGQUOTAS.reserve(context, **reserve_opts)
            CGQUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self.db.consistencygroup_destroy(context, group_id)
                finally:
                    # NOTE: the original message concatenated to
                    # "creatingconsistency group" — a space was missing.
                    LOG.error(_("Failed to update quota for creating "
                                "consistency group %s."), group_id)

    @wrap_check_policy
    def delete(self, context, group, force=False):
        """Mark a consistency group as deleting and cast the deletion.

        :param force: allow deletion even when the group still has volumes
            (attached volumes or dependent snapshots still block deletion).
        :raises: exception.InvalidConsistencyGroup
        """
        if not force and group['status'] not in ["available", "error"]:
            msg = _("Consistency group status must be available or error, "
                    "but current status is: %s") % group['status']
            raise exception.InvalidConsistencyGroup(reason=msg)

        cgsnaps = self.db.cgsnapshot_get_all_by_group(
            context.elevated(),
            group['id'])
        if cgsnaps:
            msg = _("Consistency group %s still has dependent "
                    "cgsnapshots.") % group['id']
            LOG.error(msg)
            raise exception.InvalidConsistencyGroup(reason=msg)

        volumes = self.db.volume_get_all_by_group(context.elevated(),
                                                  group['id'])

        if volumes and not force:
            msg = _("Consistency group %s still contains volumes. "
                    "The force flag is required to delete it.") % group['id']
            LOG.error(msg)
            raise exception.InvalidConsistencyGroup(reason=msg)

        for volume in volumes:
            if volume['attach_status'] == "attached":
                msg = _("Volume in consistency group %s is attached. "
                        "Need to detach first.") % group['id']
                LOG.error(msg)
                raise exception.InvalidConsistencyGroup(reason=msg)

            snapshots = self.db.snapshot_get_all_for_volume(context,
                                                            volume['id'])
            if snapshots:
                msg = _("Volume in consistency group still has "
                        "dependent snapshots.")
                LOG.error(msg)
                raise exception.InvalidConsistencyGroup(reason=msg)

        now = timeutils.utcnow()
        self.db.consistencygroup_update(context, group['id'],
                                        {'status': 'deleting',
                                         'terminated_at': now})

        self.volume_rpcapi.delete_consistencygroup(context, group)

    @wrap_check_policy
    def update(self, context, group, fields):
        """Update the database record of a consistency group."""
        self.db.consistencygroup_update(context, group['id'], fields)

    def get(self, context, group_id):
        """Return a consistency group dict after a policy check."""
        rv = self.db.consistencygroup_get(context, group_id)
        group = dict(rv.iteritems())
        check_policy(context, 'get', group)
        return group

    def get_all(self, context, marker=None, limit=None, sort_key='created_at',
                sort_dir='desc', filters=None):
        """List consistency groups, optionally across all tenants (admin)."""
        check_policy(context, 'get_all')
        if filters is None:
            filters = {}

        try:
            if limit is not None:
                limit = int(limit)
                if limit < 0:
                    msg = _('limit param must be positive')
                    raise exception.InvalidInput(reason=msg)
        except ValueError:
            msg = _('limit param must be an integer')
            raise exception.InvalidInput(reason=msg)

        if filters:
            LOG.debug("Searching by: %s" % str(filters))

        if (context.is_admin and 'all_tenants' in filters):
            # Need to remove all_tenants to pass the filtering below.
            del filters['all_tenants']
            groups = self.db.consistencygroup_get_all(context)
        else:
            groups = self.db.consistencygroup_get_all_by_project(
                context,
                context.project_id)

        return groups

    def get_group(self, context, group_id):
        """Return a consistency group dict (policy-checked by action name)."""
        check_policy(context, 'get_group')
        rv = self.db.consistencygroup_get(context, group_id)
        return dict(rv.iteritems())

    def create_cgsnapshot(self, context,
                          group, name,
                          description):
        """Create a snapshot of every volume in *group*."""
        return self._create_cgsnapshot(context, group, name, description)

    def _create_cgsnapshot(self, context,
                           group, name, description):
        options = {'consistencygroup_id': group['id'],
                   'user_id': context.user_id,
                   'project_id': context.project_id,
                   'status': "creating",
                   'name': name,
                   'description': description}

        # Track the created row explicitly: the original referenced
        # ``cgsnapshot_id`` in the error handler, which is unbound when
        # cgsnapshot_create itself raises (NameError inside the handler).
        cgsnapshot = None
        try:
            cgsnapshot = self.db.cgsnapshot_create(context, options)
            cgsnapshot_id = cgsnapshot['id']

            volumes = self.db.volume_get_all_by_group(
                context.elevated(),
                cgsnapshot['consistencygroup_id'])

            if not volumes:
                msg = _("Consistency group is empty. No cgsnapshot "
                        "will be created.")
                raise exception.InvalidConsistencyGroup(reason=msg)

            snap_name = cgsnapshot['name']
            snap_desc = cgsnapshot['description']
            self.volume_api.create_snapshots_in_db(
                context, volumes, snap_name, snap_desc, True, cgsnapshot_id)

        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if cgsnapshot:
                        self.db.cgsnapshot_destroy(context, cgsnapshot['id'])
                finally:
                    LOG.error(_("Error occurred when creating cgsnapshot"
                                " %s."),
                              cgsnapshot['id'] if cgsnapshot else name)

        self.volume_rpcapi.create_cgsnapshot(context, group, cgsnapshot)

        return cgsnapshot

    def delete_cgsnapshot(self, context, cgsnapshot, force=False):
        """Mark a cgsnapshot as deleting and cast the deletion to the host."""
        if cgsnapshot['status'] not in ["available", "error"]:
            msg = _("Cgsnapshot status must be available or error")
            raise exception.InvalidCgSnapshot(reason=msg)
        self.db.cgsnapshot_update(context, cgsnapshot['id'],
                                  {'status': 'deleting'})
        group = self.db.consistencygroup_get(
            context,
            cgsnapshot['consistencygroup_id'])
        self.volume_rpcapi.delete_cgsnapshot(context.elevated(), cgsnapshot,
                                             group['host'])

    def update_cgsnapshot(self, context, cgsnapshot, fields):
        """Update the database record of a cgsnapshot."""
        self.db.cgsnapshot_update(context, cgsnapshot['id'], fields)

    def get_cgsnapshot(self, context, cgsnapshot_id):
        """Return a cgsnapshot dict after a policy check."""
        check_policy(context, 'get_cgsnapshot')
        rv = self.db.cgsnapshot_get(context, cgsnapshot_id)
        return dict(rv.iteritems())

    def get_all_cgsnapshots(self, context, search_opts=None):
        """List cgsnapshots, filtered by *search_opts* (exact-match)."""
        check_policy(context, 'get_all_cgsnapshots')

        search_opts = search_opts or {}

        if (context.is_admin and 'all_tenants' in search_opts):
            # Need to remove all_tenants to pass the filtering below.
            del search_opts['all_tenants']
            cgsnapshots = self.db.cgsnapshot_get_all(context)
        else:
            cgsnapshots = self.db.cgsnapshot_get_all_by_project(
                context.elevated(), context.project_id)

        if search_opts:
            LOG.debug("Searching by: %s" % search_opts)

            results = []
            not_found = object()
            for cgsnapshot in cgsnapshots:
                for opt, value in search_opts.iteritems():
                    if cgsnapshot.get(opt, not_found) != value:
                        break
                else:
                    results.append(cgsnapshot)
            cgsnapshots = results
        return cgsnapshots
| |
from __future__ import division, print_function, absolute_import
import numpy.testing as npt
import numpy as np
import nose
from scipy.lib.six.moves import xrange
from scipy import stats
# Loose tolerance (decimal places) for sample-vs-distribution moment checks.
DECIMAL_meanvar = 0#1  # was 0

# (distribution name, shape parameters) pairs exercised by the tests below.
distdiscrete = [
    ['bernoulli',(0.3,)],
    ['binom', (5, 0.4)],
    ['boltzmann',(1.4, 19)],
    ['dlaplace', (0.8,)], #0.5
    ['geom', (0.5,)],
    ['hypergeom',(30, 12, 6)],
    ['hypergeom',(21,3,12)], #numpy.random (3,18,12) numpy ticket:921
    ['hypergeom',(21,18,11)], #numpy.random (18,3,11) numpy ticket:921
    ['logser', (0.6,)], # reenabled, numpy ticket:921
    ['nbinom', (5, 0.5)],
    ['nbinom', (0.4, 0.4)], #from tickets: 583
    ['planck', (0.51,)], #4.1
    ['poisson', (0.6,)],
    ['randint', (7, 31)],
    ['skellam', (15, 8)]]
# ['zipf', (4,)] ]   # arg=4 is ok,
# Zipf broken for arg = 2, e.g. weird .stats
# looking closer, mean, var should be inf for arg=2
#@npt.dec.slow
def test_discrete_basic():
    """Nose generator: basic sanity checks for every discrete distribution.

    For each (name, args) pair in ``distdiscrete`` this draws a fixed-seed
    sample and yields moment, cdf/ppf round-trip, pmf-vs-cdf, skew/kurtosis
    and chi-square goodness-of-fit checks.
    """
    for distname, arg in distdiscrete:
        distfn = getattr(stats,distname)
        #npt.assert_(stats.dlaplace.rvs(0.8) is not None)
        # Fixed seed keeps the sample (and thus the tests) deterministic.
        np.random.seed(9765456)
        rvs = distfn.rvs(size=2000,*arg)
        supp = np.unique(rvs)
        m,v = distfn.stats(*arg)
        #yield npt.assert_almost_equal(rvs.mean(), m, decimal=4,err_msg='mean')
        #yield npt.assert_almost_equal, rvs.mean(), m, 2, 'mean' # does not work
        yield check_sample_meanvar, rvs.mean(), m, distname + ' sample mean test'
        yield check_sample_meanvar, rvs.var(), v, distname + ' sample var test'
        yield check_cdf_ppf, distfn, arg, distname + ' cdf_ppf'
        yield check_cdf_ppf2, distfn, arg, supp, distname + ' cdf_ppf'
        yield check_pmf_cdf, distfn, arg, distname + ' pmf_cdf'
        # zipf doesn't fail, but generates floating point warnings.
        # Should be checked.
        if not distname in ['zipf']:
            yield check_oth, distfn, arg, distname + ' oth'
            skurt = stats.kurtosis(rvs)
            sskew = stats.skew(rvs)
            yield check_sample_skew_kurt, distfn, arg, skurt, sskew, \
                distname + ' skew_kurt'
        # dlaplace doesn't fail, but generates lots of floating point warnings.
        # Should be checked.
        if not distname in ['dlaplace']: #['logser']: #known failure, fixed
            alpha = 0.01
            yield check_discrete_chisquare, distfn, arg, rvs, alpha, \
                distname + ' chisquare'
@npt.dec.slow
def test_discrete_extra():
    """Nose generator: ppf/isf boundary and entropy checks (slow)."""
    for distname, arg in distdiscrete:
        distfn = getattr(stats,distname)
        yield check_ppf_limits, distfn, arg, distname + \
            ' ppf limit test'
        yield check_isf_limits, distfn, arg, distname + \
            ' isf limit test'
        yield check_entropy, distfn, arg, distname + \
            ' entropy nan test'
# Always skipped: exercises private distribution methods for debugging only.
@npt.dec.skipif(True)
def test_discrete_private():
    #testing private methods mostly for debugging
    # some tests might fail by design,
    # e.g. incorrect definition of distfn.a and distfn.b
    for distname, arg in distdiscrete:
        distfn = getattr(stats,distname)
        rvs = distfn.rvs(size=10000,*arg)
        m,v = distfn.stats(*arg)

        yield check_ppf_ppf, distfn, arg
        yield check_cdf_ppf_private, distfn, arg, distname
        yield check_generic_moment, distfn, arg, m, 1, 3   # last is decimal
        yield check_generic_moment, distfn, arg, v+m*m, 2, 3 # last is decimal
        yield check_moment_frozen, distfn, arg, m, 1, 3   # last is decimal
        yield check_moment_frozen, distfn, arg, v+m*m, 2, 3 # last is decimal
def check_sample_meanvar(sm, m, msg):
    """Compare a sample moment *sm* with the population moment *m*.

    For an infinite population moment the sample moment is only required
    to be very large (> 10000).
    """
    if np.isinf(m):
        npt.assert_(sm > 10000, msg='infinite moment, sm = ' + str(sm))
    else:
        npt.assert_almost_equal(sm, m, decimal=DECIMAL_meanvar,
                                err_msg=msg + ' - finite moment')
def check_sample_var(sm,m,msg):
    # Compare sample variance with the distribution variance; the tolerance
    # DECIMAL_meanvar is deliberately loose (0 decimal places).
    npt.assert_almost_equal(sm, m, decimal=DECIMAL_meanvar, err_msg= msg + 'var')
def check_cdf_ppf(distfn, arg, msg):
    """ppf must round-trip through cdf at the median of the distribution."""
    median = distfn.ppf(0.5, *arg)
    cdf_at_median = distfn.cdf(median, *arg)
    # Slightly below the cdf value the ppf must return the same point ...
    npt.assert_almost_equal(distfn.ppf(cdf_at_median - 1e-6, *arg), median,
                            err_msg=msg + 'ppf-cdf-median')
    # ... and slightly above it must move to the next support point.
    npt.assert_(distfn.ppf(cdf_at_median + 1e-4, *arg) > median,
                msg + 'ppf-cdf-next')
def check_cdf_ppf2(distfn, arg, supp, msg):
    """ppf(cdf(x)) must reproduce every support point *supp* exactly."""
    cdf_vals = distfn.cdf(supp, *arg)
    npt.assert_array_equal(distfn.ppf(cdf_vals, *arg),
                           supp, msg + '-roundtrip')
    # -1e-8 could cause an error if pmf < 1e-8
    npt.assert_array_equal(distfn.ppf(cdf_vals - 1e-8, *arg),
                           supp, msg + '-roundtrip')
def check_cdf_ppf_private(distfn, arg, msg):
    """Same round-trip check as check_cdf_ppf, but on the private _ppf."""
    median = distfn._ppf(0.5, *arg)
    cdf_at_median = distfn.cdf(median, *arg)
    npt.assert_almost_equal(distfn._ppf(cdf_at_median - 1e-6, *arg), median,
                            err_msg=msg + '_ppf-cdf-median ')
    npt.assert_(distfn._ppf(cdf_at_median + 1e-4, *arg) > median,
                msg + '_ppf-cdf-next')
def check_ppf_ppf(distfn, arg):
    """Public ppf must stay finite and agree with the private _ppf."""
    npt.assert_(distfn.ppf(0.5, *arg) < np.inf)
    quantiles = distfn.ppf([0.5, 0.9], *arg)
    private = [distfn._ppf(0.5, *arg), distfn._ppf(0.9, *arg)]
    npt.assert_(np.all(quantiles < np.inf))
    # Scalar and vectorized calls must all agree with _ppf.
    npt.assert_(private[0] == distfn.ppf(0.5, *arg))
    npt.assert_(private[1] == distfn.ppf(0.9, *arg))
    npt.assert_(private[0] == quantiles[0])
    npt.assert_(private[1] == quantiles[1])
def check_pmf_cdf(distfn, arg, msg):
    """cdf must equal the cumulative sum of pmf over a support window."""
    # ``np.int`` was a deprecated alias of the builtin ``int`` and was
    # removed in NumPy 1.24; the builtin is the correct replacement.
    startind = int(distfn._ppf(0.01, *arg) - 1)
    index = list(range(startind, startind + 10))
    cdfs = distfn.cdf(index, *arg)
    npt.assert_almost_equal(cdfs, distfn.pmf(index, *arg).cumsum() +
                            cdfs[0] - distfn.pmf(index[0], *arg),
                            decimal=4, err_msg=msg + 'pmf-cdf')
def check_generic_moment(distfn, arg, m, k, decim):
    """The k-th generic moment must match the expected value *m*."""
    actual = distfn.generic_moment(k, *arg)
    npt.assert_almost_equal(actual, m, decimal=decim,
                            err_msg=str(distfn) + ' generic moment test')
def check_moment_frozen(distfn, arg, m, k, decim):
    """The k-th moment of the frozen distribution must match *m*."""
    frozen = distfn(*arg)
    npt.assert_almost_equal(frozen.moment(k), m, decimal=decim,
                            err_msg=str(distfn) + ' frozen moment test')
def check_oth(distfn, arg, msg):
    """Cross-check sf against cdf and isf against ppf around the median."""
    # closest integer to the mean
    mean_int = round(float(distfn.stats(*arg)[0]))
    npt.assert_almost_equal(distfn.sf(mean_int, *arg),
                            1 - distfn.cdf(mean_int, *arg), decimal=8)
    median_sf = distfn.isf(0.5, *arg)
    npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5)
    npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5)
    npt.assert_equal(distfn.isf(0.5, *arg), distfn.ppf(0.5, *arg))
#next 3 functions copied from test_continous_extra
# adjusted
def check_ppf_limits(distfn, arg, msg):
    """ppf must hit the support bounds at q=0/1 and give nan outside [0, 1]."""
    below, low, upp, above = distfn.ppf([-1, 0, 1, 2], *arg)
    assert_equal_inf_nan(distfn.a - 1, low, msg + 'ppf lower bound')
    assert_equal_inf_nan(distfn.b, upp, msg + 'ppf upper bound')
    npt.assert_(np.isnan(below), msg + 'ppf out of bounds - below')
    npt.assert_(np.isnan(above), msg + 'ppf out of bounds - above')
def check_isf_limits(distfn, arg, msg):
    """isf must hit the support bounds at q=0/1 and give nan outside [0, 1]."""
    below, low, upp, above = distfn.isf([-1, 0, 1, 2], *arg)
    # Note the reversal relative to ppf: isf(1) is the lower bound.
    assert_equal_inf_nan(distfn.a - 1, upp, msg + 'isf lower bound')
    assert_equal_inf_nan(distfn.b, low, msg + 'isf upper bound')
    npt.assert_(np.isnan(below), msg + 'isf out of bounds - below')
    npt.assert_(np.isnan(above), msg + 'isf out of bounds - above')
def assert_equal_inf_nan(v1, v2, msg):
    """v2 must equal a finite v1; for infinite v1, v2 must be inf or nan."""
    npt.assert_(not np.isnan(v1))
    if np.isinf(v1):
        npt.assert_(np.isinf(v2) or np.isnan(v2),
                    msg + ' - infinite, v2=%s' % str(v2))
    else:
        npt.assert_almost_equal(v1, v2, decimal=10,
                                err_msg=msg + ' - finite')
def check_sample_skew_kurt(distfn, arg, sk, ss, msg):
    """Compare sample kurtosis *sk* and skew *ss* with distribution values.

    ``stats(moment='ks')`` returns the moments in letter order:
    kurtosis first, then skew.
    """
    k, s = distfn.stats(moment='ks', *arg)
    # The original wrote ``check_sample_meanvar, sk, k, msg + ...`` which
    # builds a tuple and discards it — the comparisons were never executed.
    # NOTE(review): the messages look swapped (kurtosis is compared under
    # 'sample skew test' and vice versa) — kept as-is, confirm intent.
    check_sample_meanvar(sk, k, msg + 'sample skew test')
    check_sample_meanvar(ss, s, msg + 'sample kurtosis test')
def check_entropy(distfn, arg, msg):
    """The distribution entropy must be a number (not NaN)."""
    entropy_value = distfn.entropy(*arg)
    npt.assert_(not np.isnan(entropy_value), msg + 'test Entropy is nan')
def check_discrete_chisquare(distfn, arg, rvs, alpha, msg):
    '''perform chisquare test for random sample of a discrete distribution

    Parameters
    ----------
    distfn : distribution object
        discrete distribution to test against
    arg : sequence
        parameters of distribution
    rvs : array_like
        random sample drawn from the distribution
    alpha : float
        significance level, threshold for p-value
    msg : string
        label used in the assertion message

    Returns
    -------
    result : bool
        0 if test passes, 1 if test fails

    uses global variable debug for printing results
    '''

    # define parameters for test
##    n=2000
    n = len(rvs)
    nsupp = 20
    wsupp = 1.0/nsupp

##    distfn = getattr(stats, distname)
##    np.random.seed(9765456)
##    rvs = distfn.rvs(size=n,*arg)

    # construct intervals with minimum mass 1/nsupp
    # intervalls are left-half-open as in a cdf difference
    # The support is clipped to [-1000, 1000] to keep the scan finite for
    # unbounded distributions.
    distsupport = xrange(max(distfn.a, -1000), min(distfn.b, 1000) + 1)
    last = 0
    distsupp = [max(distfn.a, -1000)]
    distmass = []
    for ii in distsupport:
        current = distfn.cdf(ii,*arg)
        # Close a bin once it has accumulated at least ~1/nsupp probability.
        if current - last >= wsupp-1e-14:
            distsupp.append(ii)
            distmass.append(current - last)
            last = current
            if current > (1-wsupp):
                break
    if distsupp[-1] < distfn.b:
        # Final catch-all bin up to the upper support bound.
        distsupp.append(distfn.b)
        distmass.append(1-last)
    distsupp = np.array(distsupp)
    distmass = np.array(distmass)

    # convert intervals to right-half-open as required by histogram
    histsupp = distsupp+1e-8
    histsupp[0] = distfn.a

    # find sample frequencies and perform chisquare test
    freq,hsupp = np.histogram(rvs,histsupp)
    cdfs = distfn.cdf(distsupp,*arg)
    (chis,pval) = stats.chisquare(np.array(freq),n*distmass)

    npt.assert_(pval > alpha, 'chisquare - test for %s'
           ' at arg = %s with pval = %s' % (msg,str(arg),str(pval)))
# Run the generator-based tests through nose when executed directly;
# '-s' disables output capture, exit=False keeps the interpreter alive.
if __name__ == "__main__":
    #nose.run(argv=['', __file__])
    nose.runmodule(argv=[__file__,'-s'], exit=False)
| |
#!/usr/bin/env python
"""Script runs cactus to compare a bunch of assemblies against a set of two haplotypes and
a set of contamination sequences.
"""
import os
import xml.etree.ElementTree as ET
import xml
import sys
from optparse import OptionParser
from jobTree.scriptTree.target import Target
from jobTree.scriptTree.stack import Stack
from sonLib.bioio import logger
from sonLib.bioio import setLoggingFromOptions
from cactus.shared.experimentWrapper import ExperimentWrapper
from cactus.shared.common import runCactusWorkflow
from sonLib.bioio import getTempFile, getTempDirectory
from sonLib.bioio import fastaRead, fastaWrite
from sonLib.bioio import system
from jobTree.src.common import runJobTreeStatusAndFailIfNotComplete
def getRootPathString():
    """
    function for finding external location
    """
    import os
    import assemblaScripts.bin.pipeline
    # Walk two directory levels up from the installed pipeline module.
    pipelinePath = os.path.abspath(assemblaScripts.bin.pipeline.__file__)
    binDir = os.path.split(pipelinePath)[0]
    return os.path.split(binDir)[0]
def getCactusDiskString(alignmentFile):
    """Return the XML config snippet for a tokyo cabinet database stored
    under *alignmentFile*."""
    template = ('<st_kv_database_conf type="tokyo_cabinet">'
                '<tokyo_cabinet database_dir="%s"/>'
                '</st_kv_database_conf>')
    return template % alignmentFile
class MakeAlignment(Target):
    """Target runs the alignment.

    Builds a cactus alignment of one assembly against the haplotype
    sequences inside a local temp dir, then moves the results into
    ``outputDir`` and chains a MakeStats1 child target.
    """
    def __init__(self, newickTree, haplotypeSequences,
                 assemblyFile, outputDir, configFile, options):
        # Single-CPU target with an 8 GB memory reservation.
        Target.__init__(self, cpu=1, memory=8000000000)
        self.newickTree = newickTree
        self.haplotypeSequences = haplotypeSequences
        self.assemblyFile = assemblyFile
        self.outputDir = outputDir
        self.configFile = configFile
        self.options = options

    def run(self):
        cactusAlignmentName = "cactusAlignment"
        cactusAlignment = os.path.join(self.outputDir, cactusAlignmentName)
        # Skip the whole workflow if a previous run already produced output.
        if not os.path.exists(cactusAlignment):
            #Prepare the assembly
            #First copy it.
            if self.assemblyFile[-3:] == '.gz':
                # Gzipped input: copy, then decompress in the temp dir.
                tempAssemblyFile = getTempFile(rootDir=self.getLocalTempDir(), suffix=".gz")
                system("cp %s %s" % (self.assemblyFile, tempAssemblyFile))
                system("gunzip %s" % tempAssemblyFile)
                # gunzip strips the .gz suffix from the file name.
                tempAssemblyFile = tempAssemblyFile[:-3]
                assert os.path.exists(tempAssemblyFile)
            else:
                tempAssemblyFile = getTempFile(rootDir=self.getLocalTempDir(), suffix="")
                system("cp %s %s" % (self.assemblyFile, tempAssemblyFile))
            #Make the supporting temporary files
            tempExperimentFile = getTempFile(rootDir=self.getLocalTempDir())
            tempJobTreeDir = os.path.join(self.getLocalTempDir(), "jobTree")
            #Make the experiment file
            cactusWorkflowExperiment = ExperimentWrapper.createExperimentWrapper(
                                                 sequences=self.haplotypeSequences + [ tempAssemblyFile ],
                                                 newickTreeString=self.newickTree,
                                                 outputDir=self.getLocalTempDir(),
                                                 configFile=self.configFile)
            cactusWorkflowExperiment.setDbName(cactusAlignmentName)
            cactusWorkflowExperiment.setDbDir(os.path.join(self.getLocalTempDir(), cactusWorkflowExperiment.getDbName())) #This needs to be set to ensure the thing gets put in the right directory
            cactusWorkflowExperiment.writeXML(tempExperimentFile)
            #Now run cactus workflow
            runCactusWorkflow(experimentFile=tempExperimentFile, jobTreeDir=tempJobTreeDir,
                              buildAvgs=False, buildReference=True,
                              batchSystem="single_machine", maxThreads=1, jobTreeStats=True)
            logger.info("Ran the workflow")
            #Check if the jobtree completed sucessively.
            runJobTreeStatusAndFailIfNotComplete(tempJobTreeDir)
            logger.info("Checked the job tree dir")
            #Compute the stats
            cactusAlignmentDir = os.path.join(self.getLocalTempDir(), cactusAlignmentName)
            tempJobTreeStatsFile = os.path.join(self.getLocalTempDir(),"jobTreeStats.xml")
            system("jobTreeStats --jobTree %s --outputFile %s" % (tempJobTreeDir, tempJobTreeStatsFile))
            #Now copy the true assembly back to the output
            system("mv %s/* %s" % (self.getLocalTempDir(), self.outputDir))
            #system("mv %s %s/config.xml" % (tempExperimentFile, self.outputDir))
            #system("mv %s %s/" % (tempJobTreeStatsFile, self.outputDir))
            #system("mv %s %s/" % (cactusAlignmentDir, self.outputDir))
            assert os.path.exists(cactusAlignment)
            #We're done!
        self.addChildTarget(MakeStats1(self.outputDir, cactusAlignment, self.options))
class MakeAlignments(Target):
    """Makes alignments using pipeline.

    Top-level target: scans assembliesDir and schedules one MakeAlignment
    child per assembly fasta found there.
    """
    def __init__(self, newickTree, haplotypeSequences,
                 assembliesDir, outputDir, configFile, options):
        Target.__init__(self)
        self.newickTree = newickTree  # newick tree string shared by all alignments
        self.haplotypeSequences = haplotypeSequences  # haplotype fasta paths
        self.assembliesDir = assembliesDir  # directory scanned for assemblies
        self.outputDir = outputDir  # root output directory
        self.configFile = configFile  # cactus config XML
        self.options = options  # parsed command-line options
    def run(self):
        """Schedule a MakeAlignment child for every .fa / .fa.gz assembly."""
        for assembly in os.listdir(self.assembliesDir):
            # Only files ending in '.gz' or '.fa' are treated as assemblies.
            if assembly[-3:] == '.gz' or assembly[-3:] == '.fa':
                assemblyFile = os.path.join(self.assembliesDir, assembly)
                #The output directory
                outputDirForAssembly = os.path.join(self.outputDir, assembly)
                # Name the per-assembly output dir without the .gz suffix.
                if(outputDirForAssembly[-3:] == ".gz"):
                    outputDirForAssembly = outputDirForAssembly[:-3]
                #Make the output dir if it doesn't exist
                if not os.path.exists(outputDirForAssembly):
                    os.mkdir(outputDirForAssembly)
                #Make the output file
                self.addChildTarget(MakeAlignment(newickTree=self.newickTree, haplotypeSequences=self.haplotypeSequences,
                                                  assemblyFile=assemblyFile, outputDir=outputDirForAssembly, configFile=self.configFile, options=self.options))
class MakeStats1(Target):
    """Builds basic stats and the maf alignment(s).

    Base class for the stats chain: provides runScript(), a wrapper that
    invokes one of the package's analysis binaries against the cactus disk
    with the standard event-string options.  Subclasses override run().
    """
    def __init__(self, outputDir, alignment, options, cpu=4, memory=8000000000):
        Target.__init__(self, cpu=cpu, memory=memory)
        self.alignment = alignment  # path to the cactusAlignment database dir
        self.options = options  # parsed command-line options (event strings)
        self.outputDir = outputDir  # where the stats files are written
    def runScript(self, binaryName, outputFile, specialOptions):
        """Run bin/<binaryName> against the alignment, writing outputFile.

        The binary first writes to a temp path which is only moved into
        place on success, so interrupted runs never leave a partial
        outputFile behind; existing outputs are treated as cached.
        """
        if not os.path.exists(outputFile):
            # Reserve a unique temp path for the binary to write to.
            tempOutputFile = getTempFile(rootDir=self.getLocalTempDir())
            os.remove(tempOutputFile)
            system("%s --cactusDisk '%s' --outputFile %s --assemblyEventString %s \
            --haplotype1EventString %s --haplotype2EventString %s \
            --contaminationEventString %s --minimumNsForScaffoldGap %s %s" %
            (os.path.join(getRootPathString(), "bin", binaryName),
             getCactusDiskString(self.alignment),
             tempOutputFile,
             self.options.assemblyEventString,
             self.options.haplotype1EventString,
             self.options.haplotype2EventString,
             self.options.contaminationEventString,
             self.options.minimumNsForScaffoldGap, specialOptions))
            # Atomically publish the finished output.
            system("mv %s %s" % (tempOutputFile, outputFile))
    def run(self):
        """Compute tree stats and the annotated maf, then chain onwards."""
        outputFile = os.path.join(self.outputDir, "cactusTreeStats.xml")
        if not os.path.exists(outputFile):
            system("cactus_treeStats --cactusDisk '%s' --flowerName 0 --outputFile %s --noPerColumnStats" % (getCactusDiskString(self.alignment), outputFile))
        #outputFile = "%s.maf" % self.alignment
        #if not os.path.exists(outputFile):
        #    system("cactus_MAFGenerator --cactusDisk '%s' --flowerName 0 --outputFile %s --orderByReference" % (getCactusDiskString(self.alignment), outputFile))
        outputFile = os.path.join(self.outputDir, "annotatedPaths.maf")
        self.runScript("pathAnnotatedMafGenerator", outputFile, "")
        # Next link in the stats chain.
        self.addChildTarget(MakeContigPathStats(self.outputDir, self.alignment, self.options))
class MakeContigPathStats(MakeStats1):
    """Computes the contig-path statistics (plain and per-haplotype phased)."""
    def run(self):
        # (output file, extra options for the pathStats binary), run in order.
        statsJobs = (
            ("pathStats.xml", ""),
            ("pathStats_hap1Phasing.xml", "--treatHaplotype2AsContamination"),
            ("pathStats_hap2Phasing.xml", "--treatHaplotype1AsContamination"),
        )
        for fileName, extraOptions in statsJobs:
            self.runScript("pathStats", os.path.join(self.outputDir, fileName), extraOptions)
            # NOTE(review): a MakeCoveragePlots child is scheduled once per
            # stats file (three identical children in total), mirroring the
            # original behaviour -- verify the duplication is intentional.
            self.addChildTarget(MakeCoveragePlots(self.outputDir, self.alignment, self.options))
class MakeCoveragePlots(MakeStats1):
    """Generates the coverage plots and chains into the substitution stats."""
    def run(self):
        coverageOutput = os.path.join(self.outputDir, "coveragePlots")
        self.runScript("coveragePlots", coverageOutput, "")
        self.addChildTarget(MakeSubstitutionStats(self.outputDir, self.alignment, self.options))
class MakeSubstitutionStats(MakeStats1):
    """Computes substitution statistics at several filtering settings."""
    def run(self):
        # (output file, extra options for the substitutionStats binary),
        # executed in order.
        substitutionJobs = (
            ("substitutionStats_1000_98_5.xml",
             "--ignoreFirstNBases 5 --minimumBlockLength 1000 --minimumIdentity 98"),
            ("substitutionStats_1000_98_5_indel_positions.xml",
             "--ignoreFirstNBases 5 --minimumBlockLength 1000 --minimumIdentity 98 --printIndelPositions"),
            ("substitutionStats_1000_98_5_het_positions.xml",
             "--ignoreFirstNBases 5 --minimumBlockLength 1000 --minimumIdentity 98 --printHetPositions"),
            ("substitutionStats_0_0_0.xml",
             "--ignoreFirstNBases 0 --minimumBlockLength 0"),
        )
        for fileName, extraOptions in substitutionJobs:
            self.runScript("substitutionStats", os.path.join(self.outputDir, fileName), extraOptions)
        self.addChildTarget(MakeCopyNumberStats(self.outputDir, self.alignment, self.options))
class MakeCopyNumberStats(MakeStats1):
    """Computes copy-number statistics with and without a block-length cut."""
    def run(self):
        # (output file, extra options for the copyNumberStats binary).
        for fileName, extraOptions in (
                ("copyNumberStats_0.xml", ""),
                ("copyNumberStats_1000.xml", "--minimumBlockLength 1000")):
            self.runScript("copyNumberStats", os.path.join(self.outputDir, fileName), extraOptions)
        self.addChildTarget(MakeLinkageStats(self.outputDir, self.alignment, self.options))
class MakeLinkageStats(MakeStats1):
    """Computes the linkage statistics, then chains into the interval stats."""
    def run(self):
        linkageOutput = os.path.join(self.outputDir, "linkageStats.xml")
        self.runScript("linkageStats", linkageOutput, "--bucketNumber 2000 --sampleNumber 1000000")
        self.addChildTarget(MakeContigAndScaffoldPathIntervals(self.outputDir, self.alignment, self.options))
class MakeContigAndScaffoldPathIntervals(MakeStats1):
    """Computes contig/scaffold path intervals and their overlaps with
    feature and gene bed files.  (Docstring corrected: the original said
    "Make linkage stats", copied from the previous class.)
    """
    def run(self):
        #Get contig and scaffold paths
        contigPathOutputFile = os.path.join(self.outputDir, "splitContigPaths.bed")
        self.runScript("pathIntervals", contigPathOutputFile, "")
        #Get bed containments
        contigPathOverlapFile = os.path.join(self.outputDir, "contigPathsFeatureOverlap.xml")
        binPath = os.path.join(getRootPathString(), "bin")
        # Intersect the path intervals with the user-supplied feature beds.
        system("python %s/bedFileIntersection.py %s %s %s" % (binPath, contigPathOutputFile, contigPathOverlapFile, self.options.featureBedFiles))
        #Get gene containment
        contigPathGeneOverlapFile = os.path.join(self.outputDir, "contigPathsFeatureGeneOverlap.xml")
        # Intersect the path intervals with the user-supplied gene beds.
        system("python %s/bedFileGeneIntersection.py %s %s %s" % (binPath, contigPathOutputFile, contigPathGeneOverlapFile, self.options.geneBedFiles))
def main():
    """Command-line entry point: parse the options and launch a jobTree
    whose root target (MakeAlignments) aligns every assembly in
    --assembliesDir and computes the downstream statistics."""
    ##########################################
    #Construct the arguments.
    ##########################################
    parser = OptionParser()
    # Inputs for the alignment itself.
    parser.add_option("--haplotypeSequences", dest="haplotypeSequences")
    parser.add_option("--newickTree", dest="newickTree")
    parser.add_option("--assembliesDir", dest="assembliesDir")
    parser.add_option("--outputDir", dest="outputDir")
    parser.add_option("--configFile", dest="configFile")
    # Options forwarded verbatim to the stats binaries (see MakeStats1).
    parser.add_option("--minimumNsForScaffoldGap", dest="minimumNsForScaffoldGap")
    parser.add_option("--assemblyEventString", dest="assemblyEventString")
    parser.add_option("--haplotype1EventString", dest="haplotype1EventString")
    parser.add_option("--haplotype2EventString", dest="haplotype2EventString")
    parser.add_option("--contaminationEventString", dest="contaminationEventString")
    # Bed files used by MakeContigAndScaffoldPathIntervals.
    parser.add_option("--featureBedFiles", dest="featureBedFiles")
    parser.add_option("--geneBedFiles", dest="geneBedFiles")
    # Standard jobTree batch-system/logging options.
    Stack.addJobTreeOptions(parser)
    options, args = parser.parse_args()
    setLoggingFromOptions(options)
    # All inputs arrive via options; positional arguments are an error.
    if len(args) != 0:
        raise RuntimeError("Unrecognised input arguments: %s" % " ".join(args))
    Stack(MakeAlignments(newickTree=options.newickTree,
                         haplotypeSequences=options.haplotypeSequences.split(),
                         assembliesDir=options.assembliesDir,
                         outputDir=options.outputDir,
                         configFile=options.configFile,
                         options=options)).startJobTree(options)
    logger.info("Done with job tree")
def _test():
    """Run any doctests defined in this module and return the results."""
    import doctest
    return doctest.testmod()
if __name__ == '__main__':
    # Re-import this module under its fully-qualified package name so that
    # the Target classes defined here are picklable by jobTree when the
    # script is run directly (presumably the standard sonLib/jobTree
    # idiom -- verify against the package layout).
    from assemblaScripts.bin.pipeline import *
    _test()
    main()
| |
from bokeh.models import ColumnDataSource, TapTool, OpenURL
from bokeh.layouts import column, layout
from bokeh.models import LinearColorMapper
from bokeh.models.widgets import Div
from bokeh.models import Range1d
from dashboard.bokeh.helper import get_palette
from dashboard.bokeh.plots.descriptors.table import Table
from dashboard.bokeh.plots.descriptors.title import Title
from dashboard.bokeh.plots.plot2d.main import Plot2d
from qlf_models import QLFModels
from dashboard.bokeh.helper import sort_obj
from bokeh.resources import CDN
from bokeh.embed import file_html
import numpy as np
from dashboard.models import Job, Process, Fibermap
class SNR:
    """Signal-to-noise QA page for one camera (arm + spectrograph).

    Reads the CHECK_SPECTRA metrics of a pipeline run and renders, per
    target class (ELG/LRG/QSO/STAR), SNR^2-vs-magnitude scatter plots with
    the fitted SNR model, a focal-plane wedge plot of SNR residuals, a
    per-fiber median-SNR bar plot and a metric summary table.  The page is
    returned as a standalone HTML document.

    Fixes relative to the original:
    - the object-index mapping was computed twice; it is computed once,
    - bare ``except:`` clauses are narrowed to the exceptions they handle,
    - ``mapper`` was left undefined (NameError) when every SNR residual was
      NaN; it is now set to None in that branch,
    - unused locals (``fill_color``, ``radius``, axis-limit scratch values,
      ``qlf_fiberid``) removed; the local ``layout`` no longer shadows the
      ``bokeh.layouts.layout`` import.
    """

    def __init__(self, process_id, arm, spectrograph):
        # Camera/run selection used by load_qa() to fetch the QA output.
        self.selected_process_id = process_id
        self.selected_arm = arm
        self.selected_spectrograph = spectrograph

    def load_qa(self):
        """Fetch QA output for the selected camera and render the SNR page.

        Returns:
            str: complete standalone HTML (bokeh ``file_html``).
        """
        cam = self.selected_arm + str(self.selected_spectrograph)

        mergedqa = QLFModels().get_output(self.selected_process_id, cam)

        # Available object classes in the petal; SKY fibers carry no target
        # SNR, so they are excluded from all plots.
        objlist = mergedqa["TASKS"]["CHECK_SPECTRA"]["METRICS"]["OBJLIST"]
        if 'SKY' in objlist:
            objlist.remove('SKY')

        gen_info = mergedqa['GENERAL_INFO']
        ra = gen_info['RA']
        dec = gen_info['DEC']

        check_spectra = mergedqa['TASKS']['CHECK_SPECTRA']
        snr = check_spectra['METRICS']
        nrg = check_spectra['PARAMS']['FIDSNR_TGT_NORMAL_RANGE']
        wrg = check_spectra['PARAMS']['FIDSNR_TGT_WARN_RANGE']

        # Object identification in fibers.
        obj_fiber = sort_obj(gen_info)

        # Map each QLF object class to its index in the available-object
        # list; the pipeline reports 'STD' which is displayed as 'STAR'.
        # (The original computed this mapping twice; once is enough.)
        qlf_obj = ['ELG', 'LRG', 'QSO', 'STAR']
        avobj = ['STAR' if x == 'STD' else x for x in objlist]
        obj_idx = {}
        for o in qlf_obj:
            try:
                obj_idx.update({o: avobj.index(o)})
            except ValueError:
                # Object class absent from this petal.
                obj_idx.update({o: None})

        try:
            exptime = gen_info['EXPTIME']
            name_warn = ''
        except KeyError:
            # Missing exposure time: fall back to a fixed value and flag it
            # in the residual-plot title.
            exptime = 1000
            name_warn = ' (exptime fixed)'

        def good_idx(mag_values, snr_values):
            # Filter out measurements without a valid magnitude or with a
            # non-positive SNR.  None entries are mapped to a sentinel below
            # the validity cut (inf and NaN are already treated in the db).
            mag_2 = np.array([-9998 if x is None else x for x in mag_values])
            snr_2 = np.array([-9998 if x is None else x for x in snr_values])
            idx = np.arange(len(snr_2))
            return list(idx[(mag_2 > -999) & (snr_2 > 0)])

        # Per-class [snr, mag] lists with bad measurements filtered out.
        mag_snr = {}
        for o in avobj:
            snr_ql, mag_ql = snr['SNR_MAG_TGT'][obj_idx[o]]
            idx = good_idx(mag_ql, snr_ql)
            x = [mag_ql[i] for i in idx]
            y = [snr_ql[i] for i in idx]
            mag_snr.update({o: [y, x]})

        def fit_func(xdata, coeff):
            """Evaluate the astro SNR model over the magnitude range of xdata."""
            r1 = 0.0  # read noise
            a, b = coeff
            x = np.linspace(min(xdata), max(xdata), 1000)
            Flux = 10**(-0.4*(x - 22.5))
            y = a*Flux*exptime/np.sqrt(a*Flux*exptime + b*exptime+r1**2)
            return x, y

        # One data source per target class for the measured points...
        data_model = {
            'x': [],
            'y': [],
            'y2': [],
            'fiber_id': [],
            'ra': [],
            'dec': [],
        }
        elg = ColumnDataSource(data=data_model.copy())
        lrg = ColumnDataSource(data=data_model.copy())
        qso = ColumnDataSource(data=data_model.copy())
        star = ColumnDataSource(data=data_model.copy())

        # ...and one per class for the fitted curve (no fiber/ra/dec info).
        data_fit = {
            'x': [],
            'y': [],
            'y2': [],
            'fiber_id': [],
            'ra': [],
            'dec': []
        }
        elg_fit = ColumnDataSource(data=data_fit.copy())
        lrg_fit = ColumnDataSource(data=data_fit.copy())
        qso_fit = ColumnDataSource(data=data_fit.copy())
        star_fit = ColumnDataSource(data=data_fit.copy())

        def _fill_sources(obj_name, points, fit, fiberid_key):
            """Populate the scatter and fit sources for one target class."""
            obj_snr = mag_snr[obj_name]
            points.data['x'] = obj_snr[1]
            points.data['y'] = np.array(obj_snr[0])
            points.data['y2'] = np.array(obj_snr[0])**2
            points.data['fiber_id'] = gen_info[fiberid_key]
            # Fiber ids span the whole focal plane; i % 500 maps them onto
            # this petal's RA/DEC arrays.
            points.data['ra'] = [ra[i % 500] for i in gen_info[fiberid_key]]
            points.data['dec'] = [dec[i % 500] for i in gen_info[fiberid_key]]
            xfit, yfit = fit_func(obj_snr[1],
                                  snr['FITCOEFF_TGT'][obj_idx[obj_name]])
            fit.data['x'] = xfit
            fit.data['y'] = np.array(yfit)
            fit.data['y2'] = np.array(yfit)**2
            for key in ['fiber_id', 'ra', 'dec']:
                fit.data[key] = [''] * len(yfit)

        if obj_idx['ELG'] is not None:
            _fill_sources('ELG', elg, elg_fit, 'ELG_FIBERID')
        if obj_idx['LRG'] is not None:
            _fill_sources('LRG', lrg, lrg_fit, 'LRG_FIBERID')
        if obj_idx['QSO'] is not None:
            _fill_sources('QSO', qso, qso_fit, 'QSO_FIBERID')
        if obj_idx['STAR'] is not None:
            _fill_sources('STAR', star, star_fit, 'STAR_FIBERID')

        html_tooltip = """
            <div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">SNR: </span>
                    <span style="font-size: 1vw; color: #515151;">@y</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">DECAM_{}: </span>
                    <span style="font-size: 1vw; color: #515151;">@x</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">Fiber ID: </span>
                    <span style="font-size: 1vw; color: #515151;">@fiber_id</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">RA: </span>
                    <span style="font-size: 1vw; color: #515151;">@ra</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">Dec: </span>
                    <span style="font-size: 1vw; color: #515151">@dec</span>
                </div>
            </div>
        """.format(str(self.selected_arm).upper())

        # Clicking a glyph opens the Legacy Survey viewer at its position.
        url = "http://legacysurvey.org/viewer?ra=@ra&dec=@dec&zoom=16&layer=decals-dr5"

        def _target_plot(title, fit_source, point_source, color):
            """Build one SNR^2-vs-magnitude plot: fitted line + points."""
            plot = Plot2d(
                x_label="DECAM_{}".format(str(self.selected_arm).upper()),
                y_label="MEDIAN SNR^2",
                tooltip=html_tooltip,
                title=title,
                width=500,
                height=380,
                yscale="log",
            ).line(
                source=fit_source,
                y='y2',
            ).circle(
                source=point_source,
                size=8,
                y='y2',
                fill_color=color,
            ).plot
            taptool = plot.select(type=TapTool)
            taptool.callback = OpenURL(url=url)
            return plot

        elg_plot = _target_plot("ELG", elg_fit, elg, "blue")
        lrg_plot = _target_plot("LRG", lrg_fit, lrg, "red")
        qso_plot = _target_plot("QSO", qso_fit, qso, "green")
        star_plot = _target_plot("STAR", star_fit, star, "yellow")

        # Description panel shown at the top of the page.
        info_col = Title().write_description('snr')

        # -----------------
        # WEDGES
        snr_tooltip = """
            <div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">Resid: </span>
                    <span style="font-size: 1vw; color: #515151">@resid_snr</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">Obj Type: </span>
                    <span style="font-size: 1vw; color: #515151;">@OBJ_TYPE</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">RA: </span>
                    <span style="font-size: 1vw; color: #515151;">@x1</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">DEC: </span>
                    <span style="font-size: 1vw; color: #515151;">@y1</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">FIBER ID: </span>
                    <span style="font-size: 1vw; color: #515151;">@QLF_FIBERID</span>
                </div>
            </div>
        """

        median = snr['MEDIAN_SNR']
        resid = snr['SNR_RESID']
        my_palette = get_palette('bwr')

        # All fibers belonging to any available target class, in class order.
        fibersnr = [fiber for obj in avobj
                    for fiber in gen_info[obj + '_FIBERID']]

        source = ColumnDataSource(data={
            'x1': [ra[i % 500] for i in fibersnr],
            'y1': [dec[i % 500] for i in fibersnr],
            'resid_snr': resid,
            'QLF_FIBERID': fibersnr,
            'OBJ_TYPE': [obj_fiber[i % 500] for i in fibersnr],
            'median': median
        })

        # Fibers not covered by any target class (rendered without color).
        covered = {fiber % 500 for fiber in fibersnr}
        ra_not = []
        dec_not = []
        obj_not = []
        fiber_not = []
        for i in range(500):
            if i not in covered:
                ra_not.append(ra[i])
                dec_not.append(dec[i])
                fiber_not.append(i)
                obj_not.append(obj_fiber[i])

        source_not = ColumnDataSource(data={
            'x1': ra_not,
            'y1': dec_not,
            'resid_snr': ['']*len(dec_not),
            'QLF_FIBERID': fiber_not,
            'OBJ_TYPE': obj_not
        })

        rmax, rmin = np.nanmax(resid), np.nanmin(resid)
        if np.isnan(rmax) or np.isnan(rmin):
            # No finite residuals to color-map.  NOTE(review): the original
            # left `mapper` undefined here, raising a NameError at the wedge
            # call below; None at least lets the wedge render uncolored --
            # confirm Plot2d.wedge accepts mapper=None.
            mapper = None
        else:
            dy = (rmax - rmin)*0.1
            mapper = LinearColorMapper(palette=my_palette, nan_color='darkgray',
                                       low=rmin - dy, high=rmax+dy)

        # Centralize wedges in plots.
        ra_center = 0.5*(max(ra)+min(ra))
        dec_center = 0.5*(max(dec)+min(dec))
        xrange_wedge = Range1d(start=ra_center + .95, end=ra_center-.95)
        yrange_wedge = Range1d(start=dec_center+.82, end=dec_center-.82)

        # WEDGE RESIDUAL plot: colored wedges for target fibers, plain
        # wedges for the remaining fibers.
        wedge_plot = Plot2d(
            x_range=xrange_wedge,
            y_range=yrange_wedge,
            x_label="RA",
            y_label="DEC",
            tooltip=snr_tooltip,
            title='Residual SNR'+name_warn,
            width=500,
            height=380,
            yscale="auto"
        ).wedge(
            source,
            x='x1',
            y='y1',
            field='resid_snr',
            mapper=mapper,
        ).wedge(
            source_not,
            x='x1',
            y='y1',
        ).plot
        taptool = wedge_plot.select(type=TapTool)
        taptool.callback = OpenURL(url=url)

        # -------------------
        # Median plot
        median_tooltip = """
            <div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">MEDIAN: </span>
                    <span style="font-size: 1vw; color: #515151">@median</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">Resid: </span>
                    <span style="font-size: 1vw; color: #515151">@resid_snr</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">Obj Type: </span>
                    <span style="font-size: 1vw; color: #515151;">@OBJ_TYPE</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">RA: </span>
                    <span style="font-size: 1vw; color: #515151;">@x1</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">DEC: </span>
                    <span style="font-size: 1vw; color: #515151;">@y1</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">FIBER ID: </span>
                    <span style="font-size: 1vw; color: #515151;">@QLF_FIBERID</span>
                </div>
            </div>
        """

        median_plot = Plot2d(
            x_label="Fiber",
            y_label='Median SNR',
            tooltip=median_tooltip,
            title="",
            width=600,
            height=400,
            yscale="auto",
            hover_mode="vline",
        ).vbar(
            source,
            y="median",
            x="QLF_FIBERID",
            line_width=0.4,
        )

        # Summary table: measured FIDSNR per class vs program references.
        current_exposures = check_spectra['METRICS']['FIDSNR_TGT']
        program = gen_info['PROGRAM'].upper()
        reference_exposures = check_spectra['PARAMS']['FIDSNR_TGT_' +
                                                      program + '_REF']
        keynames = ["FIDSNR_TGT" + " ({})".format(i) for i in objlist]
        table = Table().single_table(keynames, current_exposures, reference_exposures, nrg, wrg)

        # Assemble the page (renamed from `layout` to avoid shadowing the
        # bokeh.layouts.layout import).
        page_layout = column(info_col, Div(),
                             table, Div(),
                             column(elg_plot, sizing_mode='scale_both'),
                             column(lrg_plot, sizing_mode='scale_both'),
                             column(qso_plot, sizing_mode='scale_both'),
                             column(star_plot, sizing_mode='scale_both'),
                             column(median_plot, sizing_mode='scale_both'),
                             column(wedge_plot, sizing_mode='scale_both'),
                             css_classes=["display-grid"])

        return file_html(page_layout, CDN, "MEDIAN SNR")
| |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Regular expressions for the CSS color notations accepted by
# Color.from_string().  The functional notations (rgb/rgba/hsl/hsla) are
# anchored at both ends; the hex patterns are matched from the start of the
# string only (via re.match), so trailing characters are not rejected there.
RGB_PATTERN = r"^\s*rgb\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*\)\s*$"
RGB_PCT_PATTERN = r"^\s*rgb\(\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*\)\s*$"
RGBA_PATTERN = r"^\s*rgba\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
RGBA_PCT_PATTERN = r"^\s*rgba\(\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
# Six-digit and three-digit hexadecimal forms; one capture group per channel.
HEX_PATTERN = r"#([A-Fa-f0-9]{2})([A-Fa-f0-9]{2})([A-Fa-f0-9]{2})"
HEX3_PATTERN = r"#([A-Fa-f0-9])([A-Fa-f0-9])([A-Fa-f0-9])"
HSL_PATTERN = r"^\s*hsl\(\s*(\d{1,3})\s*,\s*(\d{1,3})%\s*,\s*(\d{1,3})%\s*\)\s*$"
HSLA_PATTERN = r"^\s*hsla\(\s*(\d{1,3})\s*,\s*(\d{1,3})%\s*,\s*(\d{1,3})%\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
class Color(object):
"""
Color conversion support class
Example:
.. code-block:: python
from selenium.webdriver.support.color import Color
print(Color.from_string('#00ff33').rgba)
print(Color.from_string('rgb(1, 255, 3)').hex)
print(Color.from_string('blue').rgba)
"""
@staticmethod
def from_string(str_):
import re
class Matcher(object):
def __init__(self):
self.match_obj = None
def match(self, pattern, str_):
self.match_obj = re.match(pattern, str_)
return self.match_obj
@property
def groups(self):
return () if self.match_obj is None else self.match_obj.groups()
m = Matcher()
if m.match(RGB_PATTERN, str_):
return Color(*m.groups)
elif m.match(RGB_PCT_PATTERN, str_):
rgb = tuple([float(each) / 100 * 255 for each in m.groups])
return Color(*rgb)
elif m.match(RGBA_PATTERN, str_):
return Color(*m.groups)
elif m.match(RGBA_PCT_PATTERN, str_):
rgba = tuple([float(each) / 100 * 255 for each in m.groups[:3]] + [m.groups[3]])
return Color(*rgba)
elif m.match(HEX_PATTERN, str_):
rgb = tuple([int(each, 16) for each in m.groups])
return Color(*rgb)
elif m.match(HEX3_PATTERN, str_):
rgb = tuple([int(each * 2, 16) for each in m.groups])
return Color(*rgb)
elif m.match(HSL_PATTERN, str_) or m.match(HSLA_PATTERN, str_):
return Color._from_hsl(*m.groups)
elif str_.upper() in Colors.keys():
return Colors[str_.upper()]
else:
raise ValueError("Could not convert %s into color" % str_)
@staticmethod
def _from_hsl(h, s, l, a=1):
h = float(h) / 360
s = float(s) / 100
l = float(l) / 100
if s == 0:
r = l
g = r
b = r
else:
luminocity2 = l * (1 + s) if l < 0.5 else l + s - l * s
luminocity1 = 2 * l - luminocity2
def hue_to_rgb(lum1, lum2, hue):
if hue < 0.0:
hue += 1
if hue > 1.0:
hue -= 1
if hue < 1.0 / 6.0:
return (lum1 + (lum2 - lum1) * 6.0 * hue)
elif hue < 1.0 / 2.0:
return lum2
elif hue < 2.0 / 3.0:
return lum1 + (lum2 - lum1) * ((2.0 / 3.0) - hue) * 6.0
else:
return lum1
r = hue_to_rgb(luminocity1, luminocity2, h + 1.0 / 3.0)
g = hue_to_rgb(luminocity1, luminocity2, h)
b = hue_to_rgb(luminocity1, luminocity2, h - 1.0 / 3.0)
return Color(round(r * 255), round(g * 255), round(b * 255), a)
def __init__(self, red, green, blue, alpha=1):
self.red = int(red)
self.green = int(green)
self.blue = int(blue)
self.alpha = "1" if float(alpha) == 1 else str(float(alpha) or 0)
@property
def rgb(self):
return "rgb(%d, %d, %d)" % (self.red, self.green, self.blue)
@property
def rgba(self):
return "rgba(%d, %d, %d, %s)" % (self.red, self.green, self.blue, self.alpha)
@property
def hex(self):
return "#%02x%02x%02x" % (self.red, self.green, self.blue)
def __eq__(self, other):
if isinstance(other, Color):
return self.rgba == other.rgba
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __hash__(self):
return hash((self.red, self.green, self.blue, self.alpha))
def __repr__(self):
return "Color(red=%d, green=%d, blue=%d, alpha=%s)" % (self.red, self.green, self.blue, self.alpha)
def __str__(self):
return "Color: %s" % self.rgba
# Basic, extended and transparent colour keywords as defined by the W3C HTML4 spec
# See http://www.w3.org/TR/css3-color/#html4
# Keys are upper-cased keyword names; Color.from_string() upper-cases its
# input before the keyword lookup, so matching is case-insensitive.
Colors = {
    "TRANSPARENT": Color(0, 0, 0, 0),
    "ALICEBLUE": Color(240, 248, 255),
    "ANTIQUEWHITE": Color(250, 235, 215),
    "AQUA": Color(0, 255, 255),
    "AQUAMARINE": Color(127, 255, 212),
    "AZURE": Color(240, 255, 255),
    "BEIGE": Color(245, 245, 220),
    "BISQUE": Color(255, 228, 196),
    "BLACK": Color(0, 0, 0),
    "BLANCHEDALMOND": Color(255, 235, 205),
    "BLUE": Color(0, 0, 255),
    "BLUEVIOLET": Color(138, 43, 226),
    "BROWN": Color(165, 42, 42),
    "BURLYWOOD": Color(222, 184, 135),
    "CADETBLUE": Color(95, 158, 160),
    "CHARTREUSE": Color(127, 255, 0),
    "CHOCOLATE": Color(210, 105, 30),
    "CORAL": Color(255, 127, 80),
    "CORNFLOWERBLUE": Color(100, 149, 237),
    "CORNSILK": Color(255, 248, 220),
    "CRIMSON": Color(220, 20, 60),
    "CYAN": Color(0, 255, 255),
    "DARKBLUE": Color(0, 0, 139),
    "DARKCYAN": Color(0, 139, 139),
    "DARKGOLDENROD": Color(184, 134, 11),
    "DARKGRAY": Color(169, 169, 169),
    "DARKGREEN": Color(0, 100, 0),
    "DARKGREY": Color(169, 169, 169),
    "DARKKHAKI": Color(189, 183, 107),
    "DARKMAGENTA": Color(139, 0, 139),
    "DARKOLIVEGREEN": Color(85, 107, 47),
    "DARKORANGE": Color(255, 140, 0),
    "DARKORCHID": Color(153, 50, 204),
    "DARKRED": Color(139, 0, 0),
    "DARKSALMON": Color(233, 150, 122),
    "DARKSEAGREEN": Color(143, 188, 143),
    "DARKSLATEBLUE": Color(72, 61, 139),
    "DARKSLATEGRAY": Color(47, 79, 79),
    "DARKSLATEGREY": Color(47, 79, 79),
    "DARKTURQUOISE": Color(0, 206, 209),
    "DARKVIOLET": Color(148, 0, 211),
    "DEEPPINK": Color(255, 20, 147),
    "DEEPSKYBLUE": Color(0, 191, 255),
    "DIMGRAY": Color(105, 105, 105),
    "DIMGREY": Color(105, 105, 105),
    "DODGERBLUE": Color(30, 144, 255),
    "FIREBRICK": Color(178, 34, 34),
    "FLORALWHITE": Color(255, 250, 240),
    "FORESTGREEN": Color(34, 139, 34),
    "FUCHSIA": Color(255, 0, 255),
    "GAINSBORO": Color(220, 220, 220),
    "GHOSTWHITE": Color(248, 248, 255),
    "GOLD": Color(255, 215, 0),
    "GOLDENROD": Color(218, 165, 32),
    "GRAY": Color(128, 128, 128),
    "GREY": Color(128, 128, 128),
    "GREEN": Color(0, 128, 0),
    "GREENYELLOW": Color(173, 255, 47),
    "HONEYDEW": Color(240, 255, 240),
    "HOTPINK": Color(255, 105, 180),
    "INDIANRED": Color(205, 92, 92),
    "INDIGO": Color(75, 0, 130),
    "IVORY": Color(255, 255, 240),
    "KHAKI": Color(240, 230, 140),
    "LAVENDER": Color(230, 230, 250),
    "LAVENDERBLUSH": Color(255, 240, 245),
    "LAWNGREEN": Color(124, 252, 0),
    "LEMONCHIFFON": Color(255, 250, 205),
    "LIGHTBLUE": Color(173, 216, 230),
    "LIGHTCORAL": Color(240, 128, 128),
    "LIGHTCYAN": Color(224, 255, 255),
    "LIGHTGOLDENRODYELLOW": Color(250, 250, 210),
    "LIGHTGRAY": Color(211, 211, 211),
    "LIGHTGREEN": Color(144, 238, 144),
    "LIGHTGREY": Color(211, 211, 211),
    "LIGHTPINK": Color(255, 182, 193),
    "LIGHTSALMON": Color(255, 160, 122),
    "LIGHTSEAGREEN": Color(32, 178, 170),
    "LIGHTSKYBLUE": Color(135, 206, 250),
    "LIGHTSLATEGRAY": Color(119, 136, 153),
    "LIGHTSLATEGREY": Color(119, 136, 153),
    "LIGHTSTEELBLUE": Color(176, 196, 222),
    "LIGHTYELLOW": Color(255, 255, 224),
    "LIME": Color(0, 255, 0),
    "LIMEGREEN": Color(50, 205, 50),
    "LINEN": Color(250, 240, 230),
    "MAGENTA": Color(255, 0, 255),
    "MAROON": Color(128, 0, 0),
    "MEDIUMAQUAMARINE": Color(102, 205, 170),
    "MEDIUMBLUE": Color(0, 0, 205),
    "MEDIUMORCHID": Color(186, 85, 211),
    "MEDIUMPURPLE": Color(147, 112, 219),
    "MEDIUMSEAGREEN": Color(60, 179, 113),
    "MEDIUMSLATEBLUE": Color(123, 104, 238),
    "MEDIUMSPRINGGREEN": Color(0, 250, 154),
    "MEDIUMTURQUOISE": Color(72, 209, 204),
    "MEDIUMVIOLETRED": Color(199, 21, 133),
    "MIDNIGHTBLUE": Color(25, 25, 112),
    "MINTCREAM": Color(245, 255, 250),
    "MISTYROSE": Color(255, 228, 225),
    "MOCCASIN": Color(255, 228, 181),
    "NAVAJOWHITE": Color(255, 222, 173),
    "NAVY": Color(0, 0, 128),
    "OLDLACE": Color(253, 245, 230),
    "OLIVE": Color(128, 128, 0),
    "OLIVEDRAB": Color(107, 142, 35),
    "ORANGE": Color(255, 165, 0),
    "ORANGERED": Color(255, 69, 0),
    "ORCHID": Color(218, 112, 214),
    "PALEGOLDENROD": Color(238, 232, 170),
    "PALEGREEN": Color(152, 251, 152),
    "PALETURQUOISE": Color(175, 238, 238),
    "PALEVIOLETRED": Color(219, 112, 147),
    "PAPAYAWHIP": Color(255, 239, 213),
    "PEACHPUFF": Color(255, 218, 185),
    "PERU": Color(205, 133, 63),
    "PINK": Color(255, 192, 203),
    "PLUM": Color(221, 160, 221),
    "POWDERBLUE": Color(176, 224, 230),
    "PURPLE": Color(128, 0, 128),
    "REBECCAPURPLE": Color(128, 51, 153),
    "RED": Color(255, 0, 0),
    "ROSYBROWN": Color(188, 143, 143),
    "ROYALBLUE": Color(65, 105, 225),
    "SADDLEBROWN": Color(139, 69, 19),
    "SALMON": Color(250, 128, 114),
    "SANDYBROWN": Color(244, 164, 96),
    "SEAGREEN": Color(46, 139, 87),
    "SEASHELL": Color(255, 245, 238),
    "SIENNA": Color(160, 82, 45),
    "SILVER": Color(192, 192, 192),
    "SKYBLUE": Color(135, 206, 235),
    "SLATEBLUE": Color(106, 90, 205),
    "SLATEGRAY": Color(112, 128, 144),
    "SLATEGREY": Color(112, 128, 144),
    "SNOW": Color(255, 250, 250),
    "SPRINGGREEN": Color(0, 255, 127),
    "STEELBLUE": Color(70, 130, 180),
    "TAN": Color(210, 180, 140),
    "TEAL": Color(0, 128, 128),
    "THISTLE": Color(216, 191, 216),
    "TOMATO": Color(255, 99, 71),
    "TURQUOISE": Color(64, 224, 208),
    "VIOLET": Color(238, 130, 238),
    "WHEAT": Color(245, 222, 179),
    "WHITE": Color(255, 255, 255),
    "WHITESMOKE": Color(245, 245, 245),
    "YELLOW": Color(255, 255, 0),
    "YELLOWGREEN": Color(154, 205, 50)
}
| |
# -*- coding: utf-8 -*-
"""
werkzeug.debug.console
~~~~~~~~~~~~~~~~~~~~~~
Interactive console support.
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import sys
import code
from types import CodeType
from werkzeug.utils import escape
from werkzeug.local import Local
from werkzeug.debug.repr import debug_repr, dump, helper
from werkzeug.debug.utils import render_template
_local = Local()
class HTMLStringO(object):
    """A write-only, file-like string buffer that HTML-escapes on write."""

    def __init__(self):
        self._buffer = []

    def isatty(self):
        # Never attached to a real terminal.
        return False

    def close(self):
        pass

    def flush(self):
        pass

    def seek(self, n, mode=0):
        # Write-only stream: mimic a closed file descriptor for reads/seeks.
        raise IOError('Bad file descriptor')

    def readline(self):
        raise IOError('Bad file descriptor')

    def reset(self):
        """Return everything buffered so far and clear the buffer."""
        pending = self._buffer
        collected = ''.join(pending)
        del pending[:]
        return collected

    def _write(self, x):
        # Decode byte strings so the eventual join cannot mix types
        # (this module is Python 2 code where ``str`` is the byte type).
        if isinstance(x, str):
            x = x.decode('utf-8', 'replace')
        self._buffer.append(x)

    def write(self, x):
        # Public writes are always escaped; raw HTML goes via _write.
        self._write(escape(x))

    def writelines(self, x):
        self._write(escape(''.join(x)))
class ThreadedStream(object):
    """Thread-local wrapper for sys.stdout for the interactive console.

    Once :meth:`push` runs, ``sys.stdout`` is this proxy; a thread that
    pushed gets its own :class:`HTMLStringO` buffer in ``_local.stream``
    (``_local`` is a werkzeug thread-local), while other threads fall
    through to the real ``sys.__stdout__``.
    """

    def push():
        """Install the proxy (once) and give this thread a fresh buffer."""
        if not isinstance(sys.stdout, ThreadedStream):
            sys.stdout = ThreadedStream()
        _local.stream = HTMLStringO()
    push = staticmethod(push)

    def fetch():
        """Return and clear this thread's buffered output, '' if no buffer."""
        try:
            stream = _local.stream
        except AttributeError:
            # This thread never pushed a buffer.
            return ''
        return stream.reset()
    fetch = staticmethod(fetch)

    def displayhook(obj):
        """Display hook writing HTML reprs into the thread's buffer."""
        try:
            stream = _local.stream
        except AttributeError:
            # No console buffer here: delegate to the original hook.
            return _displayhook(obj)
        # stream._write bypasses escaping as debug_repr is
        # already generating HTML for us.
        if obj is not None:
            stream._write(debug_repr(obj))
    displayhook = staticmethod(displayhook)

    def __setattr__(self, name, value):
        # The proxy itself is immutable; per-thread state lives in _local.
        raise AttributeError('read only attribute %s' % name)

    def __dir__(self):
        return dir(sys.__stdout__)

    def __getattribute__(self, name):
        if name == '__members__':
            return dir(sys.__stdout__)
        # Forward every attribute access to the thread's buffer, or to
        # the real stdout when this thread has no buffer.
        try:
            stream = _local.stream
        except AttributeError:
            stream = sys.__stdout__
        return getattr(stream, name)

    def __repr__(self):
        return repr(sys.__stdout__)
# add the threaded stream as display hook
# Keep a reference to the original hook so ThreadedStream.displayhook can
# delegate to it for threads that have no console buffer.
_displayhook = sys.displayhook
sys.displayhook = ThreadedStream.displayhook
class _ConsoleLoader(object):
    """Maps code objects (keyed by ``id``) back to their console source."""

    def __init__(self):
        self._storage = {}

    def register(self, code, source):
        """Remember *source* for *code* and its directly nested code objects."""
        storage = self._storage
        storage[id(code)] = source
        # register code objects of wrapped functions too.
        for const in code.co_consts:
            if isinstance(const, CodeType):
                storage[id(const)] = source

    def get_source_by_code(self, code):
        """Return the source registered for *code*, or None if unknown."""
        return self._storage.get(id(code))
def _wrap_compiler(console):
    """Patch *console*.compile so every compile is registered with its loader.

    Registration lets the debugger show console-entered source in tracebacks.
    """
    original_compile = console.compile

    def compile_and_register(source, filename, symbol):
        code_obj = original_compile(source, filename, symbol)
        console.loader.register(code_obj, source)
        return code_obj

    console.compile = compile_and_register
class _InteractiveConsole(code.InteractiveInterpreter):
    """Interpreter whose transcript (prompt + input + output) is returned
    as a string from :meth:`runsource`, with output captured through
    :class:`ThreadedStream`.

    NOTE: Python 2 code (see the ``exec ... in ...`` statement below).
    """

    def __init__(self, globals, locals):
        code.InteractiveInterpreter.__init__(self, locals)
        self.globals = dict(globals)
        # Convenience helpers exposed inside the console namespace.
        self.globals['dump'] = dump
        self.globals['help'] = helper
        self.globals['__loader__'] = self.loader = _ConsoleLoader()
        self.more = False
        # Lines of a not-yet-complete statement, joined on each attempt.
        self.buffer = []
        _wrap_compiler(self)

    def runsource(self, source):
        """Run one line of input and return the transcript chunk for it."""
        source = source.rstrip() + '\n'
        ThreadedStream.push()
        # Continuation prompt while a multi-line statement is pending.
        prompt = self.more and '... ' or '>>> '
        try:
            source_to_eval = ''.join(self.buffer + [source])
            if code.InteractiveInterpreter.runsource(self,
                    source_to_eval, '<debugger>', 'single'):
                # Incomplete statement: keep buffering lines.
                self.more = True
                self.buffer.append(source)
            else:
                self.more = False
                del self.buffer[:]
        finally:
            # Always collect whatever the code printed, even on error.
            output = ThreadedStream.fetch()
        return prompt + source + output

    def runcode(self, code):
        try:
            exec code in self.globals, self.locals
        except:
            # Bare except on purpose: any console error becomes a rendered
            # traceback rather than propagating into the debugger.
            self.showtraceback()

    def showtraceback(self):
        from werkzeug.debug.tbtools import get_current_traceback
        tb = get_current_traceback(skip=1)
        # _write bypasses escaping; render_summary output is already HTML.
        sys.stdout._write(tb.render_summary())

    def showsyntaxerror(self, filename=None):
        from werkzeug.debug.tbtools import get_current_traceback
        # skip=4 hides the interpreter's own frames from the summary.
        tb = get_current_traceback(skip=4)
        sys.stdout._write(tb.render_summary())

    def write(self, data):
        sys.stdout.write(data)
class Console(object):
    """Thin public facade over :class:`_InteractiveConsole`."""

    def __init__(self, globals=None, locals=None):
        # Default to fresh namespaces, but keep caller-supplied dicts
        # by reference so callers can observe mutations.
        if globals is None:
            globals = {}
        if locals is None:
            locals = {}
        self._ipy = _InteractiveConsole(globals, locals)

    def eval(self, code):
        """Run one line of console input and return its transcript."""
        return self._ipy.runsource(code)
| |
"""The tests for the Owntracks device tracker."""
import json
from asynctest import patch
import pytest
from homeassistant.components import owntracks
from homeassistant.const import STATE_NOT_HOME
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry, async_fire_mqtt_message, async_mock_mqtt_component,
mock_coro)
# Test identity; OwnTracks MQTT topics are keyed on user/device.
USER = 'greg'
DEVICE = 'phone'
LOCATION_TOPIC = 'owntracks/{}/{}'.format(USER, DEVICE)
EVENT_TOPIC = 'owntracks/{}/{}/event'.format(USER, DEVICE)
WAYPOINTS_TOPIC = 'owntracks/{}/{}/waypoints'.format(USER, DEVICE)
WAYPOINT_TOPIC = 'owntracks/{}/{}/waypoint'.format(USER, DEVICE)
# A user not on the waypoint whitelist (see the context fixture), whose
# waypoint updates should be ignored.
USER_BLACKLIST = 'ram'
WAYPOINTS_TOPIC_BLOCKED = 'owntracks/{}/{}/waypoints'.format(
    USER_BLACKLIST, DEVICE)
LWT_TOPIC = 'owntracks/{}/{}/lwt'.format(USER, DEVICE)
BAD_TOPIC = 'owntracks/{}/{}/unsupported'.format(USER, DEVICE)
# Entity id under which the tracked device is expected to appear.
DEVICE_TRACKER_STATE = 'device_tracker.{}_{}'.format(USER, DEVICE)
IBEACON_DEVICE = 'keys'
# Entity id format for mobile beacon trackers.
MOBILE_BEACON_FMT = 'device_tracker.beacon_{}'
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
# Component config keys aliased locally for brevity.
CONF_WAYPOINT_IMPORT = owntracks.CONF_WAYPOINT_IMPORT
CONF_WAYPOINT_WHITELIST = owntracks.CONF_WAYPOINT_WHITELIST
CONF_SECRET = owntracks.CONF_SECRET
CONF_MQTT_TOPIC = owntracks.CONF_MQTT_TOPIC
CONF_EVENTS_ONLY = owntracks.CONF_EVENTS_ONLY
CONF_REGION_MAPPING = owntracks.CONF_REGION_MAPPING
TEST_ZONE_LAT = 45.0
TEST_ZONE_LON = 90.0
# Degrees-per-metre scale used to derive small offsets (ZONE_EDGE, FIVE_M).
TEST_ZONE_DEG_PER_M = 0.0000127
FIVE_M = TEST_ZONE_DEG_PER_M * 5.0
# Home Assistant Zones
INNER_ZONE = {
    'name': 'zone',
    'latitude': TEST_ZONE_LAT + 0.1,
    'longitude': TEST_ZONE_LON + 0.1,
    'radius': 50
}
OUTER_ZONE = {
    'name': 'zone',
    'latitude': TEST_ZONE_LAT,
    'longitude': TEST_ZONE_LON,
    'radius': 100000
}
def build_message(test_params, default_params):
    """Build a test message from overrides and another message.

    Returns a new dict: *default_params* with *test_params* merged on top.
    Neither input is modified.
    """
    return dict(default_params, **test_params)
# Default message parameters
DEFAULT_LOCATION_MESSAGE = {
    '_type': 'location',
    'lon': OUTER_ZONE['longitude'],
    'lat': OUTER_ZONE['latitude'],
    'acc': 60,
    'tid': 'user',
    't': 'u',
    'batt': 92,
    'cog': 248,
    'alt': 27,
    'p': 101.3977584838867,
    'vac': 4,
    'tst': 1,
    'vel': 0
}
# Owntracks will publish a transition when crossing
# a circular region boundary.
# Radius of the inner zone expressed in degrees.
ZONE_EDGE = TEST_ZONE_DEG_PER_M * INNER_ZONE['radius']
DEFAULT_TRANSITION_MESSAGE = {
    '_type': 'transition',
    't': 'c',
    'lon': INNER_ZONE['longitude'],
    'lat': INNER_ZONE['latitude'] - ZONE_EDGE,
    'acc': 60,
    'event': 'enter',
    'tid': 'user',
    'desc': 'inner',
    'wtst': 1,
    'tst': 2
}
# iBeacons that are named the same as an HA zone
# are used to trigger enter and leave updates
# for that zone. In this case the "inner" zone.
#
# iBeacons that do not share an HA zone name
# are treated as mobile tracking devices for
# objects which can't track themselves e.g. keys.
#
# iBeacons are typically configured with the
# default lat/lon 0.0/0.0 and have acc 0.0 but
# regardless the reported location is not trusted.
#
# Owntracks will send both a location message
# for the device and an 'event' message for
# the beacon transition.
DEFAULT_BEACON_TRANSITION_MESSAGE = {
    '_type': 'transition',
    't': 'b',
    'lon': 0.0,
    'lat': 0.0,
    'acc': 0.0,
    'event': 'enter',
    'tid': 'user',
    'desc': 'inner',
    'wtst': 1,
    'tst': 2
}
# Location messages
LOCATION_MESSAGE = DEFAULT_LOCATION_MESSAGE
LOCATION_MESSAGE_INACCURATE = build_message(
    {'lat': INNER_ZONE['latitude'] - ZONE_EDGE,
     'lon': INNER_ZONE['longitude'] - ZONE_EDGE,
     'acc': 2000},
    LOCATION_MESSAGE)
LOCATION_MESSAGE_ZERO_ACCURACY = build_message(
    {'lat': INNER_ZONE['latitude'] - ZONE_EDGE,
     'lon': INNER_ZONE['longitude'] - ZONE_EDGE,
     'acc': 0},
    LOCATION_MESSAGE)
LOCATION_MESSAGE_NOT_HOME = build_message(
    {'lat': OUTER_ZONE['latitude'] - 2.0,
     'lon': INNER_ZONE['longitude'] - 2.0,
     'acc': 100},
    LOCATION_MESSAGE)
# Region GPS messages
REGION_GPS_ENTER_MESSAGE = DEFAULT_TRANSITION_MESSAGE
REGION_GPS_LEAVE_MESSAGE = build_message(
    {'lon': INNER_ZONE['longitude'] - ZONE_EDGE * 10,
     'lat': INNER_ZONE['latitude'] - ZONE_EDGE * 10,
     'event': 'leave'},
    DEFAULT_TRANSITION_MESSAGE)
REGION_GPS_ENTER_MESSAGE_INACCURATE = build_message(
    {'acc': 2000},
    REGION_GPS_ENTER_MESSAGE)
REGION_GPS_LEAVE_MESSAGE_INACCURATE = build_message(
    {'acc': 2000},
    REGION_GPS_LEAVE_MESSAGE)
REGION_GPS_ENTER_MESSAGE_ZERO = build_message(
    {'acc': 0},
    REGION_GPS_ENTER_MESSAGE)
REGION_GPS_LEAVE_MESSAGE_ZERO = build_message(
    {'acc': 0},
    REGION_GPS_LEAVE_MESSAGE)
REGION_GPS_LEAVE_MESSAGE_OUTER = build_message(
    {'lon': OUTER_ZONE['longitude'] - 2.0,
     'lat': OUTER_ZONE['latitude'] - 2.0,
     'desc': 'outer',
     'event': 'leave'},
    DEFAULT_TRANSITION_MESSAGE)
REGION_GPS_ENTER_MESSAGE_OUTER = build_message(
    {'lon': OUTER_ZONE['longitude'],
     'lat': OUTER_ZONE['latitude'],
     'desc': 'outer',
     'event': 'enter'},
    DEFAULT_TRANSITION_MESSAGE)
# Region Beacon messages
REGION_BEACON_ENTER_MESSAGE = DEFAULT_BEACON_TRANSITION_MESSAGE
REGION_BEACON_LEAVE_MESSAGE = build_message(
    {'event': 'leave'},
    DEFAULT_BEACON_TRANSITION_MESSAGE)
# Mobile Beacon messages
MOBILE_BEACON_ENTER_EVENT_MESSAGE = build_message(
    {'desc': IBEACON_DEVICE},
    DEFAULT_BEACON_TRANSITION_MESSAGE)
MOBILE_BEACON_LEAVE_EVENT_MESSAGE = build_message(
    {'desc': IBEACON_DEVICE,
     'event': 'leave'},
    DEFAULT_BEACON_TRANSITION_MESSAGE)
# Waypoint messages
WAYPOINTS_EXPORTED_MESSAGE = {
    "_type": "waypoints",
    "_creator": "test",
    "waypoints": [
        {
            "_type": "waypoint",
            "tst": 3,
            "lat": 47,
            "lon": 9,
            "rad": 10,
            "desc": "exp_wayp1"
        },
        {
            "_type": "waypoint",
            "tst": 4,
            "lat": 3,
            "lon": 9,
            "rad": 500,
            "desc": "exp_wayp2"
        }
    ]
}
WAYPOINTS_UPDATED_MESSAGE = {
    "_type": "waypoints",
    "_creator": "test",
    "waypoints": [
        {
            "_type": "waypoint",
            "tst": 4,
            "lat": 9,
            "lon": 47,
            "rad": 50,
            "desc": "exp_wayp1"
        },
    ]
}
WAYPOINT_MESSAGE = {
    "_type": "waypoint",
    "tst": 4,
    "lat": 9,
    "lon": 47,
    "rad": 50,
    "desc": "exp_wayp1"
}
# Zone entity ids the imported waypoints are expected to create.
WAYPOINT_ENTITY_NAMES = [
    'zone.greg_phone_exp_wayp1',
    'zone.greg_phone_exp_wayp2',
    'zone.ram_phone_exp_wayp1',
    'zone.ram_phone_exp_wayp2',
]
# Last-will-and-testament and unsupported-type messages.
LWT_MESSAGE = {
    "_type": "lwt",
    "tst": 1
}
BAD_MESSAGE = {
    "_type": "unsupported",
    "tst": 1
}
# Wrappers used to corrupt an otherwise valid JSON payload.
BAD_JSON_PREFIX = '--$this is bad json#--'
BAD_JSON_SUFFIX = '** and it ends here ^^'
# pylint: disable=invalid-name, len-as-condition, redefined-outer-name
@pytest.fixture
def setup_comp(hass, mock_device_tracker_conf):
    """Initialize the components every test needs.

    Sets up persistent notifications, the device tracker, a mocked MQTT
    broker, and the three test zones.
    """
    run = hass.loop.run_until_complete
    assert run(async_setup_component(hass, 'persistent_notification', {}))
    run(async_setup_component(hass, 'device_tracker', {}))
    run(async_mock_mqtt_component(hass))
    zones = (
        ('zone.inner', INNER_ZONE),
        ('zone.inner_2', INNER_ZONE),
        ('zone.outer', OUTER_ZONE),
    )
    for zone_id, zone_attrs in zones:
        hass.states.async_set(zone_id, 'zoning', zone_attrs)
    yield
async def setup_owntracks(hass, config,
                          ctx_cls=owntracks.OwnTracksContext):
    """Set up the OwnTracks component with *config*.

    *ctx_cls* replaces ``OwnTracksContext`` for the duration of setup so
    tests can capture or substitute the created context.
    """
    entry = MockConfigEntry(domain='owntracks', data={
        'webhook_id': 'owntracks_test',
        'secret': 'abcd',
    })
    entry.add_to_hass(hass)
    with patch.object(owntracks, 'OwnTracksContext', ctx_cls):
        assert await async_setup_component(
            hass, 'owntracks', {'owntracks': config})
        await hass.async_block_till_done()
@pytest.fixture
def context(hass, setup_comp):
    """Set up OwnTracks and yield an accessor for the live context."""
    real_context_cls = owntracks.OwnTracksContext
    captured = None

    # pylint: disable=no-value-for-parameter
    def capture_context(*args):
        """Build the real context but keep a reference for the test."""
        nonlocal captured
        captured = real_context_cls(*args)
        return captured

    hass.loop.run_until_complete(setup_owntracks(hass, {
        CONF_MAX_GPS_ACCURACY: 200,
        CONF_WAYPOINT_IMPORT: True,
        CONF_WAYPOINT_WHITELIST: ['jon', 'greg']
    }, capture_context))

    # Yield a getter (not the object) because the context is created
    # during setup, after this fixture body has started.
    yield lambda: captured
async def send_message(hass, topic, message, corrupt=False):
    """Publish *message* as JSON on *topic* and let HA process it.

    With ``corrupt=True`` the payload is wrapped in junk so it no longer
    parses as JSON.
    """
    payload = json.dumps(message)
    if corrupt:
        payload = BAD_JSON_PREFIX + payload + BAD_JSON_SUFFIX
    async_fire_mqtt_message(hass, topic, payload)
    # Block twice — presumably so tasks scheduled by the first round of
    # handlers also finish; mirrors the original test pattern.
    await hass.async_block_till_done()
    await hass.async_block_till_done()
def assert_location_state(hass, location):
    """Assert the tracked device's state equals *location*."""
    assert hass.states.get(DEVICE_TRACKER_STATE).state == location
def assert_location_latitude(hass, latitude):
    """Assert the tracked device's latitude attribute equals *latitude*."""
    attrs = hass.states.get(DEVICE_TRACKER_STATE).attributes
    assert attrs.get('latitude') == latitude
def assert_location_longitude(hass, longitude):
    """Assert the tracked device's longitude attribute equals *longitude*."""
    attrs = hass.states.get(DEVICE_TRACKER_STATE).attributes
    assert attrs.get('longitude') == longitude
def assert_location_accuracy(hass, accuracy):
    """Assert the tracked device's GPS accuracy equals *accuracy*."""
    attrs = hass.states.get(DEVICE_TRACKER_STATE).attributes
    assert attrs.get('gps_accuracy') == accuracy
def assert_location_source_type(hass, source_type):
    """Assert the tracked device's source_type equals *source_type*."""
    attrs = hass.states.get(DEVICE_TRACKER_STATE).attributes
    assert attrs.get('source_type') == source_type
def assert_mobile_tracker_state(hass, location, beacon=IBEACON_DEVICE):
    """Assert the mobile beacon tracker's state equals *location*."""
    tracker = hass.states.get(MOBILE_BEACON_FMT.format(beacon))
    assert tracker.state == location
def assert_mobile_tracker_latitude(hass, latitude, beacon=IBEACON_DEVICE):
    """Assert the mobile beacon tracker's latitude equals *latitude*."""
    tracker = hass.states.get(MOBILE_BEACON_FMT.format(beacon))
    assert tracker.attributes.get('latitude') == latitude
def assert_mobile_tracker_accuracy(hass, accuracy, beacon=IBEACON_DEVICE):
    """Assert the mobile beacon tracker's GPS accuracy equals *accuracy*."""
    tracker = hass.states.get(MOBILE_BEACON_FMT.format(beacon))
    assert tracker.attributes.get('gps_accuracy') == accuracy
async def test_location_invalid_devid(hass, context):
    """Test that a device id with invalid characters is slugified."""
    # 'nexus-5x' should become 'nexus_5x' in the entity id.
    await send_message(hass, 'owntracks/paulus/nexus-5x', LOCATION_MESSAGE)
    state = hass.states.get('device_tracker.paulus_nexus_5x')
    assert state.state == 'outer'
async def test_location_update(hass, context):
    """Test the update of a location."""
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Position and accuracy are taken straight from the message.
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
    assert_location_accuracy(hass, LOCATION_MESSAGE['acc'])
    assert_location_state(hass, 'outer')
async def test_location_inaccurate_gps(hass, context):
    """Test that a location with accuracy above the maximum is ignored."""
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_INACCURATE)
    # Ignored inaccurate GPS. Location remains at previous.
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
    assert_location_longitude(hass, LOCATION_MESSAGE['lon'])
async def test_location_zero_accuracy_gps(hass, context):
    """Ignore the location for zero accuracy GPS information."""
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_ZERO_ACCURACY)
    # Ignored zero-accuracy GPS. Location remains at previous.
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
    assert_location_longitude(hass, LOCATION_MESSAGE['lon'])
# ------------------------------------------------------------------------
# GPS based event entry / exit testing
async def test_event_gps_entry_exit(hass, context):
    """Test a GPS enter/leave transition cycle for a region."""
    # Entering the owntracks circular region named "inner"
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Updates ignored when in a zone
    # note that LOCATION_MESSAGE is actually pretty far
    # from INNER_ZONE and has good accuracy. I haven't
    # received a transition message though so I'm still
    # associated with the inner zone regardless of GPS.
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    # Exit switches back to GPS
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_location_accuracy(hass, REGION_GPS_LEAVE_MESSAGE['acc'])
    assert_location_state(hass, 'outer')
    # Left clean zone state
    assert not context().regions_entered[USER]
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Now sending a location update moves me again.
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
    assert_location_accuracy(hass, LOCATION_MESSAGE['acc'])
async def test_event_gps_with_spaces(hass, context):
    """Test enter/leave events for a region whose name contains spaces."""
    message = build_message({'desc': "inner 2"},
                            REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner 2')
    message = build_message({'desc': "inner 2"},
                            REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # Left clean zone state
    assert not context().regions_entered[USER]
async def test_event_gps_entry_inaccurate(hass, context):
    """Test the event for inaccurate entry."""
    # Set location to the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_INACCURATE)
    # I enter the zone even though the message GPS was inaccurate:
    # enter events use the zone's coordinates, not the message's.
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')
async def test_event_gps_entry_exit_inaccurate(hass, context):
    """Test the event for inaccurate exit."""
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_INACCURATE)
    # Exit doesn't use inaccurate gps: the previous location is kept.
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')
    # But does exit region correctly
    assert not context().regions_entered[USER]
async def test_event_gps_entry_exit_zero_accuracy(hass, context):
    """Test entry/exit events with accuracy zero."""
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_ZERO)
    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_ZERO)
    # Exit doesn't use zero-accuracy gps: the previous location is kept.
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')
    # But does exit region correctly
    assert not context().regions_entered[USER]
async def test_event_gps_exit_outside_zone_sets_away(hass, context):
    """Test that leaving a zone from far outside any zone sets not_home."""
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_state(hass, 'inner')
    # Exit message far away GPS location
    message = build_message(
        {'lon': 90.0,
         'lat': 90.0},
        REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # Exit forces zone change to away
    assert_location_state(hass, STATE_NOT_HOME)
async def test_event_gps_entry_exit_right_order(hass, context):
    """Test nested enter/leave events arriving in the expected order."""
    # Enter inner zone
    # Set location to the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_state(hass, 'inner')
    # Enter inner2 zone
    message = build_message(
        {'desc': "inner_2"},
        REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner_2')
    # Exit inner_2 - should be in 'inner'
    message = build_message(
        {'desc': "inner_2"},
        REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner')
    # Exit inner - should be in 'outer'
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_location_accuracy(hass, REGION_GPS_LEAVE_MESSAGE['acc'])
    assert_location_state(hass, 'outer')
async def test_event_gps_entry_exit_wrong_order(hass, context):
    """Test nested enter/leave events arriving out of order."""
    # Enter inner zone
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_state(hass, 'inner')
    # Enter inner2 zone
    message = build_message(
        {'desc': "inner_2"},
        REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner_2')
    # Exit inner - should still be in 'inner_2'
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    assert_location_state(hass, 'inner_2')
    # Exit inner_2 - should be in 'outer'
    message = build_message(
        {'desc': "inner_2"},
        REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_location_accuracy(hass, REGION_GPS_LEAVE_MESSAGE['acc'])
    assert_location_state(hass, 'outer')
async def test_event_gps_entry_unknown_zone(hass, context):
    """Test an enter event for a region with no matching HA zone."""
    # Just treat as location update
    message = build_message(
        {'desc': "unknown"},
        REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_latitude(hass, REGION_GPS_ENTER_MESSAGE['lat'])
    # The message coordinates fall inside the inner zone.
    assert_location_state(hass, 'inner')
async def test_event_gps_exit_unknown_zone(hass, context):
    """Test a leave event for a region with no matching HA zone."""
    # Just treat as location update
    message = build_message(
        {'desc': "unknown"},
        REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_location_state(hass, 'outer')
async def test_event_entry_zone_loading_dash(hass, context):
    """Test that a leading dash on the region name is ignored."""
    # Make sure the leading - is ignored
    # Owntracks uses this to switch on hold
    message = build_message(
        {'desc': "-inner"},
        REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner')
async def test_events_only_on(hass, context):
    """Test events_only config suppresses location updates."""
    # Sending a location message that is not home
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_location_state(hass, STATE_NOT_HOME)
    context().events_only = True
    # Enter and Leave messages are still processed...
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_OUTER)
    assert_location_state(hass, 'outer')
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_OUTER)
    assert_location_state(hass, STATE_NOT_HOME)
    # Sending a location message that is inside outer zone
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # ...but the plain location update is ignored. Location remains at previous.
    assert_location_state(hass, STATE_NOT_HOME)
async def test_events_only_off(hass, context):
    """Test that location updates are processed when events_only is False."""
    # Sending a location message that is not home
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_location_state(hass, STATE_NOT_HOME)
    context().events_only = False
    # Enter and Leave messages
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_OUTER)
    assert_location_state(hass, 'outer')
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_OUTER)
    assert_location_state(hass, STATE_NOT_HOME)
    # Sending a location message that is inside outer zone
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Location update processed
    assert_location_state(hass, 'outer')
async def test_event_source_type_entry_exit(hass, context):
    """Test that source_type tracks gps vs bluetooth_le transitions."""
    # Entering the owntracks circular region named "inner"
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    # source_type should be gps when entering using gps.
    assert_location_source_type(hass, 'gps')
    # owntracks shouldn't send beacon events with acc = 0
    await send_message(hass, EVENT_TOPIC, build_message(
        {'acc': 1}, REGION_BEACON_ENTER_MESSAGE))
    # We should be able to enter a beacon zone even inside a gps zone
    assert_location_source_type(hass, 'bluetooth_le')
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    # source_type should be gps when leaving using gps.
    assert_location_source_type(hass, 'gps')
    # owntracks shouldn't send beacon events with acc = 0
    await send_message(hass, EVENT_TOPIC, build_message(
        {'acc': 1}, REGION_BEACON_LEAVE_MESSAGE))
    assert_location_source_type(hass, 'bluetooth_le')
# Region Beacon based event entry / exit testing
async def test_event_region_entry_exit(hass, context):
    """Test a region-beacon enter/leave cycle."""
    # Seeing a beacon named "inner"
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Updates ignored when in a zone
    # note that LOCATION_MESSAGE is actually pretty far
    # from INNER_ZONE and has good accuracy. I haven't
    # received a transition message though so I'm still
    # associated with the inner zone regardless of GPS.
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    # Exit switches back to GPS but the beacon has no coords
    # so I am still located at the center of the inner region
    # until I receive a location update.
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')
    # Left clean zone state
    assert not context().regions_entered[USER]
    # Now sending a location update moves me again.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
    assert_location_accuracy(hass, LOCATION_MESSAGE['acc'])
async def test_event_region_with_spaces(hass, context):
    """Test beacon enter/leave for a region whose name contains spaces."""
    message = build_message({'desc': "inner 2"},
                            REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner 2')
    message = build_message({'desc': "inner 2"},
                            REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # Left clean zone state
    assert not context().regions_entered[USER]
async def test_event_region_entry_exit_right_order(hass, context):
    """Test nested beacon enter/leave events arriving in order."""
    # Enter inner zone
    # Set location to the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # See 'inner' region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    assert_location_state(hass, 'inner')
    # See 'inner_2' region beacon
    message = build_message(
        {'desc': "inner_2"},
        REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner_2')
    # Exit inner_2 - should be in 'inner'
    message = build_message(
        {'desc': "inner_2"},
        REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner')
    # Exit inner - should be in 'outer'
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    # I have not had an actual location update yet and my
    # coordinates are set to the center of the last region I
    # entered which puts me in the inner zone.
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')
async def test_event_region_entry_exit_wrong_order(hass, context):
    """Test nested beacon enter/leave events arriving out of order."""
    # Enter inner zone
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    assert_location_state(hass, 'inner')
    # Enter inner2 zone
    message = build_message(
        {'desc': "inner_2"},
        REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner_2')
    # Exit inner - should still be in 'inner_2'
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    assert_location_state(hass, 'inner_2')
    # Exit inner_2 - should be in 'outer'
    message = build_message(
        {'desc': "inner_2"},
        REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # I have not had an actual location update yet and my
    # coordinates are set to the center of the last region I
    # entered which puts me in the inner_2 zone.
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner_2')
async def test_event_beacon_unknown_zone_no_location(hass, context):
    """Test an unknown mobile beacon seen before any device location."""
    # A beacon which does not match a HA zone is the
    # definition of a mobile beacon. In this case, "unknown"
    # will be turned into device_tracker.beacon_unknown and
    # that will be tracked at my current location. Except
    # in this case my Device hasn't had a location message
    # yet so it's in an odd state where it has state.state
    # None and no GPS coords to set the beacon to.
    hass.states.async_set(DEVICE_TRACKER_STATE, None)
    message = build_message(
        {'desc': "unknown"},
        REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # My current state is None because I haven't seen a
    # location message or a GPS or Region # Beacon event
    # message. None is the state the test harness set for
    # the Device during test case setup.
    assert_location_state(hass, 'None')
    # We have had no location yet, so the beacon status
    # set to unknown.
    assert_mobile_tracker_state(hass, 'unknown', 'unknown')
async def test_event_beacon_unknown_zone(hass, context):
    """Test an unknown mobile beacon seen after a device location."""
    # A beacon which does not match a HA zone is the
    # definition of a mobile beacon. In this case, "unknown"
    # will be turned into device_tracker.beacon_unknown and
    # that will be tracked at my current location. First I
    # set my location so that my state is 'outer'
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_state(hass, 'outer')
    message = build_message(
        {'desc': "unknown"},
        REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # My state is still outer and now the unknown beacon
    # has joined me at outer.
    assert_location_state(hass, 'outer')
    assert_mobile_tracker_state(hass, 'outer', 'unknown')
async def test_event_beacon_entry_zone_loading_dash(hass, context):
    """Test that a leading dash on a beacon region name is ignored."""
    # Make sure the leading - is ignored
    # Owntracks uses this to switch on hold
    message = build_message(
        {'desc': "-inner"},
        REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner')
# ------------------------------------------------------------------------
# Mobile Beacon based event entry / exit testing
async def test_mobile_enter_move_beacon(hass, context):
    """Test that a mobile beacon follows the device's location."""
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # I see the 'keys' beacon. I set the location of the
    # beacon_keys tracker to my current device location.
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, LOCATION_MESSAGE['lat'])
    assert_mobile_tracker_state(hass, 'outer')
    # Location update to outside of defined zones.
    # I am now 'not home' and neither are my keys.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_location_state(hass, STATE_NOT_HOME)
    assert_mobile_tracker_state(hass, STATE_NOT_HOME)
    not_home_lat = LOCATION_MESSAGE_NOT_HOME['lat']
    assert_location_latitude(hass, not_home_lat)
    assert_mobile_tracker_latitude(hass, not_home_lat)
async def test_mobile_enter_exit_region_beacon(hass, context):
    """Test the enter and the exit of a mobile beacon."""
    # Sighting a new mobile beacon while in 'outer' pins it there.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE['latitude'])
    assert_mobile_tracker_state(hass, 'outer')

    # A GPS region-enter event drags the beacon into the inner zone.
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])
    assert_mobile_tracker_state(hass, REGION_GPS_ENTER_MESSAGE['desc'])

    # Leaving inner for outer moves the beacon to the leave coords.
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    assert_mobile_tracker_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_mobile_tracker_state(hass, 'outer')
async def test_mobile_exit_move_beacon(hass, context):
    """Test the exit move of a beacon."""
    # Sight a new mobile beacon while sitting in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE['latitude'])
    assert_mobile_tracker_state(hass, 'outer')

    # Losing sight of the beacon freezes it at its last location.
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE['latitude'])
    assert_mobile_tracker_state(hass, 'outer')

    # Once lost, further device movement must not drag the beacon.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE['latitude'])
    assert_mobile_tracker_state(hass, 'outer')
async def test_mobile_multiple_async_enter_exit(hass, context):
    """Test the multiple entering."""
    # Fire rapid enter/leave/enter bursts to provoke the race; the
    # payloads are invariant, so serialize them once outside the loop.
    enter_payload = json.dumps(MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    leave_payload = json.dumps(MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    for _ in range(20):
        async_fire_mqtt_message(hass, EVENT_TOPIC, enter_payload)
        async_fire_mqtt_message(hass, EVENT_TOPIC, leave_payload)
        async_fire_mqtt_message(hass, EVENT_TOPIC, enter_payload)

    await hass.async_block_till_done()
    # A final leave must drain the active-beacon set completely.
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert len(context().mobile_beacons_active['greg_phone']) == 0
async def test_mobile_multiple_enter_exit(hass, context):
    """Test the multiple entering."""
    # Two consecutive enters followed by a single leave must still
    # leave no active beacons behind.
    for _ in range(2):
        await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert len(context().mobile_beacons_active['greg_phone']) == 0
async def test_complex_movement(hass, context):
    """Test a complex sequence representative of real-world use."""
    # NOTE: message order below mirrors real OwnTracks traffic
    # (event vs. location ordering matters) -- do not reorder.
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_state(hass, 'outer')
    # gps to inner location and event, as actually happens with OwnTracks
    location_message = build_message(
        {'lat': REGION_GPS_ENTER_MESSAGE['lat'],
         'lon': REGION_GPS_ENTER_MESSAGE['lon']},
        LOCATION_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')
    # region beacon enter inner event and location as actually happens
    # with OwnTracks
    location_message = build_message(
        {'lat': location_message['lat'] + FIVE_M,
         'lon': location_message['lon'] + FIVE_M},
        LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')
    # see keys mobile beacon and location message as actually happens
    location_message = build_message(
        {'lat': location_message['lat'] + FIVE_M,
         'lon': location_message['lon'] + FIVE_M},
        LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')
    # Slightly odd, I leave the location by gps before I lose
    # sight of the region beacon. This is also a little odd in
    # that my GPS coords are now in the 'outer' zone but I did not
    # "enter" that zone when I started up so my location is not
    # the center of OUTER_ZONE, but rather just my GPS location.
    # gps out of inner event and location
    location_message = build_message(
        {'lat': REGION_GPS_LEAVE_MESSAGE['lat'],
         'lon': REGION_GPS_LEAVE_MESSAGE['lon']},
        LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_mobile_tracker_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_location_state(hass, 'outer')
    assert_mobile_tracker_state(hass, 'outer')
    # region beacon leave inner
    location_message = build_message(
        {'lat': location_message['lat'] - FIVE_M,
         'lon': location_message['lon'] - FIVE_M},
        LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, location_message['lat'])
    assert_mobile_tracker_latitude(hass, location_message['lat'])
    assert_location_state(hass, 'outer')
    assert_mobile_tracker_state(hass, 'outer')
    # lose keys mobile beacon
    lost_keys_location_message = build_message(
        {'lat': location_message['lat'] - FIVE_M,
         'lon': location_message['lon'] - FIVE_M},
        LOCATION_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, lost_keys_location_message)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_location_latitude(hass, lost_keys_location_message['lat'])
    assert_mobile_tracker_latitude(hass, lost_keys_location_message['lat'])
    assert_location_state(hass, 'outer')
    assert_mobile_tracker_state(hass, 'outer')
    # gps leave outer
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_OUTER)
    assert_location_latitude(hass, LOCATION_MESSAGE_NOT_HOME['lat'])
    # The lost keys stay frozen at their last known position.
    assert_mobile_tracker_latitude(hass, lost_keys_location_message['lat'])
    assert_location_state(hass, 'not_home')
    assert_mobile_tracker_state(hass, 'outer')
    # location move not home
    location_message = build_message(
        {'lat': LOCATION_MESSAGE_NOT_HOME['lat'] - FIVE_M,
         'lon': LOCATION_MESSAGE_NOT_HOME['lon'] - FIVE_M},
        LOCATION_MESSAGE_NOT_HOME)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, location_message['lat'])
    # Keys remain where they were lost even as I keep moving.
    assert_mobile_tracker_latitude(hass, lost_keys_location_message['lat'])
    assert_location_state(hass, 'not_home')
    assert_mobile_tracker_state(hass, 'outer')
async def test_complex_movement_sticky_keys_beacon(hass, context):
    """Test a complex sequence which was previously broken."""
    # Regression test: the exact event/location ordering below used to
    # make the keys beacon 'stick' to greg_phone after a leave event.
    # I am not_home
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_state(hass, 'outer')
    # gps to inner location and event, as actually happens with OwnTracks
    location_message = build_message(
        {'lat': REGION_GPS_ENTER_MESSAGE['lat'],
         'lon': REGION_GPS_ENTER_MESSAGE['lon']},
        LOCATION_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')
    # see keys mobile beacon and location message as actually happens
    location_message = build_message(
        {'lat': location_message['lat'] + FIVE_M,
         'lon': location_message['lon'] + FIVE_M},
        LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')
    # region beacon enter inner event and location as actually happens
    # with OwnTracks
    location_message = build_message(
        {'lat': location_message['lat'] + FIVE_M,
         'lon': location_message['lon'] + FIVE_M},
        LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')
    # This sequence of moves would cause keys to follow
    # greg_phone around even after the OwnTracks sent
    # a mobile beacon 'leave' event for the keys.
    # leave keys
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])
    # leave inner region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])
    # enter inner region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])
    # enter keys
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])
    # leave keys
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])
    # leave inner region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])
    # GPS leave inner region, I'm in the 'outer' region now
    # but on GPS coords
    leave_location_message = build_message(
        {'lat': REGION_GPS_LEAVE_MESSAGE['lat'],
         'lon': REGION_GPS_LEAVE_MESSAGE['lon']},
        LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, leave_location_message)
    assert_location_state(hass, 'outer')
    # The keys must NOT follow me out -- this is the sticky-keys bug.
    assert_mobile_tracker_state(hass, 'inner')
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])
async def test_waypoint_import_simple(hass, context):
    """Test a simple import of list of waypoints."""
    await send_message(hass, WAYPOINTS_TOPIC, WAYPOINTS_EXPORTED_MESSAGE.copy())
    # Both exported waypoints must have been turned into states.
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[0]) is not None
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[1]) is not None
async def test_waypoint_import_blacklist(hass, context):
    """Test import of list of waypoints for blacklisted user."""
    await send_message(hass, WAYPOINTS_TOPIC_BLOCKED,
                       WAYPOINTS_EXPORTED_MESSAGE.copy())
    # Waypoints from a blacklisted user must not become states.
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[2]) is None
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[3]) is None
async def test_waypoint_import_no_whitelist(hass, setup_comp):
    """Test import of list of waypoints with no whitelist set."""
    await setup_owntracks(hass, {
        CONF_MAX_GPS_ACCURACY: 200,
        CONF_WAYPOINT_IMPORT: True,
        CONF_MQTT_TOPIC: 'owntracks/#',
    })
    # Without a whitelist even the otherwise-blocked user is imported.
    await send_message(hass, WAYPOINTS_TOPIC_BLOCKED,
                       WAYPOINTS_EXPORTED_MESSAGE.copy())
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[2]) is not None
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[3]) is not None
async def test_waypoint_import_bad_json(hass, context):
    """Test importing a bad JSON payload."""
    # The final True flag corrupts the payload before publishing.
    await send_message(hass, WAYPOINTS_TOPIC,
                       WAYPOINTS_EXPORTED_MESSAGE.copy(), True)
    # A malformed payload must not create any waypoint states.
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[2]) is None
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[3]) is None
async def test_waypoint_import_existing(hass, context):
    """Test importing a zone that exists."""
    await send_message(hass, WAYPOINTS_TOPIC, WAYPOINTS_EXPORTED_MESSAGE.copy())
    original = hass.states.get(WAYPOINT_ENTITY_NAMES[0])
    # A later update for the same waypoint must not replace the
    # zone that already exists.
    await send_message(hass, WAYPOINTS_TOPIC, WAYPOINTS_UPDATED_MESSAGE.copy())
    assert original == hass.states.get(WAYPOINT_ENTITY_NAMES[0])
async def test_single_waypoint_import(hass, context):
    """Test single waypoint message."""
    await send_message(hass, WAYPOINT_TOPIC, WAYPOINT_MESSAGE.copy())
    # A single (non-list) waypoint message also creates a state.
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[0]) is not None
async def test_not_implemented_message(hass, context):
    """Handle not implemented message type."""
    # Use patch as a context manager so it is always undone; the old
    # start()/stop() pair leaked the patch when the assert failed.
    with patch('homeassistant.components.owntracks.'
               'messages.async_handle_not_impl_msg',
               return_value=mock_coro(False)):
        assert not await send_message(hass, LWT_TOPIC, LWT_MESSAGE)
async def test_unsupported_message(hass, context):
    """Handle not implemented message type."""
    # Use patch as a context manager so it is always undone; the old
    # start()/stop() pair leaked the patch when the assert failed.
    with patch('homeassistant.components.owntracks.'
               'messages.async_handle_unsupported_msg',
               return_value=mock_coro(False)):
        assert not await send_message(hass, BAD_TOPIC, BAD_MESSAGE)
def generate_ciphers(secret):
    """Generate test ciphers for the DEFAULT_LOCATION_MESSAGE."""
    import pickle
    import base64
    plaintext = json.dumps(DEFAULT_LOCATION_MESSAGE).encode("utf-8")

    # Real ciphertext needs PyNaCl/libsodium; fall back to '' so this
    # module still imports when the library is unavailable (the
    # decryption test is skipped in that case anyway).
    try:
        from nacl.secret import SecretBox
        from nacl.encoding import Base64Encoder
        keylen = SecretBox.KEY_SIZE
        padded_key = secret.encode("utf-8")[:keylen].ljust(keylen, b'\0')
        ctxt = SecretBox(padded_key).encrypt(
            plaintext, encoder=Base64Encoder).decode("utf-8")
    except (ImportError, OSError):
        ctxt = ''

    # The mock cipher simply pickles (key, plaintext) and base64s it.
    mctxt = base64.b64encode(
        pickle.dumps((secret.encode("utf-8"), plaintext))
    ).decode("utf-8")
    return ctxt, mctxt
TEST_SECRET_KEY = 's3cretkey'
# Precompute both ciphertexts once at import time; CIPHERTEXT is ''
# when PyNaCl/libsodium is unavailable.
CIPHERTEXT, MOCK_CIPHERTEXT = generate_ciphers(TEST_SECRET_KEY)
ENCRYPTED_LOCATION_MESSAGE = {
    # Encrypted version of LOCATION_MESSAGE using libsodium and TEST_SECRET_KEY
    '_type': 'encrypted',
    'data': CIPHERTEXT
}
MOCK_ENCRYPTED_LOCATION_MESSAGE = {
    # Mock-encrypted version of LOCATION_MESSAGE using pickle
    '_type': 'encrypted',
    'data': MOCK_CIPHERTEXT
}
def mock_cipher():
    """Return a dummy pickle-based cipher."""
    def mock_decrypt(ciphertext, key):
        """Unpickle (key, plaintext) and verify the key matches."""
        import pickle
        import base64
        expected_key, plaintext = pickle.loads(base64.b64decode(ciphertext))
        if key != expected_key:
            raise ValueError()
        return plaintext

    return len(TEST_SECRET_KEY), mock_decrypt
@pytest.fixture
def config_context(hass, setup_comp):
    """Set up the mocked context."""
    # Keep device-tracker config I/O away from disk for the test run.
    patchers = [
        patch('homeassistant.components.device_tracker.async_load_config',
              return_value=mock_coro([])),
        patch('homeassistant.components.device_tracker.'
              'DeviceTracker.async_update_config'),
    ]
    for patcher in patchers:
        patcher.start()
    yield
    for patcher in patchers:
        patcher.stop()
@patch('homeassistant.components.owntracks.messages.get_cipher',
       mock_cipher)
async def test_encrypted_payload(hass, setup_comp):
    """Test encrypted payload."""
    # A plain string secret applies to every topic.
    await setup_owntracks(hass, {CONF_SECRET: TEST_SECRET_KEY})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
@patch('homeassistant.components.owntracks.messages.get_cipher',
       mock_cipher)
async def test_encrypted_payload_topic_key(hass, setup_comp):
    """Test encrypted payload with a topic key."""
    # The secret may also be a per-topic mapping.
    await setup_owntracks(hass, {
        CONF_SECRET: {LOCATION_TOPIC: TEST_SECRET_KEY},
    })
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
@patch('homeassistant.components.owntracks.messages.get_cipher',
       mock_cipher)
async def test_encrypted_payload_no_key(hass, setup_comp):
    """Test encrypted payload with no key, ."""
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
    # An empty secret mapping means nothing can be decrypted.
    await setup_owntracks(hass, {CONF_SECRET: {}})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
@patch('homeassistant.components.owntracks.messages.get_cipher',
       mock_cipher)
async def test_encrypted_payload_wrong_key(hass, setup_comp):
    """Test encrypted payload with wrong key."""
    # Decryption fails with a mismatched secret; no state is created.
    await setup_owntracks(hass, {CONF_SECRET: 'wrong key'})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
@patch('homeassistant.components.owntracks.messages.get_cipher',
       mock_cipher)
async def test_encrypted_payload_wrong_topic_key(hass, setup_comp):
    """Test encrypted payload with wrong topic key."""
    # The right topic but the wrong per-topic secret: no decryption.
    await setup_owntracks(hass, {
        CONF_SECRET: {LOCATION_TOPIC: 'wrong key'},
    })
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
@patch('homeassistant.components.owntracks.messages.get_cipher',
       mock_cipher)
async def test_encrypted_payload_no_topic_key(hass, setup_comp):
    """Test encrypted payload with no topic key."""
    # A secret is configured, but only for a different device's topic.
    other_topic = 'owntracks/{}/{}'.format(USER, 'otherdevice')
    await setup_owntracks(hass, {CONF_SECRET: {other_topic: 'foobar'}})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
async def test_encrypted_payload_libsodium(hass, setup_comp):
    """Test sending encrypted message payload."""
    # OSError covers a present wheel whose native libsodium fails to load.
    try:
        import nacl  # noqa: F401  pylint: disable=unused-import
    except (ImportError, OSError):
        pytest.skip("PyNaCl/libsodium is not installed")
        return

    await setup_owntracks(hass, {CONF_SECRET: TEST_SECRET_KEY})
    await send_message(hass, LOCATION_TOPIC, ENCRYPTED_LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
async def test_customized_mqtt_topic(hass, setup_comp):
    """Test subscribing to a custom mqtt topic."""
    await setup_owntracks(hass, {CONF_MQTT_TOPIC: 'mytracks/#'})
    # Messages on the custom topic tree must be picked up.
    custom_topic = 'mytracks/{}/{}'.format(USER, DEVICE)
    await send_message(hass, custom_topic, LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
async def test_region_mapping(hass, setup_comp):
    """Test region to zone mapping."""
    await setup_owntracks(hass, {
        CONF_REGION_MAPPING: {'foo': 'inner'},
    })
    hass.states.async_set('zone.inner', 'zoning', INNER_ZONE)
    # The event still carries the OwnTracks region name ...
    message = build_message({'desc': 'foo'}, REGION_GPS_ENTER_MESSAGE)
    assert message['desc'] == 'foo'
    # ... but the configured mapping resolves it to the 'inner' zone.
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner')
| |
from __future__ import division
import json
import base64
import functools
from uuid import uuid4
import os
from flask import Flask, request, g, redirect, url_for, render_template, flash, session, Response, \
copy_current_request_context
from tornado.wsgi import WSGIContainer
from tornado.ioloop import IOLoop
from tornado.httpserver import HTTPServer
import redis
from rauth.service import OAuth2Service
from sessions import RedisSessionInterface
from models import db, User, Server, StatusUpdate
import forms
import gevent
# Binding to port 80 (see the __main__ block) needs root privileges,
# so refuse to start as an ordinary user.
if os.getuid() != 0:
    print "Must run as root to run on port 80"
    import sys
    sys.exit(1)
# Templates/static live two directories above this module.
app = Flask(__name__, template_folder='../../templates', static_folder='../../static')
app.config.from_object('config')
# Bind the shared SQLAlchemy instance from models.py to this app.
db.app = app
db.init_app(app)
def init_db():
    """Create all database tables declared on the SQLAlchemy models."""
    db.create_all()
# Shared connection pool; per-request clients are built in setup_globals().
redis_pool = redis.ConnectionPool(host=app.config['REDIS_HOST'],
                                  port=app.config['REDIS_PORT'],
                                  db=app.config['REDIS_DB'])
# Store sessions server-side in redis instead of in client cookies.
app.session_interface = RedisSessionInterface(pool=redis_pool)
# Google OAuth2 endpoints used by the /login flow below.
google = OAuth2Service(
    name='google',
    base_url=app.config['GOOGLE_API_URL'],
    authorize_url=app.config['GOOGLE_OAUTH2_URL'] + 'auth',
    access_token_url=app.config['GOOGLE_OAUTH2_URL'] + 'token',
    client_id=app.config['GOOGLE_CLIENT_ID'],
    client_secret=app.config['GOOGLE_CLIENT_SECRET']
)
def background(template):
    """Decorator factory: run the wrapped view in a gevent greenlet.

    The wrapped view's return value is stored in redis under a fresh
    job id, and the request immediately renders *template* with that
    jobid so the client can poll /job/check/<jobid>.
    """
    def _f(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            jobid = uuid4().hex
            key = 'job-{0}'.format(jobid)
            skey = 'job-{0}-status'.format(jobid)
            expire_time = 3600
            # 202 Accepted while running; flipped to 200/500 when done.
            g.redis.set(skey, 202)
            g.redis.expire(skey, expire_time)

            @copy_current_request_context
            def task():
                try:
                    data = f(*args, **kwargs)
                except Exception:
                    # Was a bare except: that also swallowed SystemExit
                    # and KeyboardInterrupt; narrow it, keep 500 marker.
                    g.redis.set(skey, 500)
                else:
                    g.redis.set(skey, 200)
                    g.redis.set(key, data)
                    g.redis.expire(key, expire_time)
                    g.redis.expire(skey, expire_time)
            gevent.spawn(task)
            return render_template(template, jobid=jobid)
        return wrapper
    return _f
@app.errorhandler(404)
def page_not_found(e):
    """Render the shared 404 page for unknown URLs."""
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
    """Render an error page for unhandled exceptions.

    Reuses the 404 template (no dedicated 500 page exists) but now
    reports the correct 500 status -- previously this 500 handler
    mislabelled the response as 404.
    """
    return render_template('404.html'), 500
@app.before_request
def setup_globals():
    """Attach per-request db/redis handles and enforce login."""
    g.db = db
    g.redis = redis.StrictRedis(connection_pool=redis_pool)
    # Login and API endpoints (and 404s, where endpoint is None) are
    # exempt from the auth check below.
    if request.endpoint is None or request.endpoint.startswith('login') or request.endpoint.startswith('api'):
        return
    if 'ext_id' in session:
        g.current_user = User.query.filter_by(ext_id=session['ext_id']).first()
        if g.current_user is not None:
            return
    # No session, or the session's user no longer exists: force login.
    return redirect(url_for('login'))
@app.after_request
def fix_response(resp):
    """Disable client-side caching while the app runs in debug mode."""
    if not app.config['DEBUG']:
        return resp
    resp.headers['Cache-Control'] = 'no-cache'
    return resp
@app.teardown_request
def close_globals(error=None):
    """Commit the db session on success, roll it back on error."""
    if error is not None:
        db.session.rollback()
    else:
        db.session.commit()
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/login/redirect')
def login():
    """Kick off the Google OAuth2 authorization-code flow."""
    authorize_url = google.get_authorize_url(
        redirect_uri=url_for('login_authorized', _external=True),
        scope=app.config['GOOGLE_API_SCOPE'],
        response_type='code')
    return redirect(authorize_url)
@app.route('/login/oauth_authorized')
def login_authorized():
    """OAuth2 callback: exchange the code for a token and log in."""
    if 'code' not in request.args:
        flash('Your authorization failed. Try again.')
        return render_template('login_failed.html')
    # Exchange the authorization code for an access token.
    data = dict(code=request.args.get('code'), redirect_uri=url_for('login_authorized', _external=True),
                grant_type='authorization_code')
    sess = google.get_auth_session(data=data, decoder=json.loads)
    uinfo = sess.get('userinfo').json()
    # Key the session on Google's stable user id, not the email.
    session['ext_id'] = uinfo['id']
    User.get_or_create(uinfo['email'], session['ext_id'])
    flash('Logged in as {}'.format(uinfo['email']))
    return redirect(url_for('index'))
@app.route('/logout')
def logout():
    """Log the current user out and return to the index page.

    Uses session.pop() so that visiting /logout while not logged in no
    longer raises KeyError (the old ``del session['ext_id']`` 500'd).
    """
    session.pop('ext_id', None)
    return redirect(url_for('index'))
@app.route('/servers/list')
def list_servers():
    """Show every registered server."""
    return render_template('list_servers.html', servers=Server.query.all())
@app.route('/servers/add', methods=['POST', 'GET'])
def add_server():
    """Render and process the add-server form."""
    form = forms.AddServer()
    if form.validate_on_submit():
        try:
            server = Server(hostname=form.hostname.data)
            g.db.session.add(server)
            g.db.session.commit()
            flash('Successfully added server {}'.format(server.hostname))
            return redirect(url_for('list_servers'))
        except ValueError:
            flash('One or more fields has a value of the wrong data type.')
        except KeyError as err:
            flash('Missing required field {}'.format(err.args[0]))
    # GET, validation failure, or model error: redisplay the form.
    return render_template('add_server.html', form=form)
@app.route('/servers/view/<int:id>')
def view_server(id):
    """Show one server plus a disk-usage fraction per status update."""
    server = Server.query.filter_by(id=id).first()
    if server is None:
        flash('Server with id {} not found.'.format(id))
        return redirect(url_for('list_servers'))
    status_updates = None
    if server.status_updates.count():
        status_updates = []
        for status_update in server.status_updates:
            if status_update.disks.count():
                # Aggregate used/size across all disks; true division
                # is guaranteed by the module's __future__ import.
                used, size = zip(*map(lambda d: (d.used, d.size), status_update.disks))
                status_update.disk_pct = sum(used) / sum(size)
            status_updates.append(status_update)
    return render_template('server_info.html', server=server, status_updates=status_updates)
@app.route('/servers/status-updates/view/<int:id>')
def status_update(id):
    """Show a single status update, or bounce back to the server list."""
    update = StatusUpdate.query.filter_by(id=id).first()
    if update is None:
        flash('Status update with ID {} not found.'.format(id))
        return redirect(url_for('list_servers'))
    return render_template('view_status_update.html', status_update=update)
@app.route('/servers/deploy/<int:id>')
@background('job_added.html')
def deploy_package(id):
    """Deploy a package to the given server (not yet implemented)."""
    # NOTE(review): stub body -- the @background decorator will store
    # None as this job's result until this is implemented.
    pass
@app.route('/job/check/<jobid>')
def job_check(jobid):
    """Poll a background job and render its status page."""
    # redis returns the status as a string, so compare against '200';
    # the old `== 200` (int) was always False, and `done` was computed
    # but never passed to the template.
    done = g.redis.get('job-{0}-status'.format(jobid)) == '200'
    return render_template('job_status.html', jobid=jobid, done=done)
@app.route('/job/dl/<jobid>')
def job_download(jobid):
    """Stream a finished background job's result as a gzip download."""
    # redis returns the status as a string; the old `!= 200` compared
    # str to int and was therefore always True, so no download could
    # ever be served.
    if g.redis.get('job-{0}-status'.format(jobid)) != '200':
        flash("Job not done running.")
        return redirect(url_for('job_check', jobid=jobid))
    # Job results are stored base64-encoded by the background worker.
    data = g.redis.get('job-{0}'.format(jobid))
    raw = base64.b64decode(data)
    return Response(raw, mimetype='application/x-gzip')
# Debug-only endpoints; the blueprint is not even imported in production.
if app.config['DEBUG']:
    import debug
    app.register_blueprint(debug.bp, url_prefix='/debug')
import api
app.register_blueprint(api.bp, url_prefix='/api')
if __name__ == '__main__':
    if not app.config['DEBUG']:
        # Production: serve through tornado's event loop on port 80.
        container = WSGIContainer(app)
        http_server = HTTPServer(container)
        http_server.listen(80)
        IOLoop.instance().start()
    else:
        # Debug: flask's builtin auto-reloading server.
        app.run(host='0.0.0.0', port=80)
| |
from pandac.PandaModules import *
from toontown.toonbase.ToontownBattleGlobals import *
from direct.task.Timer import *
import math
from direct.directnotify import DirectNotifyGlobal
from toontown.toon import NPCToons
from toontown.toonbase import TTLocalizer
# Column indices into a toon attack record (built by getToonAttack()).
TOON_ID_COL = 0
TOON_TRACK_COL = 1
TOON_LVL_COL = 2
TOON_TGT_COL = 3
TOON_HP_COL = 4
TOON_ACCBONUS_COL = 5
TOON_HPBONUS_COL = 6
TOON_KBBONUS_COL = 7
SUIT_DIED_COL = 8
SUIT_REVIVE_COL = 9
# Column indices into a suit attack record (see getDefaultSuitAttack()).
SUIT_ID_COL = 0
SUIT_ATK_COL = 1
SUIT_TGT_COL = 2
SUIT_HP_COL = 3
TOON_DIED_COL = 4
SUIT_BEFORE_TOONS_COL = 5
SUIT_TAUNT_COL = 6
# Sentinel values for "no attack chosen" and friends.
NO_ID = -1
NO_ATTACK = -1
UN_ATTACK = -2
PASS_ATTACK = -3
NO_TRAP = -1
LURE_SUCCEEDED = -1
# Pseudo-track codes for non-gag choices (pass, SOS cards, fire).
PASS = 98
SOS = 99
NPCSOS = 97
PETSOS = 96
FIRE = 100
# Short aliases for the gag-track indices from ToontownBattleGlobals.
HEAL = HEAL_TRACK
TRAP = TRAP_TRACK
LURE = LURE_TRACK
SOUND = SOUND_TRACK
THROW = THROW_TRACK
SQUIRT = SQUIRT_TRACK
DROP = DROP_TRACK
# Attack-movie timing, in seconds.
TOON_ATTACK_TIME = 12.0
SUIT_ATTACK_TIME = 12.0
TOON_TRAP_DELAY = 0.8
TOON_SOUND_DELAY = 1.0
TOON_THROW_DELAY = 0.5
TOON_THROW_SUIT_DELAY = 1.0
TOON_SQUIRT_DELAY = 0.5
TOON_SQUIRT_SUIT_DELAY = 1.0
TOON_DROP_DELAY = 0.8
TOON_DROP_SUIT_DELAY = 1.0
TOON_RUN_T = 3.3
TIMEOUT_PER_USER = 5
TOON_FIRE_DELAY = 0.5
TOON_FIRE_SUIT_DELAY = 1.0
# Reward-movie timeouts, in seconds.
REWARD_TIMEOUT = 120
FLOOR_REWARD_TIMEOUT = 4
BUILDING_REWARD_TIMEOUT = 300
# 'base' exists on the client, 'simbase' on the AI server; whichever
# is defined supplies the config value.
# NOTE(review): the bare except also masks errors other than the
# expected NameError -- confirm intent before narrowing it.
try:
    CLIENT_INPUT_TIMEOUT = base.config.GetFloat('battle-input-timeout', TTLocalizer.BBbattleInputTimeout)
except:
    CLIENT_INPUT_TIMEOUT = simbase.config.GetFloat('battle-input-timeout', TTLocalizer.BBbattleInputTimeout)
def levelAffectsGroup(track, level):
    """Compatibility alias for attackAffectsGroup() without a type."""
    return attackAffectsGroup(track, level)
def attackAffectsGroup(track, level, type = None):
    """Return 1 if an attack of this track/level hits the whole group."""
    # NPC SOS and pet SOS calls always affect the entire group.
    if track in (NPCSOS, PETSOS) or type in (NPCSOS, PETSOS):
        return 1
    # Regular gag tracks consult the target-category table.
    if 0 <= track <= DROP_TRACK:
        return AvPropTargetCat[AvPropTarget[track]][level]
    return 0
def getToonAttack(id, track = NO_ATTACK, level = -1, target = -1):
    """Build a fresh toon attack record (indexed by the TOON_*_COL constants)."""
    # Slots: id, track, level, target, hp list, acc bonus, hp bonus,
    # kb-bonus list, suits-died bits, suits-revived bits.
    return [id, track, level, target, [], 0, 0, [], 0, 0]
def getDefaultSuitAttacks():
    """Return a list of four empty suit attack records."""
    # One record per possible suit in a battle; each iteration builds a
    # distinct list so callers can mutate records independently.
    return [[NO_ID, NO_ATTACK, -1, [], 0, 0, 0] for _ in range(4)]
def getDefaultSuitAttack():
    """Return a single empty suit attack record (see SUIT_*_COL)."""
    return [NO_ID, NO_ATTACK, -1, [], 0, 0, 0]
def findToonAttack(toons, attacks, track):
    """Return the attacks by 'toons' on 'track', sorted by gag level.

    NPC SOS attacks are resolved to the track of the NPC they summon.
    FIRE attacks are de-duplicated per target (one fire per suit).
    """
    foundAttacks = []
    for t in toons:
        # 'in' replaces the long-deprecated dict.has_key().
        if t not in attacks:
            continue
        attack = attacks[t]
        local_track = attack[TOON_TRACK_COL]
        if track != NPCSOS and local_track == NPCSOS:
            local_track = NPCToons.getNPCTrack(attack[TOON_TGT_COL])
        if local_track != track:
            continue
        if local_track == FIRE:
            # Only one fire per target suit may be kept.
            if all(prev[TOON_TGT_COL] != attack[TOON_TGT_COL]
                   for prev in foundAttacks):
                foundAttacks.append(attack)
        else:
            foundAttacks.append(attack)

    # Stable sort by level; key= replaces the Python-2-only cmp function
    # and preserves the original relative order for equal levels.
    foundAttacks.sort(key=lambda a: a[TOON_LVL_COL])
    return foundAttacks
# Server-side input window: client timeout plus network slack.
SERVER_BUFFER_TIME = 2.0
SERVER_INPUT_TIMEOUT = CLIENT_INPUT_TIMEOUT + SERVER_BUFFER_TIME
MAX_JOIN_T = TTLocalizer.BBbattleInputTimeout
# Faceoff / elevator cutscene timings, in seconds.
FACEOFF_TAUNT_T = 3.5
FACEOFF_LOOK_AT_PROP_T = 6
ELEVATOR_T = 4.0
# Epsilon for float comparisons in battle position math.
BATTLE_SMALL_VALUE = 1e-07
MAX_EXPECTED_DISTANCE_FROM_BATTLE = 50.0
class BattleBase:
    """Geometry and timing shared by the client and AI battle classes.

    Positions are (Point3, heading-degrees) pairs in battle-local
    coordinates: suits stand on +Y, toons on -Y, and the pos A-H ring
    is walked when avatars join an in-progress battle.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('BattleBase')
    # Stand positions for 1-4 active suits (outer index = count - 1).
    suitPoints = (((Point3(0, 5, 0), 179),),
     ((Point3(2, 5.3, 0), 170), (Point3(-2, 5.3, 0), 180)),
     ((Point3(4, 5.2, 0), 170), (Point3(0, 6, 0), 179), (Point3(-4, 5.2, 0), 190)),
     ((Point3(6, 4.4, 0), 160),
      (Point3(2, 6.3, 0), 170),
      (Point3(-2, 6.3, 0), 190),
      (Point3(-6, 4.4, 0), 200)))
    # Wait positions for suits queued to join the next round.
    suitPendingPoints = ((Point3(-4, 8.2, 0), 190),
     (Point3(0, 9, 0), 179),
     (Point3(4, 8.2, 0), 170),
     (Point3(8, 3.2, 0), 160))
    # Stand positions for 1-4 active toons (outer index = count - 1).
    toonPoints = (((Point3(0, -6, 0), 0),),
     ((Point3(1.5, -6.5, 0), 5), (Point3(-1.5, -6.5, 0), -5)),
     ((Point3(3, -6.75, 0), 5), (Point3(0, -7, 0), 0), (Point3(-3, -6.75, 0), -5)),
     ((Point3(4.5, -7, 0), 10),
      (Point3(1.5, -7.5, 0), 5),
      (Point3(-1.5, -7.5, 0), -5),
      (Point3(-4.5, -7, 0), -10)))
    # Wait positions for toons queued to join the next round.
    toonPendingPoints = ((Point3(-3, -8, 0), -5),
     (Point3(0, -9, 0), 0),
     (Point3(3, -8, 0), 5),
     (Point3(5.5, -5.5, 0), 20))
    # Eight waypoints on a radius-10 ring around the battle center.
    posA = Point3(0, 10, 0)
    posB = Point3(-7.071, 7.071, 0)
    posC = Point3(-10, 0, 0)
    posD = Point3(-7.071, -7.071, 0)
    posE = Point3(0, -10, 0)
    posF = Point3(7.071, -7.071, 0)
    posG = Point3(10, 0, 0)
    posH = Point3(7.071, 7.071, 0)
    allPoints = (posA,
     posB,
     posC,
     posD,
     posE,
     posF,
     posG,
     posH)
    # Ring walks: toons head for posE, suits for posA, going whichever
    # way around the circle is appropriate for their start point.
    toonCwise = [posA,
     posB,
     posC,
     posD,
     posE]
    toonCCwise = [posH,
     posG,
     posF,
     posE]
    suitCwise = [posE,
     posF,
     posG,
     posH,
     posA]
    suitCCwise = [posD,
     posC,
     posB,
     posA]
    # Walk speeds in units per second.
    suitSpeed = 4.8
    toonSpeed = 8.0

    def __init__(self):
        """Initialize battle-local origin/timer and empty rosters."""
        self.pos = Point3(0, 0, 0)
        self.initialSuitPos = Point3(0, 1, 0)
        self.timer = Timer()
        self.resetLists()

    def resetLists(self):
        """Clear every suit/toon roster back to an empty battle."""
        self.suits = []
        self.pendingSuits = []
        self.joiningSuits = []
        self.activeSuits = []
        self.luredSuits = []
        self.suitGone = 0
        self.toons = []
        self.joiningToons = []
        self.pendingToons = []
        self.activeToons = []
        self.runningToons = []
        self.toonGone = 0
        self.helpfulToons = []

    def calcFaceoffTime(self, centerpos, suitpos):
        """Seconds for a suit to walk to 6 units short of the center."""
        facing = Vec3(centerpos - suitpos)
        facing.normalize()
        suitdest = Point3(centerpos - Point3(facing * 6.0))
        dist = Vec3(suitdest - suitpos).length()
        return dist / BattleBase.suitSpeed

    def calcSuitMoveTime(self, pos0, pos1):
        """Seconds for a suit to walk between two points."""
        dist = Vec3(pos0 - pos1).length()
        return dist / BattleBase.suitSpeed

    def calcToonMoveTime(self, pos0, pos1):
        """Seconds for a toon to walk between two points."""
        dist = Vec3(pos0 - pos1).length()
        return dist / BattleBase.toonSpeed

    def buildJoinPointList(self, avPos, destPos, toon = 0):
        """Return the ring waypoints an avatar should walk to join.

        Picks the nearest ring point, then walks the ring (clockwise or
        counter-clockwise) toward posE for toons or posA for suits.
        Returns [] when the destination is closer than any ring point.
        """
        minDist = 999999.0
        nearestP = None
        for p in BattleBase.allPoints:
            dist = Vec3(avPos - p).length()
            if dist < minDist:
                nearestP = p
                minDist = dist

        self.notify.debug('buildJoinPointList() - avp: %s nearp: %s' % (avPos, nearestP))
        dist = Vec3(avPos - destPos).length()
        if dist < minDist:
            self.notify.debug('buildJoinPointList() - destPos is nearest')
            return []
        if toon == 1:
            if nearestP == BattleBase.posE:
                self.notify.debug('buildJoinPointList() - posE')
                plist = [BattleBase.posE]
            elif BattleBase.toonCwise.count(nearestP) == 1:
                self.notify.debug('buildJoinPointList() - clockwise')
                index = BattleBase.toonCwise.index(nearestP)
                plist = BattleBase.toonCwise[index + 1:]
            else:
                self.notify.debug('buildJoinPointList() - counter-clockwise')
                index = BattleBase.toonCCwise.index(nearestP)
                plist = BattleBase.toonCCwise[index + 1:]
        elif nearestP == BattleBase.posA:
            self.notify.debug('buildJoinPointList() - posA')
            plist = [BattleBase.posA]
        elif BattleBase.suitCwise.count(nearestP) == 1:
            self.notify.debug('buildJoinPointList() - clockwise')
            index = BattleBase.suitCwise.index(nearestP)
            plist = BattleBase.suitCwise[index + 1:]
        else:
            self.notify.debug('buildJoinPointList() - counter-clockwise')
            index = BattleBase.suitCCwise.index(nearestP)
            plist = BattleBase.suitCCwise[index + 1:]
        self.notify.debug('buildJoinPointList() - plist: %s' % plist)
        return plist

    def addHelpfulToon(self, toonId):
        """Record a toon as helpful, without duplicates."""
        if toonId not in self.helpfulToons:
            self.helpfulToons.append(toonId)
| |
import os, sys, re, dicom, scipy, cv2
import numpy as np
from skimage import transform, exposure
from sklearn import decomposition
from PIL import Image
from pandas.io.parsers import read_csv
import theano
import theano.tensor as T
import lasagne as nn
import utils as u, config as c
import heart, gaussian2d
#reload(heart)
def sigmoid(x):
    """Logistic function: maps any real (or array) into (0, 1)."""
    decay = np.exp(-x)
    return 1.0 / (1.0 + decay)
class CNN_Dataset(heart.Dataset):
    """A kaggle heart-MRI study prepared for FCN segmentation.

    Loads the short-axis stack (and optionally the 4-chamber stack),
    center-crops/resizes frames to img_size, segments every frame, and
    derives per-slice LV areas plus end-systole/end-diastole timing.
    """
    def resize_center(self, ims, xyd):
        """Crop each frame to a square of half-width xyd[2] centred on
        (xyd[0], xyd[1]) and resize to (img_size, img_size).

        ims -- (n_slices, n_times, H, W) array
        xyd -- (xcenter, ycenter, crop half-size)
        Also rescales area_multiplier so pixel counts still map to mm^2.
        """
        new_ims = np.empty((ims.shape[0], ims.shape[1], 1, self.img_size, self.img_size),
                           dtype=np.uint8)
        for s in xrange(ims.shape[0]):
            for t in xrange(ims.shape[1]):
                new_ims[s, t, 0] = transform.resize(
                    ims[s, t][xyd[0]-xyd[2]:xyd[0]+xyd[2], xyd[1]-xyd[2]:xyd[1]+xyd[2]],
                    (self.img_size, self.img_size))
                if t == 0:
                    # one resized pixel covers (2*xyd[2]/img_size)^2 crop pixels
                    self.area_multiplier[s] *= (xyd[2]*2./self.img_size)**2
        return new_ims
    def __init__(self, dset_num, img_size=c.fcn_img_size, direc=None,
                 load_sax_images=True, load_ch4_images=False):
        """Locate study `dset_num` under train/validate/test and load it."""
        s = str(dset_num)
        if not direc:
            direcs = [os.path.join(c.data_kaggle, 'train'),
                      os.path.join(c.data_kaggle, 'validate'),
                      os.path.join(c.data_kaggle, 'test')]
            direc = next(p for p in direcs if os.path.exists(os.path.join(p, s)))
        super(CNN_Dataset, self).__init__(os.path.join(direc, s), s)
        self.img_size = img_size
        if load_sax_images:
            self.load()
            self.img_size = img_size
            self.orig_img = self.images
            xyd = self.getxydinit()
            self.images = self.resize_center(self.orig_img, xyd)
        if load_ch4_images:
            self.load_ch4()
    def set_sys_dias_times(self):
        """End-systole = frame of minimal mean area, end-diastole = maximal."""
        times_totals = self.areas.mean(axis=0)
        self.sys_time, self.dias_time = np.argmin(times_totals), np.argmax(times_totals)
    def segment(self, segment_fn, segment_transform, means=None, stds=None, segment_transform2=None):
        """Segment every frame with `segment_fn` and derive counts/areas.

        When segment_transform2 is given, each frame is segmented under both
        transforms and the mask with the larger activated area is kept
        (hoping the network fired in at least one of the two cases).
        """
        imgs = np.copy(self.images.reshape(-1, 1, self.images.shape[3], self.images.shape[4]))
        if segment_transform2 is not None:
            imgs2 = segment_transform2(imgs)
        imgs = segment_transform(imgs)
        self.segments = np.zeros_like(imgs)
        for i in xrange(imgs.shape[0]):
            if segment_transform2 is None:
                self.segments[i:i+1] = segment_fn(imgs[i:i+1])
            else:
                # choose the one with larger area (hope it got activated in one case)
                tmp1 = segment_fn(imgs[i:i+1])
                tmp2 = segment_fn(imgs2[i:i+1])
                if np.sum(tmp1) > np.sum(tmp2):
                    self.segments[i:i+1] = tmp1
                else:
                    self.segments[i:i+1] = tmp2
        self.seg_binary = clean_segmentation(self.segments, self.img_size)
        self.contour_portion = get_contour_portion(self.images, self.seg_binary)
        self.counts = np.array([np.count_nonzero(s) for s in self.seg_binary])\
            .reshape(len(self.slices_ver), -1)
        self.areas = np.zeros_like(self.counts)
        for s_idx, _ in enumerate(self.slices_ver):
            for t_idx, _ in enumerate(self.time):
                self.areas[s_idx, t_idx] = self.counts[s_idx, t_idx] * self.area_multiplier[s_idx]
        self.set_sys_dias_times()
    def calculate_areas(self):
        """Return (systolic, diastolic) LV volumes in millilitres."""
        self.sys_vol = volume(self.slocation, self.areas[:, self.sys_time])
        self.dias_vol = volume(self.slocation, self.areas[:, self.dias_time])
        return self.sys_vol, self.dias_vol
    def areas_cutoff(self, true_svol, true_dvol):
        """Find basal-slice cutoff indices whose truncated volumes first drop
        below the labelled volumes; returns (-1, -1) when inconsistent."""
        sys_area = self.areas[:, self.sys_time]
        L = self.areas.shape[0]
        dias_area = self.areas[:, self.dias_time]
        sax = np.argmax(dias_area)
        s_i = -1
        for i in xrange(sax):
            sv = volume(self.slocation[i:], sys_area[i:])
            if np.isnan(sv) or sv < max(true_svol - 6, true_svol * (1 - 1.0 / L)):
                s_i, s_v = i, sv
                break
        d_i = -1
        for i in xrange(sax):
            dv = volume(self.slocation[i:], dias_area[i:])
            if np.isnan(dv) or dv < max(true_dvol - 6, true_dvol * (1 - 1.0 / L)):
                d_i, d_v = i, dv
                break
        if s_i >= d_i:
            return s_i - 1, d_i - 1
        else:
            return -1, -1
    def unload(self):
        """Drop the large image/segmentation arrays to free memory."""
        self.images = None
        self.orig_img = None
        self.segments = None
        self.seg_binary = None
        self.ch4seg = None
        self.ch4_images = None
    def load_ch4(self):
        """Load the highest-numbered 4-chamber dicom series, cropped/resized.

        Sets ch4_images (or leaves it None when no 4ch directory exists) and
        ch4_line_mult, the mm-per-resized-pixel factor.
        """
        ch4dirs = [d for d in os.listdir(self.directory) if '4ch' in d]
        max_val = 0
        max_dir = None
        self.ch4_images = None
        for d in ch4dirs:
            fn = [f for f in os.listdir(os.path.join(self.directory, d)) if 'dcm' in f][0]
            # BUG FIX: the original read `f.split(...)` -- `f` being the
            # leaked comprehension variable (an arbitrary directory entry),
            # not the selected dicom filename `fn`.
            series = int(fn.split('-')[1])
            if series > max_val:
                max_val = series
                max_dir = d
        if max_dir is not None:
            ch4_fns = [f for f in os.listdir(os.path.join(self.directory, max_dir))
                       if 'dcm' in f]
            ch4_fns = sorted(ch4_fns, key=lambda f: int(f.split('.')[0].split('-')[2]))
            ch4_ims = np.empty((len(ch4_fns), self.img_size, self.img_size))
            for i, fn in enumerate(ch4_fns):
                d = dicom.read_file(os.path.join(self.directory, max_dir, fn))
                ch4_ims[i] = crop_resize(d.pixel_array, self.img_size)
                if i == 0:
                    short_edge = min(d.pixel_array.shape)
                    self.ch4_line_mult = float(d.PixelSpacing[0]) * short_edge * 1. / self.img_size
            self.ch4_images = ch4_ims
def volume(x, y):
    """Integrate slice areas y (mm^2) over slice locations x (mm) with the
    trapezoid rule, in millilitres.

    Slices with non-positive area are dropped; np.nan is returned when
    fewer than 3 positive slices remain. The first/last slices contribute
    an extra half-slab using the median (capped at 8 mm) slice spacing.
    """
    spacing = min(8, np.median(np.diff(x)))
    positive = y > 0
    x, y = x[positive], y[positive]
    count = np.sum(positive)
    if count < 3:
        return np.nan
    total = (y[0] + y[-1]) / 2.0 * spacing  # end-slice contribution
    for i in range(count - 1):
        total += (y[i] + y[i + 1]) * np.abs(x[i + 1] - x[i]) / 2.0
    return total / 1000.0
#sorenson-dice
def sorenson_dice(pred, tgt, ss=10):
    """Negative smoothed Sorensen-Dice overlap as a Theano expression.

    ss is an additive smoothing constant keeping the ratio defined (and the
    gradient tame) when both masks are empty.
    """
    intersection = T.sum(pred * tgt)
    combined = T.sum(pred) + T.sum(tgt)
    return -2 * (intersection + ss) / (combined + ss)
# get_patches deals in 2d arrays of value [0,1]
def get_patches(segment_arr):
    """Return one pixel-coordinate array per external contour's convex hull.

    segment_arr -- 2-D binary mask
    Each list entry is an (n_pixels, 2) array of (row, col) indices covering
    the *filled* hull, interior included.
    """
    ret = []
    im = segment_arr.astype(np.uint8)
    contours = cv2.findContours(im, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # NOTE(review): the index of the contour list in findContours' return
    # tuple differs across OpenCV versions; [1] matches OpenCV 3.x only.
    hulls = [cv2.convexHull(cont) for cont in contours[1]] #seems my version of CV2 (3.0) uses [1]
    for contour_idx in xrange(len(hulls)):
        cimg = np.zeros_like(im)
        # thickness=-1 fills the hull so interior pixels are collected too
        cv2.drawContours(cimg, hulls, contour_idx, color=255, thickness=-1)
        pts = np.array(np.where(cimg == 255)).T
        ret.append(pts)
    return ret
def ll_of_count(counts, means, stds):
    """Log-likelihood of a (slices x times) count matrix under per-pixel
    Gaussians (means/stds of length 300) fitted on a normalized 10x30 grid.

    The counts are scaled to uint8, empty rows dropped, resized to 10x30,
    and the best score over all 30 circular time-shifts is returned.
    """
    # Plain `import scipy` does not make scipy.stats available; import it
    # explicitly rather than relying on another library pulling it in.
    from scipy.stats import norm
    cm = np.copy(counts)
    cm = (cm * 255. / cm.max()).astype(np.uint8)
    cm = cm[np.where(cm.sum(axis=1))]
    if cm.shape[0] == 0:
        cm = np.zeros((10, 30), dtype=np.uint8)
    im = Image.fromarray(cm).resize((30, 10), Image.ANTIALIAS)
    counts_resized_arr = np.array(im.getdata(), dtype=np.float32).reshape(10, 30) / 255.
    max_ll = -10000000
    for roll_by in xrange(30):
        resized_counts = np.roll(counts_resized_arr, roll_by, axis=1).flatten()
        # logpdf is the numerically stable, vectorized form of log(pdf)
        ll = np.sum(norm.logpdf(resized_counts, loc=means, scale=stds))
        if ll > max_ll:
            max_ll = ll
    return max_ll
def clean_segmentation(segments, img_size):
    """Binarize raw FCN activations, keeping only the patch best supported
    by a 2-D gaussian prior fitted to the mean activation map.

    segments -- (n_images, 1, H, W) float activations
    Returns a same-shaped 0/1 array.
    """
    # The LV sits in roughly the same spot across slices/times, so fit a
    # gaussian prior to the average activation map.
    mean = segments.mean(axis=(0,1))
    gaussian_params = gaussian2d.moments_fake(mean, normalize_height=True)
    #gaussian_params = gaussian2d.fitgaussian(mean)
    pdf = gaussian2d.gaussian(*gaussian_params)
    seg_binary = np.zeros_like(segments)
    # Precompute the prior value for every pixel coordinate.
    pdf_dict = np.zeros_like(mean)
    for x in xrange(mean.shape[0]):
        for y in xrange(mean.shape[1]):
            pdf_dict[x,y] = pdf(x,y)
    for i in xrange(segments.shape[0]):
        _,sb = cv2.threshold(np.copy(segments[i,0])*255, 127, 255, cv2.THRESH_BINARY)
        patches = get_patches(sb)
        if len(patches)==0:
            continue
        # Score each candidate patch by total and per-pixel prior mass.
        sum_pdf_vals = [sum(pdf_dict[x,y] for x,y in p) for p in patches]
        avg_pdf_vals = [sum(pdf_dict[x,y] for x,y in p)/p.shape[0] for p in patches]
        max_sum_pdf = max(sum_pdf_vals)
        for p_idx, p in enumerate(patches):
            # Keep only the single top-scoring patch, and only when its
            # average prior support is above the 0.07 floor.
            if avg_pdf_vals[p_idx] < 0.07 or sum_pdf_vals[p_idx] < max_sum_pdf:
                for x,y in p:
                    seg_binary[i,0,x,y]=0
            else:
                for x,y in p:
                    seg_binary[i,0,x,y]=1
    return seg_binary
def clean_segmentation2(segments, img_size):
    """Binarize FCN activations keeping only the largest convex patch.

    BUG FIX: the original body referenced `pdf_dict`, which is undefined in
    this function (guaranteed NameError as soon as any patch was found) --
    it was copied from clean_segmentation() without the gaussian prior.
    This variant selects the patch with the most pixels instead.
    """
    seg_binary = np.zeros_like(segments)
    for i in xrange(segments.shape[0]):
        _, sb = cv2.threshold(np.copy(segments[i, 0]) * 255, 127, 255, cv2.THRESH_BINARY)
        patches = get_patches(sb)
        if len(patches) == 0:
            continue
        sizes = [p.shape[0] for p in patches]
        best = int(np.argmax(sizes))
        for x, y in patches[best]:
            seg_binary[i, 0, x, y] = 1
    return seg_binary
def get_contour_shape(x, y, z):
    """Resample contour samples into N=30 angular bins around the centroid.

    x, y -- pixel coordinates of the contour points
    z    -- intensity value at each point

    Returns a length-30 array of per-bin mean intensities, or None when any
    angular bin receives no points (a degenerate contour).
    """
    N = 30
    res = np.zeros(N)
    cx, cy = np.mean(x), np.mean(y)
    # angle of each point around the centroid, mapped into [0, 360)
    theta = (np.arctan2(y - cy, x - cx) * 180 / np.pi + 180 + 90) % 360
    # BUG FIX: dtype=np.int -- that alias was removed in NumPy 1.24; the
    # builtin int is the intended dtype.
    b = np.array(np.floor(theta / (360.0001 / N)), dtype=int)
    for i in range(N):
        idx = (b == i)
        if sum(idx) == 0:  # bad contour: an empty angular bin
            return None
        res[i] = np.mean(z[b == i])
    return res
def get_eff_portion(con_shape, cut):
    """Fraction of contour bins whose mean intensity falls below cut."""
    below = con_shape < cut
    return np.sum(below) * 1.0 / con_shape.size
def get_contour_portion(images,segb):
    """For every (slice, time) frame, estimate the fraction of the LV
    boundary showing good contrast against its surroundings.

    images -- (n_slices, n_times, 1, H, W) stack
    segb   -- (n_slices*n_times, 1, H, W) binary segmentations, time-major
              within each slice (index nt*s + t)
    Returns an (n_slices, n_times) array of portions in [0, 1].
    """
    ns = images.shape[0];
    nt = images.shape[1];
    portion = np.zeros((ns,nt));
    for s in range(ns):
        for t in range(nt):
            img = images[s,t,0];
            seg = segb[nt*s+t,0];
            if np.sum(seg)<10:
                # segmentation too small to trust: report zero contrast
                portion[s,t] = 0.0;
                continue;
            # ring of pixels just outside the segmentation boundary
            mask = cv2.dilate(seg,np.ones((7,7)))-seg>0;
            z = img[mask];
            x,y = np.where(mask);
            lvinside = np.mean(img[seg>0]);
            lvoutside = np.percentile(z,20);
            # intensity threshold between blood pool and background
            ccut = lvinside * 0.3 + lvoutside * 0.7;
            cnt_sh = get_contour_shape(x,y,z);
            if cnt_sh is None:
                portion[s,t] = 0.0;
            else:
                res = get_eff_portion(cnt_sh,ccut);
                portion[s,t] = res;
    return portion;
def write_outputs(dsets, dest_dir, vvv, style):
    """Write per-dataset slice areas and contour portions as CSV rows.

    dsets    -- datasets exposing name, slices_ver, time, slocation,
                areas and contour_portion
    dest_dir -- output directory
    vvv      -- version tag embedded in the filenames
    style    -- file mode ('w' to overwrite, 'a' to append)

    Each row: name, n_slices, n_times, slice locations..., values
    (time-major via .T.flatten()).
    """
    areas_lines = []
    p_lines = []
    for dset in dsets:
        header = '{},{},{},'.format(dset.name, len(dset.slices_ver), len(dset.time))
        locs = ','.join(['%.3f' % (c_) for c_ in dset.slocation])
        areas_lines.append(header + locs + ',' +
                           ','.join(['%.1f' % (c_) for c_ in dset.areas.T.flatten()]) + '\n')
        p_lines.append(header + locs + ',' +
                       ','.join(['%.3f' % (c_) for c_ in dset.contour_portion.T.flatten()]) + '\n')
    # `with` guarantees the handles are flushed and closed -- the original
    # leaked both file objects.
    with open(os.path.join(dest_dir, 'areas_map_{}.csv'.format(vvv)), style) as f:
        f.writelines(areas_lines)
    with open(os.path.join(dest_dir, 'contour_portion_{}.csv'.format(vvv)), style) as f:
        f.writelines(p_lines)
def clean_counts(counts):
    """Force per-slice counts to be non-increasing when walking (circularly)
    from the diastolic frame toward the systolic frame in both directions.

    counts -- (n_slices, n_times) array; returns a cleaned copy.
    """
    totals = counts.mean(axis=0)
    sys_time, dias_time = np.argmin(totals), np.argmax(totals)
    cleaned = np.copy(counts)
    n_times = counts.shape[1]
    for s in range(counts.shape[0]):
        # sweep backwards (step -1) and forwards (step +1) with wrap-around,
        # clamping each frame to its predecessor along the sweep
        for step in (-1, 1):
            prev = t = dias_time
            while t != sys_time:
                t = (t + step) % n_times
                cleaned[s, t] = min(cleaned[s, t], cleaned[s, prev])
                prev = t
    return cleaned
# calc_map = { dset_name: ([sys_vector], [dias_vector]) }
# vector is of format [1, calculated_val, (any other variables)]
# e.g. { 1: [1, sys_val, variation, ... ], [1, dias_val, variation, ... ]}
# calculates optimal w (four functions) as linear combination of everything
# in the vector
def optimize_w(calc_vector_map, label_map, dims_to_use = -1, function=sigmoid,
    min_w = 1, max_w = 13):
    """Fit the four CRPS sigmoid widths (sys below/above, dias below/above)
    as clipped linear functions of each dataset's feature vector.

    Returns a callable (feature_vector, idx) -> width, idx in 0..3.
    NOTE(review): Python 2 only (iteritems, print statement); also assumes
    scipy.optimize is importable -- confirm the import at the top of the file.
    """
    # slice to fewer dims if specified
    calculated_map = { k:(tuple([v1[:dims_to_use] for v1 in v]) if dims_to_use > 0 else v)
        for k,v in calc_vector_map.iteritems() if k in label_map }
    # width = dot(features, params), clipped into [min_w, max_w]
    lin_constr = lambda x_vec, p_vec: min(max_w, max(min_w, np.dot(x_vec, p_vec)))
    # one residual function per width, concatenating residuals over datasets:
    # [0] sys width-below, [1] sys width-above, [2] dias below, [3] dias above
    error_funcs = [lambda a: np.concatenate([calculate_diffs(calc[0][1], label_map[ds][1],
        lin_constr(calc[0], a), 9, function=function)
        for ds,calc in calculated_map.iteritems()]),
        lambda a: np.concatenate([calculate_diffs(calc[0][1], label_map[ds][1], 9,
        lin_constr(calc[0], a), function=function)
        for ds,calc in calculated_map.iteritems()]),
        lambda a: np.concatenate([calculate_diffs(calc[1][1], label_map[ds][0],
        lin_constr(calc[1], a), 9, function=function)
        for ds,calc in calculated_map.iteritems()]),
        lambda a: np.concatenate([calculate_diffs(calc[1][1], label_map[ds][0], 9,
        lin_constr(calc[1], a), function=function)
        for ds,calc in calculated_map.iteritems()])]
    num_vars = len(calculated_map.values()[0][0])
    guesses = [[5,0.1] + [.01]*(num_vars-2)]*4
    parms = []
    for func, guess in zip(error_funcs, guesses):
        obj, success = scipy.optimize.leastsq(func, guess)
        parms.append(obj)
        print obj
    return lambda p, idx: lin_constr(p, parms[idx])
def calculate_submission_values(volume, width_below, width_above, function=sigmoid):
    """Smoothed CDF-like 600-vector for a predicted volume: entry i is
    function((i - volume) / w), with w = width_below left of the volume
    and width_above to its right."""
    return np.array([function((i - volume) / (width_below if i < volume else width_above))
                     for i in range(600)])
def calculate_diffs(calculated, real, width_below, width_above, function=sigmoid):
    """Elementwise residual between the ideal step function at the true
    volume `real` and the smoothed curve centred at `calculated`."""
    predicted = calculate_submission_values(calculated, width_below, width_above, function)
    step = np.array([1 if i > real else 0 for i in range(600)])
    return step - predicted
def calculate_err(calculated, real, width_below, width_above, function=sigmoid):
    """Mean squared residual (CRPS-style score) for one prediction."""
    residuals = calculate_diffs(calculated, real, width_below, width_above, function)
    return np.mean(residuals ** 2)
def get_label_map(labels_file):
    """Load the labels CSV (one header row skipped) into a dict mapping
    case id -> (third column, second column), i.e. the two volume labels
    with their file order swapped."""
    rows = np.loadtxt(labels_file, delimiter=',', skiprows=1)
    return dict((row[0], (row[2], row[1])) for row in rows)
def get_calc_counts_errors_maps(calc_file, counts_file, labels_file):
    """Load calculated volumes, optional per-slice counts, and per-dataset
    CRPS errors against the labels.

    calc_file   -- headerless CSV: id, sys_volume, dias_volume
    counts_file -- optional CSV: id, n_times, counts... (reshaped (-1, n_times))
    labels_file -- labels CSV consumed by get_label_map()

    Returns (calc_map, counts_map, errors_map); counts_map is None when no
    counts_file is given.
    """
    label_map = get_label_map(labels_file)
    calc_map = read_csv(calc_file, header=None)
    calc_map = dict((r[0], (r[1],r[2])) for _,r in calc_map.iterrows())
    counts_map = None
    if counts_file is not None:
        counts_map = open(counts_file, 'r').readlines()
        counts_map = [l.split(',') for l in counts_map]
        counts_map = [[int(st) for st in l] for l in counts_map]
        counts_map = dict((r[0], np.array(r[2:]).reshape((-1,r[1]))) for r in counts_map)
    def error(calc):
        # NOTE(review): `ds` is resolved at call time from the enclosing
        # scope; this relies on Python 2 list comprehensions leaking their
        # loop variable and would raise NameError under Python 3.
        return 0.5*(calculate_err(calc[0], label_map[ds][1], 10, 10) \
            + calculate_err(calc[1], label_map[ds][0], 10, 10))
    errors_map = dict([(ds,error(calc)) for ds,calc in calc_map.iteritems()
        if ds in label_map])
    return calc_map, counts_map, errors_map
def crop_resize(img, size):
    """Normalize to [0, 1], center-crop to a square, and resize to
    (size, size), returned as uint8."""
    img = img.astype(float) / np.max(img)
    # orient the long axis horizontally so the centered crop is uniform
    if img.shape[0] < img.shape[1]:
        img = img.T[::-1]
    edge = min(img.shape[:2])
    top = int((img.shape[0] - edge) / 2)
    left = int((img.shape[1] - edge) / 2)
    square = img[top:top + edge, left:left + edge]
    resized = transform.resize(square, (size, size))
    resized *= 255
    return resized.astype("uint8")
def rescale(img, sc):
    """Zoom img about its center by factor sc, keeping the output shape:
    crop the enlargement when sc > 1, zero-pad the reduction when sc < 1."""
    res = np.zeros_like(img);
    size = res.shape;
    ns = (int(size[0]*sc),int(size[1]*sc));
    # NOTE(review): cv2.resize expects dsize as (width, height); ns is built
    # as (rows, cols), which is only safe for square images -- confirm the
    # inputs here are always square.
    if sc>1:
        # enlarged image is cropped back to the original window
        sx,sy = (ns[0]-size[0])//2,(ns[1]-size[1])//2;
        res = cv2.resize(img,ns)[sx:sx+size[0],sy:sy+size[1]];
    else:
        # shrunken image is pasted centred into a zero canvas
        sx,sy = (size[0]-ns[0])//2,(size[1]-ns[1])//2;
        res[sx:sx+ns[0],sy:sy+ns[1]] = cv2.resize(img,ns);
    return res;
def img_shift(img, xy):
    """Translate img by (ox, oy) pixels (cols, rows), zero-filling the
    exposed border."""
    shifted = np.zeros_like(img)
    ox, oy = xy
    def lo(v):
        return max(0, v)
    def hi(v):
        # negative offsets clip from the far end; None means "to the edge"
        return v if v < 0 else None
    shifted[lo(oy):hi(oy), lo(ox):hi(ox)] = img[lo(-oy):hi(-oy), lo(-ox):hi(-ox)]
    return shifted
def segmenter_data_transform(imb, shift=0, rotate=0, scale=0, normalize_pctwise=(20,95), istest=False):
    """Augment (rotate/scale/shift) and percentile-normalize an image batch,
    applying the same geometric transform to the label masks when given.

    imb   -- image batch (N, 1, H, W), or an (images, labels) tuple
    istest-- apply the nominal rotate/scale deterministically and skip
             shifting; otherwise magnitudes are sampled per image
    Returns float batches scaled to roughly [-1, 1] (labels to [0, 1]).
    """
    if isinstance(imb, tuple) and len(imb) == 2:
        imgs,labels = imb
    else:
        imgs = imb
    # rotate image if training
    if rotate>0:
        for i in xrange(imgs.shape[0]):
            degrees = rotate if istest else np.clip(np.random.normal(),-2,2)*rotate;
            imgs[i,0] = scipy.misc.imrotate(imgs[i,0], degrees, interp='bilinear')
            if isinstance(imb, tuple):
                # identical rotation keeps image and mask aligned
                labels[i,0] = scipy.misc.imrotate(labels[i,0], degrees, interp='bilinear')
    #rescale
    if scale>0:
        assert(scale>0 and scale<=0.5);
        for i in xrange(imgs.shape[0]):
            sc = 1 + (scale if istest else np.clip(np.random.normal(),-2,2)*scale);
            imgs[i,0] = rescale(imgs[i,0],sc);
            if isinstance(imb, tuple):
                labels[i,0] = rescale(labels[i,0], sc);
    #shift
    if shift>0 and not istest:
        for i in xrange(imgs.shape[0]):
            x,y = np.random.randint(-shift,shift,2);
            imgs[i,0] = img_shift(imgs[i,0], (x,y));
            if isinstance(imb, tuple):
                labels[i,0] = img_shift(labels[i,0], (x,y));
    imgs = nn.utils.floatX(imgs)/255.0;
    for i in xrange(imgs.shape[0]):
        pclow, pchigh = normalize_pctwise
        if isinstance(pclow,tuple):
            # sample the normalization percentiles per image when ranges given
            pclow = np.random.randint(pclow[0],pclow[1]);
            pchigh = np.random.randint(pchigh[0],pchigh[1]);
        pl,ph = np.percentile(imgs[i],(pclow, pchigh))
        imgs[i] = exposure.rescale_intensity(imgs[i], in_range=(pl, ph));
        imgs[i] = 2*imgs[i]/imgs[i].max() - 1.
    if isinstance(imb,tuple):
        labels = nn.utils.floatX(labels)/255.0;
        return imgs,labels
    else:
        return imgs;
def deconvert(im):
    """Linearly stretch im onto the full 0..255 range and cast to uint8."""
    low, high = im.min(), im.max()
    scaled = (im - low) * 255 / (high - low)
    return scaled.astype(np.uint8)
def z_old_optimize_w(calc_map, label_map):
    """Older variant of optimize_w(): fits each of the four CRPS widths as
    a clipped affine function a*x + b of the predicted volume alone.

    Returns a callable (prediction, idx) -> width, idx in 0..3.
    NOTE(review): Python 2 only (iteritems); assumes scipy.optimize is
    importable.
    """
    calculated_map = dict((k,v) for k,v in calc_map.iteritems() if k in label_map)
    # width = a*x + b, clipped into [0.5, 20]
    lin_constr = lambda x,a,b: min(20, max(0.5, a*x+b))
    # residuals per width: sys below, sys above, dias below, dias above
    error_funcs = [lambda a: np.concatenate([calculate_diffs(calc[0], label_map[ds][1], lin_constr(calc[0], a[0], a[1]), 9)
        for ds,calc in calculated_map.iteritems()]),
        lambda a: np.concatenate([calculate_diffs(calc[0], label_map[ds][1], 9, lin_constr(calc[0], a[0], a[1]))
        for ds,calc in calculated_map.iteritems()]),
        lambda a: np.concatenate([calculate_diffs(calc[1], label_map[ds][0], lin_constr(calc[1], a[0], a[1]), 9)
        for ds,calc in calculated_map.iteritems()]),
        lambda a: np.concatenate([calculate_diffs(calc[1], label_map[ds][0], 9, lin_constr(calc[1], a[0], a[1]))
        for ds,calc in calculated_map.iteritems()])]
    # warm starts from a previous fit
    guesses = [[0.04656, 4.693],
        [0.03896, -0.4893],
        [0.02458, 1.541],
        [0.03392,0.1355]]
    parms = []
    for func, guess in zip(error_funcs, guesses):
        obj, success = scipy.optimize.leastsq(func, guess)
        parms.append(obj)
    return lambda p, idx: lin_constr(p, parms[idx][0], parms[idx][1])
# given predictions and label map, gives optimal stdev above and below
# for each example
def z_old_optimal_w_funcs(calculated_map, label_map, verbose=False):
    """Grid-search the best integer width (0..99) per dataset for each of
    the four CRPS sigmoids, then fit degree-1 polynomials width(prediction).

    Returns four callables (wsb, wsa, wdb, wda) mapping a predicted volume
    to a width clipped into [0, 20].
    NOTE(review): Python 2 only (print statement).
    """
    optimal_ws_map = dict((ds,[]) for ds in calculated_map if ds in label_map)
    for ds in [d for d in calculated_map if d in label_map]:
        sys_vol, dias_vol = calculated_map[ds]
        edv, esv = label_map[ds]
        # one error curve per width: sys below, sys above, dias below, dias above
        err_func = [lambda x: calculate_err(sys_vol, esv, x, 10),
            lambda x: calculate_err(sys_vol, esv, 10, x),
            lambda x: calculate_err(dias_vol, edv, x, 10),
            lambda x: calculate_err(dias_vol, edv, 10, x)]
        for w_idx in xrange(4):
            min_err = 1000000
            min_w = 0
            for w in xrange(100):
                err = err_func[w_idx](w)
                if err < min_err:
                    min_err = err
                    min_w = w
            optimal_ws_map[ds].append(min_w)
        if verbose and ds % 5 == 0:
            print ds, optimal_ws_map[ds]
    # columns: sys_vol, dias_vol, then the four per-dataset optimal widths
    preds_arr = np.empty((len(optimal_ws_map), 6), dtype=np.float32)
    i=0
    for ds in optimal_ws_map:
        preds_arr[i] = np.array([calculated_map[ds][0], calculated_map[ds][1],
            min(100,optimal_ws_map[ds][0]),
            min(100,optimal_ws_map[ds][1]),
            min(100,optimal_ws_map[ds][2]),
            min(100,optimal_ws_map[ds][3])])
        i += 1
    degree=1
    # linear fit of each width against the corresponding predicted volume
    wsb1 = np.poly1d(np.polyfit(preds_arr[:,0], preds_arr[:,2], degree))
    wsa1 = np.poly1d(np.polyfit(preds_arr[:,0], preds_arr[:,3], degree))
    wdb1 = np.poly1d(np.polyfit(preds_arr[:,1], preds_arr[:,4], degree))
    wda1 = np.poly1d(np.polyfit(preds_arr[:,1], preds_arr[:,5], degree))
    wsb = lambda x: min(20, max(0, wsb1(x)))
    wsa = lambda x: min(20, max(0, wsa1(x)))
    wdb = lambda x: min(20, max(0, wdb1(x)))
    wda = lambda x: min(20, max(0, wda1(x)))
    return wsb, wsa, wdb, wda
def save_imgcon(cst, img, con=None):
    """Pickle an (image, contour*255) pair for case/slice/time `cst` into
    the auto-contours directory.

    cst -- (case, slice, time) triple used to build the filename
    con -- contour mask; a zero mask is substituted when None

    BUG FIX: the original scaled `con` with *=, silently mutating the
    caller's array; this version scales a fresh array instead. The pickle
    file is also now written through a context manager so the handle is
    always closed.
    """
    import os
    import pickle
    if con is None:
        con = np.zeros_like(img)
    con = con * 255  # new array: do not clobber the caller's data
    ddir = c.data_auto_contours + '/size_{}'.format(c.fcn_img_size)
    if not os.path.isdir(ddir):
        os.mkdir(ddir)
    fname = ddir + '/c_{}_s_{}_t_{}.pkl'.format(cst[0], cst[1], cst[2])
    with open(fname, 'wb') as f:
        pickle.dump((img, con), f)
| |
## Solve the Dirichlet problem for the Laplace equation on
## a planar domain where the boundary is a smooth simple
## closed curve with a C^2 parametrisation using the
## boundary integral equation method.
##
## Dale Roberts <dale.o.roberts@gmail.com>
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
import matplotlib.path as path
import matplotlib.patches as patches
import matplotlib.cm as cm
import matplotlib.tri as tri
## Set the parameters
n_boundary = 128           # number of boundary collocation points
n_quadrature = n_boundary  # quadrature nodes for the layer potential
n_angles = n_boundary      # angular resolution of the evaluation grid
n_radii = n_boundary       # radial resolution of the evaluation grid
min_radius = 0.01          # keep away from the origin (triangulation mask)
max_radius = 0.99          # stay strictly inside the boundary
plot_contours = False      # filled contours vs. flat pseudocolour plot
n_levels = 128             # number of contour levels
colors = cm.prism          # colormap for the plots
## Now define the boundary surface parametrisation
##
## \[ r(t) = (\xi(t), \eta(t)), 0 \le t \le L \]
##
## with $r \in C^2[0,L]$ and $|r'(t)| \ne 0$ for $0 \le t \le L$.
L = 2.0 * np.pi # angles from 0 to L
# ellipse boundary
a = 1 # half major axis
b = 1 # half minor axis
def r(t):
    """Boundary parametrisation r(t) = (xi(t), eta(t)): an ellipse with
    semi-axes a and b."""
    return a * np.cos(t), b * np.sin(t)
## We solve the integral equation:
##
## \[ - \pi \rho(t) + \int_0^L k(t,s) \rho(s) \, ds = f(t) \]
##
## for $0 \le t \le L$, where the kernel k(t,s) is given by
##
## \[
## k(t,s) =
## \frac{
## \eta'(s)[\xi(t) - \xi(s)] - \xi'(s)[\eta(t) - \eta(s)]
## }{
## [\xi(t) - \xi(s)]^2 + [\eta(t) - \eta(s)]^2
## }
## \]
##
## for $s \ne t$, and
##
## \[
## k(t,t) =
## \frac{
## \eta'(t)\xi''(t) - \xi'(t)\eta''(t)
## }{
## 2 [\xi'(t)^2 + \eta'(t)^2]
## }
## \]
def k(r, s):
    """Double-layer kernel k(r, s), specialised to the ellipse boundary
    with semi-axes a and b."""
    mid = (r + s) / 2.0
    sigma = np.sqrt(a**2.0 * np.sin(mid)**2.0 + b**2.0 * np.cos(mid)**2.0)
    return -a * b / (2.0 * sigma**2.0)
## and the boundary data $f(t)$ is given by
## $f(t) \equiv f(r(t))$.
def f(r):
    """Dirichlet boundary data evaluated at boundary angle r."""
    return np.sin(10 * r)
## Assemble the linear system
##
## \[
##
## - \pi \rho_n(t_i) + h \sum_{j=1}^n k(t_i,t_j)\rho_n(t_j) = f(t_i)
##
## \]
##
## where n = n_boundary, h = L/n, t_j = j h.
# Sample the angles for the boundary discretisation
# retstep=True also returns the uniform grid spacing h
t, h = np.linspace(0, L, n_boundary, endpoint=False, retstep=True)
# Assemble matrix (dense!)
A = np.zeros((n_boundary,n_boundary))
for i in range(n_boundary):
    for j in range(n_boundary):
        A[i,j] = k(t[i],t[j])
# Nystrom discretisation of the integral equation: (-pi I + h K) rho = f
A = -np.pi * np.eye(n_boundary) + h * A
# Assemble right-hand side
f_n = f(t)
## Solve for the approximation of the kernel $\rho_n$.
rho_n = la.solve(A,f_n)
## We can approximate the kernel $\rho(t)$ at any t using
## interpolation.
def rho_int(s):
    """Nystrom interpolation of the density rho at an arbitrary angle s."""
    quad = h * np.dot(k(s, t), rho_n)
    return 1.0 / np.pi * (quad - f(s))
# Evaluate the density at the quadrature nodes. BUG FIX: the original
# branch called the undefined name `rho` (it meant `rho_int`) and read `T`
# before its assignment further down -- it only "worked" because
# n_quadrature == n_boundary made the branch dead.
if n_quadrature != n_boundary:
    T_quad = np.linspace(0, L, n_quadrature, endpoint=False)
    rho = np.array([rho_int(tau) for tau in T_quad]).flatten()
else:
    rho = rho_n
## We now need to evaluate the double layer potential
##
## u(x,y) = \int_0^L M(x,y,s) \rho(s)\,ds
##
## where
##
## \[
## M(x,y,s) =
## \frac{
## -\eta'(s)[\xi(s) - x] + \xi'(s)[\eta(s) - y]
## }{
## [\xi(s) - x]^2 + [\eta(s) - y]^2
## }
## \]
##
## We use a trapezoidal rule.
def M(x, y, s):
    """Double-layer potential kernel at target (x, y) for boundary angle s.

    NOTE: this hard-codes the unit-circle parametrisation (cos s, sin s)
    rather than calling the general r(t) -- valid only while a == b == 1.
    """
    bx, by = np.cos(s), np.sin(s)
    numerator = -bx * (bx - x) - by * (by - y)
    denominator = (bx - x)**2.0 + (by - y)**2.0
    return numerator / denominator
## Sample the angles for the quadrature
# retstep=True also returns the uniform spacing H, the trapezoid weight
T, H = np.linspace(0, L, n_quadrature, endpoint=False, retstep=True)
def u(x, y):
    """Double-layer potential u(x, y) via the composite trapezoid rule."""
    kernel_vals = M(x, y, T)
    return H * np.dot(kernel_vals, rho)
## We now plot the solution
# First sample the x and y coordinates of the points we want
# to evaluate the solution u(x,y) at
radii = np.linspace(min_radius, max_radius, n_radii)
angles = np.linspace(0, L, n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
# stagger every other ring to avoid degenerate triangles
angles[:, 1::2] += np.pi / n_angles
X = a * (radii * np.cos(angles)).flatten()
Y = b * (radii * np.sin(angles)).flatten()
Z = np.array([u(x, y) for x, y in zip(X, Y)])
# unit levelsets for contour plot
levelsets = np.linspace(np.floor(np.min(Z)) - 1, np.ceil(np.max(Z)) + 1, n_levels)
# clear plot
plt.clf()
# create mesh
mesh = tri.Triangulation(X, Y)
# Mask off unwanted triangles.
xmid = X[mesh.triangles].mean(axis=1)
ymid = Y[mesh.triangles].mean(axis=1)
mask = np.where(xmid * xmid + ymid * ymid < min_radius * min_radius, 1, 0)
#mesh.set_mask(mask)
plt.gca().set_aspect('equal')
if plot_contours:
    plt.tricontourf(mesh, Z, levels=levelsets, cmap=colors)
    plt.colorbar()
    plt.tricontour(mesh, Z, levels=levelsets, cmap=colors)
else:
    plt.tripcolor(mesh, Z)
# BUG FIX: the keyword is bbox_inches; the original's misspelled 'bb_inches'
# was silently swallowed, so the tight bounding box was never applied.
plt.savefig('sol.png', bbox_inches='tight')
#plt.show()
| |
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract attribution data from the ohchr UDHR site."""
# This tool generates a .tsv file of attribution data based on information at the ohchr
# site, but first you have to manually extract that data from the html on the site, as
# there's no convenient way to get it. This block of comments describes the process.
#
# The idea is to find out which data on the ohchr site is 'official United Nations' data
# and which is not. The data itself doesn't say, so we need to look at the attributions
# listed on the ohchr.org site.
#
# Note that the data we actually use is not directly from ohchr.org, but from
# www.unicode.org/udhr. That site has cleaned up the data a little and converted it to
# xml format. We are assuming that any data with a matching language code shares the
# original attribution, but we could be wrong. The unicode.org site does not have the
# attribution data in any kind of organized form. Instead, they put a comment at the top
# of each document giving copyright to "The Office of the High Commisioner for Human
# Rights."
#
# Unfortunately, the data at www.ohchr.org is not readily available. At
# http://www.ohchr.org/EN/UDHR/Pages/SearchByLang.aspx you can page through the data using
# the dropdown under 'Search by Translation', but there's no visible url for a single page
# or for the data as a whole.
#
# If you try to view each page and then 'save as...', chrome fetches the url for the page
# it is showing, which returns the first (default) page no matter what data you are
# actually viewing. 'View as source' works, but it provides a formatted page, and if you
# choose 'save as...' from there, you get the source for that formatted page, not the raw
# source. The only way to get the source is to select and copy it from the source view
# into another document.
#
# At this point it makes more sense to just grab the portion of the data we can use
# instead of the whole file. So the process is to use the dropdown to show one of the
# pages of translations and then choose view source for it. Copy the contents of the
# <table> tag that lists the languages and sources into a stub html file. Repeat this for
# each of the six dropdown pages. The stub contains a single table element with the id
# 'ohchr_alldata', after this the table contains the data from all six ohchr pages.
#
# This data is still odd, in particular it nests <tr> and <td> tags. Fortunately
# HTMLParser doesn't care, and we don't need to care. The three pieces of data are the
# 'ohchr code', the 'language name', and the 'source'. The ohchr code is how they link to
# the page for the translation, mostly it is a three-letter language code but sometimes it
# is just whatever their server uses. The 'language name' is more or less an English
# translation of the language, sometimes with notes on script or region or the native name
# of the language, and the attribution is a string. The data is structured so that the
# ohchr code is part of an anchor tag that wraps the language string, and the source is
# part of a span in the following td. There are no other anchor tags or spans in the
# data, so we can just look for these. Separating each set is a close tr tag, so we can
# emit the data then.
#
# The output is a list of records with tab-separated fields: ohchr_code, lang_name, and
# source_name. The udhr index at unicode.org references the 'ohchr' code, so this is how
# we tie the attributions to the data from unicode.org.
import argparse
import codecs
import HTMLParser as html
import re
from nototools import tool_utils
class ParseOhchr(html.HTMLParser):
  """Streaming parser for the hand-assembled ohchr stub html.

  Walks the table with id 'ohchr_alldata' and collects one
  (ohchr_code, lang_name, source_name) tuple per table row. The data nests
  <tr>/<td> tags, but since the only anchors and spans in the table are the
  ones we want, we just watch for those and emit on each </tr>.
  """
  def __init__(self, trace=False):
    # trace -- when True, print an indented tag trace while parsing
    html.HTMLParser.__init__(self)
    self.trace = trace
    self.result_list = []
    self.restart()
  def restart(self):
    """Reset all parse state so the instance can be fed a new document."""
    self.margin = ''            # indentation for trace output
    self.state = 'before_table' # before_table -> in_table -> after_table
    self.tag_stack = []         # open tags with their source positions
    self.collect_lang = False   # accumulate text into lang_name
    self.collect_source = False # accumulate text into source_name
    self.ohchr_code = ''
    self.lang_name = ''
    self.source_name = ''
  def results(self):
    """Return the list of (ohchr_code, lang_name, source_name) tuples."""
    return self.result_list
  def indent(self):
    self.margin += '  '
  def outdent(self):
    if not self.margin:
      print '*** cannot outdent ***'
    else:
      self.margin = self.margin[:-2]
  def get_attr(self, attr_list, attr_id):
    """Return the value of attr_id in an (attr, value) list, or None."""
    for t in attr_list:
      if t[0] == attr_id:
        return t[1]
    return None
  def handle_starttag(self, tag, attrs):
    # void elements never get close tags, so keep them off the stack
    if tag not in ['link', 'meta', 'area', 'img', 'br']:
      if self.trace:
        print self.margin + tag + '>'
      self.tag_stack.append((tag, self.getpos()))
      self.indent()
    elif self.trace:
      print self.margin + tag
    if self.state == 'before_table' and tag == 'table':
      table_id = self.get_attr(attrs, 'id')
      if table_id == 'ohchr_alldata':
        self.state = 'in_table'
    elif self.state == 'in_table':
      if tag == 'tr':
        # new record: clear the per-row fields
        self.ohchr_code = ''
        self.lang_name = ''
        self.source_name = ''
      elif tag == 'a':
        a_id = self.get_attr(attrs, 'id')
        if a_id and a_id.endswith('_hpLangTitleID'):
          # the ohchr code is the value after '=' in the anchor's href
          ohchr_code = self.get_attr(attrs, 'href')
          ix = ohchr_code.rfind('=')
          self.ohchr_code = ohchr_code[ix+1:]
          self.collect_lang = True
      elif tag == 'span':
        span_id = self.get_attr(attrs, 'id')
        if span_id and span_id.endswith('_lblSourceID'):
          self.collect_source = True
      elif tag == 'td':
        # any new cell ends whatever text collection was in progress
        self.collect_lang = False
        self.collect_source = False
  def handle_endtag(self, tag):
    # Pop to the matching open tag, tolerating the source's unclosed tags.
    while self.tag_stack:
      prev_tag, prev_pos = self.tag_stack.pop()
      self.outdent()
      if tag != prev_tag:
        if self.trace:
          print 'no close tag for %s at %s' % (prev_tag, prev_pos)
      else:
        break
    if self.trace:
      print self.margin + '<'
    if self.state == 'in_table':
      if tag == 'table':
        self.state = 'after_table'
      elif tag == 'tr':
        if self.ohchr_code:
          # collapse runs of whitespace picked up across nested tags
          self.lang_name = re.sub(r'\s+', ' ', self.lang_name).strip()
          self.source_name = re.sub(r'\s+', ' ', self.source_name).strip()
          if not self.source_name:
            self.source_name = '(no attribution)'
          self.result_list.append((self.ohchr_code, self.lang_name, self.source_name))
          self.ohchr_code = ''
          self.lang_name = ''
          self.source_name = ''
  def handle_data(self, data):
    # Text only matters while inside the language anchor or source span.
    if self.collect_lang:
      self.lang_name += data
    elif self.collect_source:
      self.source_name += data
    pass
def get_ohchr_status(ohchr_code, lang, attrib):
  """Decide the status based on the attribution text.
  'original' are in the public domain and need no attribution.
  'UN' are official UN translations and should be attributed as such.
  'other' are not official UN translations and should be attributed as such."""
  original_codes = ('eng', 'frn', 'spn', 'rus', 'chn', 'arz')
  if ohchr_code in original_codes:
    return 'original'
  un_markers = ('United Nations', 'High Commissioner for Human Rights')
  if any(marker in attrib for marker in un_markers):
    return 'UN'
  return 'other'
def parse_ohchr_html_file(htmlfile, outfile):
  """Parse the stub html and write one tab-separated record per language.

  Each output line is: ohchr_code, status, language name, attribution.
  An outfile of '' or '-' dumps the data to stdout instead.
  """
  parser = ParseOhchr(False)
  with open(htmlfile) as f:
    parser.feed(f.read())
  lines = []
  for ohchr_code, lang, attrib in parser.results():
    s = get_ohchr_status(ohchr_code, lang, attrib)
    lines.append('\t'.join([ohchr_code, s, lang, attrib]))
  data = '\n'.join(lines) + '\n'
  print 'outfile: "%s"' % outfile
  if not outfile or outfile == '-':
    print data
  else:
    with open(outfile, 'w') as f:
      f.write(data)
def main():
  """Command-line entry point: parse the saved ohchr html, emit the tsv."""
  default_input = '[tools]/third_party/ohchr/ohchr_all.html'
  default_output = '[tools]/third_party/ohchr/attributions.tsv'
  parser = argparse.ArgumentParser()
  parser.add_argument('--src', help='input ohchr html file (default %s)' % default_input,
                      default=default_input, metavar='file', dest='htmlfile')
  parser.add_argument('--dst', help='output tsv file (default %s)' % default_output,
                      default=default_output, metavar='file', dest='outfile')
  args = parser.parse_args()
  src = tool_utils.resolve_path(args.htmlfile)
  dst = tool_utils.resolve_path(args.outfile)
  parse_ohchr_html_file(src, dst)
if __name__ == '__main__':
  main()
| |
import datetime
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_init, post_save
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from symposion.sponsors_pro import SPONSOR_COORDINATORS
from symposion.sponsors_pro.managers import SponsorManager
from symposion.utils.mail import send_email
class SponsorLevel(models.Model):
    """A sponsorship tier (e.g. gold/silver), priced and ordered for display."""
    name = models.CharField(_("name"), max_length=100)
    order = models.IntegerField(_("order"), default=0)  # sort key, lowest first
    cost = models.PositiveIntegerField(_("cost"))
    # shown to organizers only, per the help text
    description = models.TextField(_("description"), blank=True, help_text=_("This is private."))
    class Meta:
        ordering = ["order"]
    def __unicode__(self):
        return self.name
class Sponsor(models.Model):
    """A (prospective) conference sponsor and its application state."""
    applicant = models.OneToOneField(
        User, related_name="sponsorship", verbose_name=_("applicant"), null=True
    )
    name = models.CharField(_("sponsor name"), max_length=100)
    external_url = models.URLField(_("external URL"))
    annotation = models.TextField(_("annotation"), blank=True)
    contact_name = models.CharField(_("contact name"), max_length=100)
    contact_email = models.EmailField(_(u"Contact email"))
    level = models.ForeignKey(SponsorLevel, verbose_name=_("level"), null=True)
    added = models.DateTimeField(_("added"), default=datetime.datetime.now)
    # NullBoolean: None = undecided, True = approved, False = rejected.
    active = models.NullBooleanField(_("active"))
    # Denormalization # @@@ This'll break if we can ever have more than one logo
    sponsor_logo = models.ForeignKey("SponsorBenefit", related_name="+", null=True, blank=True, editable=False)
    objects = SponsorManager()
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        # Approved sponsors get a public detail page; everyone else is sent
        # to the generic sponsorship-info page.
        if self.active:
            return reverse("sponsor_detail", kwargs={"pk": self.pk})
        return reverse("sponsor_info")
    @property
    def website_logo_url(self):
        """URL of the uploaded web-logo benefit, or None; cached per instance."""
        if not hasattr(self, '_website_logo_url'):
            self._website_logo_url = None
            benefits = self.sponsor_benefits.filter(benefit__type="weblogo", upload__isnull=False)
            if benefits.count():
                # @@@ smarter handling of multiple weblogo benefits?
                # shouldn't happen
                if benefits[0].upload:
                    self._website_logo_url = benefits[0].upload.url
        return self._website_logo_url
    @property
    def listing_text(self):
        """Text of the listing benefit (benefit id 8), or None; cached."""
        if not hasattr(self, '_listing_text'):
            self._listing_text = None
            # NOTE(review): hard-coded benefit id 8 — presumably the "listing"
            # benefit in production data; confirm against fixtures.
            benefits = self.sponsor_benefits.filter(benefit__id=8)
            if benefits.count():
                self._listing_text = benefits[0].text
        return self._listing_text
    @property
    def joblisting_text(self):
        """Text of the job-listing benefit (benefit id 21), or None; cached."""
        if not hasattr(self, '_joblisting_text'):
            self._joblisting_text = None
            benefits = self.sponsor_benefits.filter(benefit__id=21)
            if benefits.count():
                self._joblisting_text = benefits[0].text
        return self._joblisting_text
    @property
    def website_logo(self):
        """Uploaded logo file; lazily denormalizes onto sponsor_logo and saves.
        NOTE(review): raises AttributeError if no logo benefit exists
        (sponsor_logo stays None) — confirm callers guard against that."""
        if self.sponsor_logo is None:
            benefits = self.sponsor_benefits.filter(benefit__type="weblogo", upload__isnull=False)[:1]
            if benefits.count():
                if benefits[0].upload:
                    self.sponsor_logo = benefits[0]
                    self.save()
        return self.sponsor_logo.upload
    def reset_benefits(self):
        """
        Reset all benefits for this sponsor to the defaults for their
        sponsorship level.
        """
        level = None
        try:
            level = self.level
        except SponsorLevel.DoesNotExist:
            pass
        allowed_benefits = []
        if level:
            for benefit_level in level.benefit_levels.all():
                # Create all needed benefits if they don't exist already
                sponsor_benefit, created = SponsorBenefit.objects.get_or_create(
                    sponsor=self, benefit=benefit_level.benefit)
                # and set to default limits for this level.
                sponsor_benefit.max_words = benefit_level.max_words
                sponsor_benefit.other_limits = benefit_level.other_limits
                # and set to active
                sponsor_benefit.active = True
                # @@@ We don't call sponsor_benefit.clean here. This means
                # that if the sponsorship level for a sponsor is adjusted
                # downwards, an existing too-long text entry can remain,
                # and won't raise a validation error until it's next
                # edited.
                sponsor_benefit.save()
                allowed_benefits.append(sponsor_benefit.pk)
        # Any remaining sponsor benefits that don't normally belong to
        # this level are set to inactive
        self.sponsor_benefits.exclude(pk__in=allowed_benefits).update(active=False, max_words=None, other_limits="")
    def send_coordinator_emails(self):
        """Email every user in the sponsor-coordinators group about this signup."""
        for user in User.objects.filter(groups__name=SPONSOR_COORDINATORS):
            send_email(
                [user.email], "sponsor_signup",
                context = {"sponsor": self}
            )
def _store_initial_level(sender, instance, **kwargs):
    """Remember the level at load time so saves can detect a level change."""
    if instance:
        instance._initial_level_id = instance.level_id

post_init.connect(_store_initial_level, sender=Sponsor)

def _check_level_change(sender, instance, created, **kwargs):
    """Re-derive the benefit set whenever a sponsor is created or re-leveled."""
    if not instance:
        return
    if created or instance.level_id != instance._initial_level_id:
        instance.reset_benefits()

post_save.connect(_check_level_change, sender=Sponsor)

def _send_sponsor_notification_emails(sender, instance, created, **kwargs):
    """Notify coordinators once, on first save of a new sponsorship."""
    if created and instance:
        instance.send_coordinator_emails()

post_save.connect(_send_sponsor_notification_emails, sender=Sponsor)
class Benefit(models.Model):
    """A kind of perk a sponsor can receive; ``type`` decides which data
    field (upload, text, or none) a SponsorBenefit of this kind uses."""
    name = models.CharField(_("name"), max_length=100)
    description = models.TextField(_("description"), blank=True)
    type = models.CharField(
        _("type"),
        choices=[
            ("text", "Text"),
            ("file", "File"),
            ("weblogo", "Web Logo"),
            ("simple", "Simple")
        ],
        max_length=10,
        default="simple"
    )
    def __unicode__(self):
        return self.name
class BenefitLevel(models.Model):
    """Joins a Benefit to a SponsorLevel, carrying that level's limits."""
    benefit = models.ForeignKey(
        Benefit,
        related_name="benefit_levels",
        verbose_name=_("benefit")
    )
    level = models.ForeignKey(
        SponsorLevel,
        related_name="benefit_levels",
        verbose_name=_("level")
    )
    # Copied onto SponsorBenefit by Sponsor.reset_benefits().
    max_words = models.PositiveIntegerField(_("max words"), blank=True, null=True)
    other_limits = models.CharField(_("other limits"), max_length=200, blank=True)
    class Meta:
        ordering = ["level"]
    def __unicode__(self):
        return u"%s - %s" % (self.level, self.benefit)
class SponsorBenefit(models.Model):
    """A concrete benefit granted to one sponsor, plus the data they supplied."""
    sponsor = models.ForeignKey(
        Sponsor,
        related_name="sponsor_benefits",
        verbose_name=_("sponsor")
    )
    benefit = models.ForeignKey(Benefit,
        related_name="sponsor_benefits",
        verbose_name=_("benefit")
    )
    active = models.BooleanField(default=True)
    # Limits: will initially be set to defaults from corresponding BenefitLevel
    max_words = models.PositiveIntegerField(_("max words"), blank=True, null=True)
    other_limits = models.CharField(_("other limits"), max_length=200, blank=True)
    # Data: zero or one of these fields will be used, depending on the
    # type of the Benefit (text, file, or simple)
    text = models.TextField(_("text"), blank=True)
    upload = models.FileField(_("file"), blank=True, upload_to="sponsor_files")
    class Meta:
        ordering = ['-active']
    def __unicode__(self):
        return u"%s - %s" % (self.sponsor, self.benefit)
    def clean(self):
        # Enforce the word limit inherited from the sponsor's BenefitLevel.
        if self.max_words and len(self.text.split()) > self.max_words:
            raise ValidationError("Sponsorship level only allows for %s words." % self.max_words)
    def data_fields(self):
        """
        Return list of data field names which should be editable for
        this ``SponsorBenefit``, depending on its ``Benefit`` type.
        """
        if self.benefit.type == "file" or self.benefit.type == "weblogo":
            return ["upload"]
        elif self.benefit.type == "text":
            return ["text"]
        return []
def _denorm_weblogo(sender, instance, created, **kwargs):
    """Keep Sponsor.sponsor_logo pointing at the saved web-logo benefit."""
    if instance and instance.benefit.type == "weblogo" and instance.upload:
        owner = instance.sponsor
        owner.sponsor_logo = instance
        owner.save()

post_save.connect(_denorm_weblogo, sender=SponsorBenefit)
| |
"""
Author: Justin Cappos
Start Date: 22 July 2008
Description:
Refactored logging code that used to be in emulfile
"""
# needed for remove and path.exists
import os
# for Lock
import threading
# I need to rename file so that the checker doesn't complain...
myfile = file
# used to make stdout flush as written This is private to my code
class flush_logger_core:
  """
  File-like wrapper used in lieu of stdout: every write (or writelines)
  is immediately followed by a flush of the wrapped file object.
  """
  def __init__(self, fobj):
    self.fileobj = fobj
    # Unused attributes, kept only so this object looks like a real file.
    self.mode = None
    self.name = None
    self.softspace = 0

  def write(self,writeitem):
    """Write writeitem to the underlying file and flush right away."""
    self.fileobj.write(writeitem)
    self.flush()

  def writelines(self,writelist):
    """Write all items in writelist, then flush once."""
    self.fileobj.writelines(writelist)
    self.flush()

  def flush(self):
    """Delegate to the wrapped file object's flush."""
    return self.fileobj.flush()

  def close(self):
    """Delegate to the wrapped file object's close."""
    return self.fileobj.close()
# End of flush_logger class
# End of flush_logger class
# helper function
def get_size(fn):
  """Return the number of characters in file fn (text-mode read).

  Note: this is a character count after any platform newline translation,
  which is how the circular logger tracks its buffer size; it is not
  necessarily the on-disk byte size.
  """
  fo = myfile(fn,"r")
  # Close the handle even if read() raises (the original leaked it then).
  try:
    return len(fo.read())
  finally:
    fo.close()
# used to implement the circular log buffer
class circular_logger_core:
  """
  A file-like class that writes to what is conceptually a circular buffer.
  After being filled, the buffer is always >=16KB and always flushed after
  write...
  I accomplish this by actually writing to two files. Once >=16 KB has been
  written, the first file will always* be of size 16KB and the second file
  fills as the user writes. Once the second file reaches 16KB, it is
  moved to overwrite the first file and a new second file is created.
  *not always on some systems because moving files isn't atomic
  """
  def __init__(self, fnp, mbs = 16 * 1024):
    # I do not use these. This is merely for API convenience
    self.mode = None
    self.name = None
    self.softspace = 0
    # the size before we "rotate" the logfiles
    self.maxbuffersize = mbs # default listed in constructor
    # filenames we'll use for the log data
    self.filenameprefix = fnp
    self.oldfn = fnp+".old"
    self.newfn = fnp+".new"
    # prevent race conditions when writing
    self.writelock = threading.Lock()
    # we need to set up the currentsize, activefo and first variables...
    # `first` == True means we are still filling the very first (.old) file
    # and have not rotated yet; currentsize counts chars in the active file.
    if os.path.exists(self.newfn):
      # the new file exists.
      if os.path.exists(self.oldfn):
        # the old file exists too (the common case)
        self.currentsize = get_size(self.newfn)
        self.activefo = myfile(self.newfn,"a")
        self.first = False
        # now we have the fileobject and the size set up. We're ready...
        return
      else:
        # a corner case. The old file was removed but the new was not yet
        # copied over
        os.rename(self.newfn, self.oldfn)
        self.currentsize = 0
        self.activefo = myfile(self.newfn,"w")
        self.first = False
        return
    else:
      if os.path.exists(self.oldfn):
        # the old file name exists, so we should start from here
        self.currentsize = get_size(self.oldfn)
        self.activefo = myfile(self.oldfn,"a")
        self.first = True
        # now we have the fileobject and the size set up. We're ready...
        return
      else:
        # starting from nothing...
        self.currentsize = 0
        self.activefo = myfile(self.oldfn,"w")
        self.first = True
        return
  # No-op
  def close(self):
    return
  # No-op, I always flush myself
  def flush(self):
    return
  def write(self,writeitem):
    """Write writeitem to the circular buffer (thread-safe)."""
    # they / we can always log info (or else what happens on exception?)
    # acquire (and release later no matter what)
    self.writelock.acquire()
    try:
      writeamt = self.writedata(writeitem)
    finally:
      self.writelock.release()
  def writelines(self,writelist):
    """Write each item of writelist to the circular buffer (thread-safe)."""
    # we / they can always log info (or else what happens on exception?)
    # acquire (and release later no matter what)
    self.writelock.acquire()
    try:
      for writeitem in writelist:
        self.writedata(writeitem)
    finally:
      self.writelock.release()
  # internal functions (not externally called)
  # rotate the log files (make the new the old, and get a new file
  def rotate_log(self):
    """Replace the .old file with the full .new file and open a fresh .new."""
    self.activefo.close()
    try:
      os.rename(self.newfn, self.oldfn)
    # NOTE(review): WindowsError is undefined on non-Windows Pythons, so a
    # failed rename there would raise NameError — confirm target platforms.
    except WindowsError: # Windows no likey when rename overwrites
      os.remove(self.oldfn)
      os.rename(self.newfn, self.oldfn)
    self.activefo = myfile(self.newfn,"w")
  def write_first_log(self):
    """Switch from the initial .old file to writing the .new file."""
    self.activefo.close()
    self.activefo = myfile(self.newfn,"w")
  # I could write this in about 1/4 the code, but it would be much harder to
  # read.
  def writedata(self, data):
    """Write data (stringified), rotating the backing files as needed.

    Returns the number of characters charged to the caller.  Caller must
    hold self.writelock.
    """
    # first I'll dispose of the common case
    if len(str(data)) + self.currentsize <= self.maxbuffersize:
      # didn't fill the buffer
      self.activefo.write(str(data))
      self.activefo.flush()
      self.currentsize = self.currentsize + len(str(data))
      return len(str(data))
    # now I'll deal with the "longer-but-still-fits case"
    if len(str(data))+self.currentsize <= self.maxbuffersize*2:
      # finish off this file
      splitindex = self.maxbuffersize - self.currentsize
      self.activefo.write(str(data[:splitindex]))
      self.activefo.flush()
      # rotate logs
      if self.first:
        self.write_first_log()
        self.first = False
      else:
        self.rotate_log()
      # now write the last bit of data...
      self.activefo.write(str(data[splitindex:]))
      self.activefo.flush()
      self.currentsize = len(str(data[splitindex:]))
      return len(str(data))
    # now the "really-long-write case"
    # Note, I'm going to avoid doing any extra "alignment" on the data. In
    # other words, if they write some multiple of 16KB, and they currently have
    # a full file and a file with 7 bytes, they'll end up with a full file and
    # a file with 7 bytes
    datasize = len(str(data))
    # this is what data the new file should contain (the old file will contain
    # the 16KB of data before this)
    lastchunk = (datasize + self.currentsize) % self.maxbuffersize
    # I'm going to write the old file and new file now
    #
    # Note: I break some of the guarantees about being able to
    # recover disk state here
    self.activefo.close()
    if self.first:
      # remove existing files (unnecessary on some platforms)
      os.remove(self.oldfn)
    else:
      # remove existing files (unnecessary on some platforms)
      os.remove(self.oldfn)
      os.remove(self.newfn)
    oldfo = myfile(self.oldfn,"w")
    # write the data counting backwards from the end of the file
    oldfo.write(data[-(lastchunk+self.maxbuffersize):-lastchunk])
    oldfo.close()
    # next...
    self.activefo = myfile(self.newfn,"w")
    # now write the last bit of data...
    self.activefo.write(str(data[-lastchunk:]))
    self.activefo.flush()
    self.currentsize = len(str(data[-lastchunk:]))
    # charge them for only the data we actually wrote
    return self.currentsize + self.maxbuffersize
# End of circular_logger class
| |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.optimizer as optimizer
from paddle.fluid.framework import Program, program_guard
import paddle.fluid.core as core
BATCH_SIZE = 1
INPUT_SIZE = 784
CLASS_NUM = 10
FC_SIZE = 40
EPOCH_NUM = 5
LR = 0.001
SEED = 2020
paddle.enable_static()
def static(train_data,
           loss_in_switch=True,
           use_cuda=False,
           use_parallel_exe=False):
    """Train the two-FC net with the static graph, choosing optimizer+loss
    per epoch via ``layers.case`` (epoch even -> Adam/cross_entropy, odd ->
    SGD/softmax_with_cross_entropy).  When ``loss_in_switch`` is True the
    losses are built inside the case branches, otherwise outside.

    Returns the (hidden, prediction, loss) numpy outputs of the last epoch.
    """
    startup_program = Program()
    main_program = Program()
    startup_program.random_seed = SEED
    main_program.random_seed = SEED
    with program_guard(main_program, startup_program):
        def double_fc_net(image):
            # Constant initializers make results exactly reproducible so the
            # dygraph run can be compared element-wise.
            hidden = layers.fc(
                image,
                size=FC_SIZE,
                act='relu',
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(value=0.99)),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(value=0.5)),
                name="hidden")
            prediction = layers.fc(
                hidden,
                size=CLASS_NUM,
                act='softmax',
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(value=1.2)),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(value=0.8)),
                name="prediction")
            return hidden, prediction
        def fn_1(opt, avg_loss=None, pred=None, label=None):
            # Adam branch; builds the cross-entropy loss when not given one.
            if avg_loss is None:
                loss = layers.cross_entropy(input=pred, label=label)
                avg_loss = layers.mean(loss, name='mean_cross_entropy_loss')
            opt.minimize(avg_loss)
            return avg_loss
        def fn_2(opt, avg_loss=None, pred=None, label=None):
            # SGD branch; builds softmax-with-cross-entropy when needed.
            if avg_loss is None:
                loss = layers.softmax_with_cross_entropy(
                    logits=pred, label=label)
                avg_loss = layers.mean(loss, name='mean_softmax_loss')
            opt.minimize(avg_loss)
            return avg_loss
        image = fluid.data('image', [BATCH_SIZE, INPUT_SIZE], 'float32')
        label = fluid.data('label', [BATCH_SIZE, 1], 'int64')
        hidden, prediction = double_fc_net(image)
        adam = optimizer.Adam(learning_rate=LR)
        sgd = optimizer.SGD(learning_rate=LR)
        # `id` is fed with the epoch number; the case predicate is id % 2 == 0.
        id = fluid.data('id', [1], 'int32')
        two = layers.fill_constant([1], 'int32', 2)
        mod_two = layers.elementwise_mod(id, two) == 0
        if loss_in_switch:
            avg_loss = layers.case([(
                mod_two, lambda: fn_1(adam, None, prediction, label))],
                lambda: fn_2(sgd, None, prediction, label))
        else:
            loss_1 = layers.cross_entropy(input=prediction, label=label)
            avg_loss_1 = layers.mean(loss_1)
            loss_2 = layers.softmax_with_cross_entropy(
                logits=prediction, label=label)
            avg_loss_2 = layers.mean(loss_2)
            avg_loss = layers.case([(mod_two, lambda: fn_1(adam, avg_loss_1))],
                                   lambda: fn_2(sgd, avg_loss_2))
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_program)
    for epoch in range(EPOCH_NUM):
        feed_image, feed_label = train_data[epoch]
        fetch_list = [hidden, prediction, avg_loss]
        feed = {
            'image': feed_image,
            'label': feed_label,
            'id': np.array([epoch]).astype('int32')
        }
        out = exe.run(main_program, feed=feed, fetch_list=fetch_list)
        out_hidden, out_pred, loss = out
    return out_hidden, out_pred, loss
class DygraphLayer(fluid.dygraph.Layer):
    """Dygraph twin of ``double_fc_net``: identical layer sizes, activations
    and constant initializers so results can be compared exactly."""
    def __init__(self):
        super(DygraphLayer, self).__init__()
        self.fc_1 = fluid.dygraph.nn.Linear(
            INPUT_SIZE,
            FC_SIZE,
            act='relu',
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
                value=0.99)),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
                value=0.5)), )
        self.fc_2 = fluid.dygraph.nn.Linear(
            FC_SIZE,
            CLASS_NUM,
            act='softmax',
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
                value=1.2)),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
                value=0.8)))
    def forward(self, inputs):
        # Return the intermediate activation too, so tests can compare it
        # against the static graph's fetched `hidden` tensor.
        hidden = self.fc_1(inputs)
        prediction = self.fc_2(hidden)
        return hidden, prediction
def dynamic(train_data, use_cuda=False, use_parallel_exe=False):
    """Train DygraphLayer in dygraph mode, alternating optimizer and loss by
    epoch parity exactly like ``static``; returns last-epoch numpy results."""
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    with fluid.dygraph.guard(place):
        fluid.default_startup_program().random_seed = SEED
        fluid.default_main_program().random_seed = SEED
        dy_layer = DygraphLayer()
        adam = fluid.optimizer.Adam(
            learning_rate=LR, parameter_list=dy_layer.parameters())
        sgd = fluid.optimizer.SGD(learning_rate=LR,
                                  parameter_list=dy_layer.parameters())
        for epoch in range(EPOCH_NUM):
            image_data, label = train_data[epoch]
            var_input = fluid.dygraph.to_variable(image_data)
            var_label = fluid.dygraph.to_variable(label)
            hidden, prediction = dy_layer(var_input)
            if epoch % 2 == 0:
                # Even epochs: cross-entropy loss minimized with Adam.
                cross_entropy_loss = layers.cross_entropy(prediction, var_label)
                loss = layers.mean(cross_entropy_loss)
                loss.backward()
                adam.minimize(loss)
            else:
                # Odd epochs: softmax-with-cross-entropy minimized with SGD.
                softmax_loss = layers.softmax_with_cross_entropy(prediction,
                                                                 var_label)
                loss = layers.mean(softmax_loss)
                loss.backward()
                sgd.minimize(loss)
            dy_layer.clear_gradients()
        return hidden.numpy(), prediction.numpy(), loss.numpy()
class TestMultiTask(unittest.TestCase):
    '''
    Compare results of static graph and dynamic graph.
    Todo(liym27): add parallel GPU train.
    '''

    def random_input(self,
                     seed,
                     image_shape=[BATCH_SIZE, INPUT_SIZE],
                     label_shape=[BATCH_SIZE, 1]):
        # Re-seed before each draw so (image, label) pairs are reproducible.
        np.random.seed(seed)
        image = np.random.random(size=image_shape).astype('float32')
        np.random.seed(seed)
        label = np.random.randint(
            low=0, high=CLASS_NUM - 1, size=label_shape).astype('int64')
        return image, label

    def init_train_data(self):
        # One batch per epoch, seeded by the epoch index.
        self.train_data = [self.random_input(epoch) for epoch in range(EPOCH_NUM)]

    def test_optimzier_in_switch(self):
        # (method name keeps the original spelling so the test id is stable)
        self.init_train_data()
        use_cuda = core.is_compiled_with_cuda()
        hidden_d, pred_d, loss_d = dynamic(self.train_data, use_cuda)
        for loss_in_switch in (True, False):
            hidden_s, pred_s, loss_s = static(self.train_data, loss_in_switch,
                                              use_cuda)
            comparisons = [
                (hidden_s, hidden_d,
                 'static hidden is {}\ndynamic hidden is {}'),
                (pred_s, pred_d,
                 'static prediction is {}\ndynamic prediction is {}'),
                (loss_s, loss_d,
                 'static loss is {}\ndynamic loss is {}'),
            ]
            for got, want, template in comparisons:
                self.assertTrue(
                    np.allclose(got, want),
                    msg=template.format(got, want))
class TestMultiOptimizersMultiCardsError(unittest.TestCase):
    """Different optimizers inside control flow are not supported on multiple
    devices: ParallelExecutor.run must raise NotImplementedError then."""
    def test_error(self):
        startup_program = Program()
        main_program = Program()
        use_cuda = core.is_compiled_with_cuda()
        with program_guard(main_program, startup_program):
            def fn_1(opt, avg_loss):
                opt.minimize(avg_loss)
            def fn_2(opt, avg_loss):
                opt.minimize(avg_loss)
            x = fluid.layers.data("X", [10], 'float32')
            hidden = layers.fc(x, 5)
            avg_loss = layers.mean(hidden)
            adam = optimizer.Adam(learning_rate=LR)
            sgd = optimizer.SGD(learning_rate=LR)
            cond = layers.fill_constant([1], 'bool', True)
            # Two different optimizers behind a `case` — the unsupported setup.
            layers.case([(cond, lambda: fn_1(adam, avg_loss))],
                        lambda: fn_2(sgd, avg_loss))
        cpu_place = fluid.CPUPlace()
        cuda_place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        for place in [cpu_place, cuda_place]:
            exe = fluid.Executor(place)
            exe.run(startup_program)
            np.random.seed(SEED)
            # NOTE(liym27):
            # This test needs to run in multi cards to test NotImplementedError.
            # Here, move this test from RUN_TYPE=DIST in tests/unittests/CMakeList.txt,
            # to use multi cards ** only on CPU ** not GPU to reduce CI time.
            os.environ['CPU_NUM'] = str(2)
            pe_exe = fluid.ParallelExecutor(
                use_cuda=use_cuda,
                main_program=main_program,
                loss_name=avg_loss.name)
            num_devices = pe_exe.device_count
            def not_implemented_error():
                pe_exe.run(feed={
                    'X': np.random.random(size=[64, 10]).astype('float32'),
                },
                           fetch_list=[avg_loss.name])
            # Only a multi-device run hits the unsupported code path.
            if num_devices > 1:
                self.assertRaises(NotImplementedError, not_implemented_error)
# Run the unit tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| |
import os
import pytest
import ray
import re
from filelock import FileLock
from ray._private.test_utils import run_string_as_driver, SignalActor
from ray import workflow
from ray.tests.conftest import * # noqa
from unittest.mock import patch
def test_init_twice(call_ray_start, reset_workflow, tmp_path):
    # A second init pointing at a different storage path must be rejected.
    workflow.init()
    with pytest.raises(RuntimeError):
        workflow.init(str(tmp_path))
driver_script = """
from ray import workflow
if __name__ == "__main__":
workflow.init()
"""
def test_init_twice_2(call_ray_start, reset_workflow, tmp_path):
    # Init from another driver process first, then verify this process cannot
    # re-init with a different storage path.
    with patch.dict(os.environ, {"RAY_ADDRESS": call_ray_start}):
        run_string_as_driver(driver_script)
        with pytest.raises(
            RuntimeError, match=".*different from the workflow manager.*"
        ):
            workflow.init(str(tmp_path))
@pytest.mark.parametrize(
    "workflow_start_regular",
    [
        {
            "num_cpus": 2,
        }
    ],
    indirect=True,
)
def test_step_resources(workflow_start_regular, tmp_path):
    # While a step holding num_cpus=2 blocks on the file lock, a num_cpus=1
    # task must not get scheduled (the cluster only has 2 CPUs).
    lock_path = str(tmp_path / "lock")
    # We use signal actor here because we can't guarantee the order of tasks
    # sent from worker to raylet.
    signal_actor = SignalActor.remote()
    @workflow.step
    def step_run():
        ray.wait([signal_actor.send.remote()])
        with FileLock(lock_path):
            return None
    @ray.remote(num_cpus=1)
    def remote_run():
        return None
    lock = FileLock(lock_path)
    lock.acquire()
    ret = step_run.options(num_cpus=2).step().run_async()
    # Wait until the step is actually running (and blocked on the lock).
    ray.wait([signal_actor.wait.remote()])
    obj = remote_run.remote()
    with pytest.raises(ray.exceptions.GetTimeoutError):
        ray.get(obj, timeout=2)
    lock.release()
    assert ray.get(ret) is None
    assert ray.get(obj) is None
def test_get_output_1(workflow_start_regular, tmp_path):
    """get_output() of a finished workflow returns its result."""
    @workflow.step
    def simple(v):
        return v

    run_result = simple.step(0).run("simple")
    assert run_result == 0
    assert ray.get(workflow.get_output("simple")) == 0
def test_get_output_2(workflow_start_regular, tmp_path):
    # get_output() must also work while the workflow is still running: the
    # step blocks on the lock we hold until both outputs have been requested.
    lock_path = str(tmp_path / "lock")
    lock = FileLock(lock_path)
    @workflow.step
    def simple(v):
        with FileLock(lock_path):
            return v
    lock.acquire()
    obj = simple.step(0).run_async("simple")
    obj2 = workflow.get_output("simple")
    lock.release()
    assert ray.get([obj, obj2]) == [0, 0]
def test_get_output_3(workflow_start_regular, tmp_path):
    # A failed workflow keeps failing through get_output() without re-running
    # the step; only an explicit resume() retries (and succeeds once the
    # error flag file is removed).
    cnt_file = tmp_path / "counter"
    cnt_file.write_text("0")
    error_flag = tmp_path / "error"
    error_flag.touch()
    @workflow.step
    def incr():
        v = int(cnt_file.read_text())
        cnt_file.write_text(str(v + 1))
        if error_flag.exists():
            raise ValueError()
        return 10
    with pytest.raises(ray.exceptions.RaySystemError):
        incr.options(max_retries=1).step().run("incr")
    assert cnt_file.read_text() == "1"
    with pytest.raises(ray.exceptions.RaySystemError):
        ray.get(workflow.get_output("incr"))
    # get_output() must not have executed the step again.
    assert cnt_file.read_text() == "1"
    error_flag.unlink()
    with pytest.raises(ray.exceptions.RaySystemError):
        ray.get(workflow.get_output("incr"))
    assert ray.get(workflow.resume("incr")) == 10
def test_get_named_step_output_finished(workflow_start_regular, tmp_path):
    """Named step outputs are readable after the workflow has finished."""
    @workflow.step
    def double(v):
        return v * 2

    # Get the result from named step after workflow finished
    inner_step = double.options(name="inner").step(1)
    assert 4 == double.options(name="outer").step(inner_step).run("double")
    assert ray.get(workflow.get_output("double", name="inner")) == 2
    assert ray.get(workflow.get_output("double", name="outer")) == 4
def test_get_named_step_output_running(workflow_start_regular, tmp_path):
    # Named step outputs must be retrievable while the workflow is running;
    # nothing resolves until the file lock is released.
    @workflow.step
    def double(v, lock=None):
        if lock is not None:
            with FileLock(lock_path):
                return 2 * v
        else:
            return 2 * v
    # Get the result from named step after workflow before it's finished
    lock_path = str(tmp_path / "lock")
    lock = FileLock(lock_path)
    lock.acquire()
    output = (
        double.options(name="outer")
        .step(double.options(name="inner").step(1, lock_path), lock_path)
        .run_async("double-2")
    )
    inner = workflow.get_output("double-2", name="inner")
    outer = workflow.get_output("double-2", name="outer")
    @ray.remote
    def wait(obj_ref):
        return ray.get(obj_ref[0])
    # Make sure nothing is finished.
    ready, waiting = ray.wait(
        [wait.remote([output]), wait.remote([inner]), wait.remote([outer])], timeout=1
    )
    assert 0 == len(ready)
    assert 3 == len(waiting)
    # Once job finished, we'll be able to get the result.
    lock.release()
    assert 4 == ray.get(output)
    # Here sometimes inner will not be generated when we call
    # run_async. So there is a race condition here.
    try:
        v = ray.get(inner)
    except Exception:
        v = None
    if v is not None:
        assert 2 == v
    assert 4 == ray.get(outer)
    # Re-fetch after completion: both named outputs must now resolve.
    inner = workflow.get_output("double-2", name="inner")
    outer = workflow.get_output("double-2", name="outer")
    assert 2 == ray.get(inner)
    assert 4 == ray.get(outer)
def test_get_named_step_output_error(workflow_start_regular, tmp_path):
    """Named outputs stay retrievable per step even when the workflow fails."""
    @workflow.step
    def double(v, error):
        if error:
            raise Exception()
        return v + v

    # The inner step completes before the outer step is forced to fail.
    inner_step = double.options(name="inner").step(1, False)
    with pytest.raises(Exception):
        double.options(name="outer").step(inner_step, True).run("double")

    assert 2 == ray.get(workflow.get_output("double", name="inner"))
    outer = workflow.get_output("double", name="outer")
    with pytest.raises(Exception):
        ray.get(outer)
def test_get_named_step_default(workflow_start_regular, tmp_path):
    """Auto-generated step names get numeric suffixes after the first one."""
    @workflow.step
    def factorial(n, r=1):
        if n == 1:
            return r
        return factorial.step(n - 1, r * n)

    import math

    assert math.factorial(5) == factorial.step(5).run("factorial")
    base_name = "test_basic_workflows_2.test_get_named_step_default.locals.factorial"
    for i in range(5):
        step_name = base_name if i == 0 else "%s_%d" % (base_name, i)
        # Every recursive invocation records the same final value (120).
        assert math.factorial(5) == ray.get(
            workflow.get_output("factorial", name=step_name)
        )
def test_get_named_step_duplicate(workflow_start_regular):
    """Duplicate explicit step names get an automatic numeric suffix."""
    @workflow.step(name="f")
    def f(n, dep):
        return n

    first = f.step(10, None)
    second = f.step(20, first)
    assert second.run("duplicate") == 20
    # The outer will be checkpointed first. So there is no suffix for the name
    assert ray.get(workflow.get_output("duplicate", name="f")) == 20
    # The inner will be checkpointed after the outer. And there is a duplicate
    # for the name. suffix _1 is added automatically
    assert ray.get(workflow.get_output("duplicate", name="f_1")) == 10
def test_no_init(shutdown_only):
    """Every workflow API entry point must fail cleanly before init()."""
    @workflow.step
    def f():
        pass

    fail_wf_init_error_msg = re.escape(
        "`workflow.init()` must be called prior to using " "the workflows API."
    )
    uninitialized_calls = [
        lambda: f.step().run(),
        lambda: workflow.list_all(),
        lambda: workflow.resume_all(),
        lambda: workflow.cancel("wf"),
        lambda: workflow.get_actor("wf"),
    ]
    for call in uninitialized_calls:
        with pytest.raises(RuntimeError, match=fail_wf_init_error_msg):
            call()
# Allow running this test module directly via pytest.
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| |
"""Hierarchical Agglomerative Clustering
These routines perform some hierarchical agglomerative clustering of some
input data.
Authors : Vincent Michel, Bertrand Thirion, Alexandre Gramfort,
Gael Varoquaux
License: BSD 3 clause
"""
from heapq import heapify, heappop, heappush, heappushpop
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..externals.joblib import Memory
from ..externals import six
from ..metrics.pairwise import paired_distances, pairwise_distances
from ..utils import check_array
from ..utils.sparsetools import connected_components
from . import _hierarchical
from ._feature_agglomeration import AgglomerationTransform
from ..utils.fast_dict import IntFloatDict
from ..externals.six.moves import xrange
###############################################################################
# For non fully-connected graphs
def _fix_connectivity(X, connectivity, n_components=None,
                      affinity="euclidean"):
    """Return a symmetric LIL connectivity matrix covering all samples.

    The input matrix is copied (via symmetrization), converted to LIL
    format if needed and, when the graph has several connected components,
    completed by linking the closest pair of samples (w.r.t. `affinity`)
    between each pair of components.  Returns (connectivity, n_components).
    """
    n_samples = X.shape[0]
    if connectivity.shape != (n_samples, n_samples):
        raise ValueError('Wrong shape for connectivity matrix: %s '
                         'when X is %s' % (connectivity.shape, X.shape))
    # Symmetrize; this also copies, leaving the caller's matrix untouched.
    connectivity = connectivity + connectivity.T
    # Work in LIL format so the single-entry assignments below are cheap.
    if sparse.isspmatrix(connectivity):
        if not sparse.isspmatrix_lil(connectivity):
            connectivity = connectivity.tolil()
    else:
        connectivity = sparse.lil_matrix(connectivity)
    # Compute the number of nodes
    n_components, labels = connected_components(connectivity)
    if n_components > 1:
        warnings.warn("the number of connected components of the "
                      "connectivity matrix is %d > 1. Completing it to avoid "
                      "stopping the tree early." % n_components,
                      stacklevel=2)
        # XXX: Can we do without completing the matrix?
        for comp_i in xrange(n_components):
            members_i = np.where(labels == comp_i)[0]
            X_i = X[members_i]
            for comp_j in xrange(comp_i):
                members_j = np.where(labels == comp_j)[0]
                X_j = X[members_j]
                # Bridge the two components through their closest sample pair.
                dist = pairwise_distances(X_i, X_j, metric=affinity)
                rows, cols = np.where(dist == np.min(dist))
                row, col = rows[0], cols[0]
                connectivity[members_i[row], members_j[col]] = True
                connectivity[members_j[col], members_i[row]] = True
    return connectivity, n_components
###############################################################################
# Hierarchical tree building functions
def ward_tree(X, connectivity=None, n_clusters=None, return_distance=False):
"""Ward clustering based on a Feature matrix.
Recursively merges the pair of clusters that minimally increases
within-cluster variance.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
X : array, shape (n_samples, n_features)
feature matrix representing n_samples samples to be clustered
connectivity : sparse matrix (optional).
connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is None, i.e, the Ward algorithm is unstructured.
n_clusters : int (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
return_distance : bool (optional)
If True, return the distance between the clusters.
Returns
-------
children : 2D array, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
n_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree
parents : 1D array, shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
is specified, elsewhere 'None' is returned.
distances : 1D array, shape (n_nodes-1, )
Only returned if return_distance is set to True (for compatibility).
The distances between the centers of the nodes. `distances[i]`
corresponds to a weighted euclidean distance between
the nodes `children[i, 1]` and `children[i, 2]`. If the nodes refer to
leaves of the tree, then `distances[i]` is their unweighted euclidean
distance. Distances are updated in the following way
(from scipy.hierarchy.linkage):
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
n_samples, n_features = X.shape
if connectivity is None:
from scipy.cluster import hierarchy # imports PIL
if n_clusters is not None:
warnings.warn('Partial build of the tree is implemented '
'only for structured clustering (i.e. with '
'explicit connectivity). The algorithm '
'will build the full tree and only '
'retain the lower branches required '
'for the specified number of clusters',
stacklevel=2)
out = hierarchy.ward(X)
children_ = out[:, :2].astype(np.intp)
if return_distance:
distances = out[:, 2]
return children_, 1, n_samples, None, distances
else:
return children_, 1, n_samples, None
connectivity, n_components = _fix_connectivity(X, connectivity)
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
if n_clusters > n_samples:
raise ValueError('Cannot provide more clusters than samples. '
'%i n_clusters was asked, and there are %i samples.'
% (n_clusters, n_samples))
n_nodes = 2 * n_samples - n_clusters
# create inertia matrix
coord_row = []
coord_col = []
A = []
for ind, row in enumerate(connectivity.rows):
A.append(row)
# We keep only the upper triangular for the moments
# Generator expressions are faster than arrays on the following
row = [i for i in row if i < ind]
coord_row.extend(len(row) * [ind, ])
coord_col.extend(row)
coord_row = np.array(coord_row, dtype=np.intp, order='C')
coord_col = np.array(coord_col, dtype=np.intp, order='C')
# build moments as a list
moments_1 = np.zeros(n_nodes, order='C')
moments_1[:n_samples] = 1
moments_2 = np.zeros((n_nodes, n_features), order='C')
moments_2[:n_samples] = X
inertia = np.empty(len(coord_row), dtype=np.float64, order='C')
_hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col,
inertia)
inertia = list(six.moves.zip(inertia, coord_row, coord_col))
heapify(inertia)
# prepare the main fields
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=bool)
children = []
if return_distance:
distances = np.empty(n_nodes - n_samples)
not_visited = np.empty(n_nodes, dtype=np.int8, order='C')
# recursive merge loop
for k in range(n_samples, n_nodes):
# identify the merge
while True:
inert, i, j = heappop(inertia)
if used_node[i] and used_node[j]:
break
parent[i], parent[j] = k, k
children.append((i, j))
used_node[i] = used_node[j] = False
if return_distance: # store inertia value
distances[k - n_samples] = inert
# update the moments
moments_1[k] = moments_1[i] + moments_1[j]
moments_2[k] = moments_2[i] + moments_2[j]
# update the structure matrix A and the inertia matrix
coord_col = []
not_visited.fill(1)
not_visited[k] = 0
_hierarchical._get_parents(A[i], coord_col, parent, not_visited)
_hierarchical._get_parents(A[j], coord_col, parent, not_visited)
# List comprehension is faster than a for loop
[A[l].append(k) for l in coord_col]
A.append(coord_col)
coord_col = np.array(coord_col, dtype=np.intp, order='C')
coord_row = np.empty(coord_col.shape, dtype=np.intp, order='C')
coord_row.fill(k)
n_additions = len(coord_row)
ini = np.empty(n_additions, dtype=np.float64, order='C')
_hierarchical.compute_ward_dist(moments_1, moments_2,
coord_row, coord_col, ini)
# List comprehension is faster than a for loop
[heappush(inertia, (ini[idx], k, coord_col[idx]))
for idx in range(n_additions)]
# Separate leaves in children (empty lists up to now)
n_leaves = n_samples
# sort children to get consistent output with unstructured version
children = [c[::-1] for c in children]
children = np.array(children) # return numpy array for efficient caching
if return_distance:
# 2 is scaling factor to compare w/ unstructured version
distances = np.sqrt(2. * distances)
return children, n_components, n_leaves, parent, distances
else:
return children, n_components, n_leaves, parent
# average and complete linkage
def linkage_tree(X, connectivity=None, n_components=None,
                 n_clusters=None, linkage='complete', affinity="euclidean",
                 return_distance=False):
    """Linkage agglomerative clustering based on a Feature matrix.

    The inertia matrix uses a Heapq-based representation.

    This is the structured version, that takes into account some topological
    structure between samples.

    Read more in the :ref:`User Guide <hierarchical_clustering>`.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        feature matrix representing n_samples samples to be clustered

    connectivity : sparse matrix (optional).
        connectivity matrix. Defines for each sample the neighboring samples
        following a given structure of the data. The matrix is assumed to
        be symmetric and only the upper triangular half is used.
        Default is None, i.e, the Ward algorithm is unstructured.

    n_components : int (optional)
        Ignored. The number of connected components is always recomputed
        from the connectivity matrix; the parameter is kept for
        backward compatibility of the call signature.

    n_clusters : int (optional)
        Stop early the construction of the tree at n_clusters. This is
        useful to decrease computation time if the number of clusters is
        not small compared to the number of samples. In this case, the
        complete tree is not computed, thus the 'children' output is of
        limited use, and the 'parents' output should rather be used.
        This option is valid only when specifying a connectivity matrix.

    linkage : {"average", "complete"}, optional, default: "complete"
        Which linkage criteria to use. The linkage criterion determines which
        distance to use between sets of observation.

        - average uses the average of the distances of each observation of
          the two sets
        - complete or maximum linkage uses the maximum distances between
          all observations of the two sets.

    affinity : string or callable, optional, default: "euclidean".
        which metric to use. Can be "euclidean", "manhattan", or any
        distance know to paired distance (see metric.pairwise)

    return_distance : bool, default False
        whether or not to return the distances between the clusters.

    Returns
    -------
    children : 2D array, shape (n_nodes-1, 2)
        The children of each non-leaf node. Values less than `n_samples`
        correspond to leaves of the tree which are the original samples.
        A node `i` greater than or equal to `n_samples` is a non-leaf
        node and has children `children_[i - n_samples]`. Alternatively
        at the i-th iteration, children[i][0] and children[i][1]
        are merged to form node `n_samples + i`

    n_components : int
        The number of connected components in the graph.

    n_leaves : int
        The number of leaves in the tree.

    parents : 1D array, shape (n_nodes, ) or None
        The parent of each node. Only returned when a connectivity matrix
        is specified, elsewhere 'None' is returned.

    distances : ndarray, shape (n_nodes-1,)
        Returned when return_distance is set to True.

        distances[i] refers to the distance between children[i][0] and
        children[i][1] when they are merged.

    See also
    --------
    ward_tree : hierarchical clustering with ward linkage
    """
    X = np.asarray(X)
    if X.ndim == 1:
        X = np.reshape(X, (-1, 1))
    n_samples, n_features = X.shape

    linkage_choices = {'complete': _hierarchical.max_merge,
                       'average': _hierarchical.average_merge}
    try:
        join_func = linkage_choices[linkage]
    except KeyError:
        raise ValueError(
            'Unknown linkage option, linkage should be one '
            'of %s, but %s was given' % (linkage_choices.keys(), linkage))

    if connectivity is None:
        from scipy.cluster import hierarchy  # imports PIL

        if n_clusters is not None:
            warnings.warn('Partial build of the tree is implemented '
                          'only for structured clustering (i.e. with '
                          'explicit connectivity). The algorithm '
                          'will build the full tree and only '
                          'retain the lower branches required '
                          'for the specified number of clusters',
                          stacklevel=2)

        if affinity == 'precomputed':
            # for the linkage function of hierarchy to work on precomputed
            # data, provide as first argument an ndarray of the shape returned
            # by pdist: it is a flat array containing the upper triangular of
            # the distance matrix.
            i, j = np.triu_indices(X.shape[0], k=1)
            X = X[i, j]
        elif affinity == 'l2':
            # Translate to something understood by scipy
            affinity = 'euclidean'
        elif affinity in ('l1', 'manhattan'):
            affinity = 'cityblock'
        elif callable(affinity):
            X = affinity(X)
            i, j = np.triu_indices(X.shape[0], k=1)
            X = X[i, j]
        out = hierarchy.linkage(X, method=linkage, metric=affinity)
        # BUG FIX: use np.intp instead of the deprecated np.int alias
        # (removed in NumPy 1.24); also consistent with ward_tree.
        children_ = out[:, :2].astype(np.intp)

        if return_distance:
            distances = out[:, 2]
            return children_, 1, n_samples, None, distances
        return children_, 1, n_samples, None

    connectivity, n_components = _fix_connectivity(X, connectivity)
    connectivity = connectivity.tocoo()
    # Put the diagonal to zero
    diag_mask = (connectivity.row != connectivity.col)
    connectivity.row = connectivity.row[diag_mask]
    connectivity.col = connectivity.col[diag_mask]
    connectivity.data = connectivity.data[diag_mask]
    del diag_mask

    if affinity == 'precomputed':
        distances = X[connectivity.row, connectivity.col]
    else:
        # FIXME We compute all the distances, while we could have only computed
        # the "interesting" distances
        distances = paired_distances(X[connectivity.row],
                                     X[connectivity.col],
                                     metric=affinity)
    connectivity.data = distances

    if n_clusters is None:
        n_nodes = 2 * n_samples - 1
    else:
        # BUG FIX: raise instead of assert -- asserts are stripped under
        # ``python -O`` and this is genuine input validation (mirrors
        # ward_tree's check and message).
        if n_clusters > n_samples:
            raise ValueError('Cannot provide more clusters than samples. '
                             '%i n_clusters was asked, and there are %i '
                             'samples.' % (n_clusters, n_samples))
        n_nodes = 2 * n_samples - n_clusters

    if return_distance:
        distances = np.empty(n_nodes - n_samples)
    # create inertia heap and connection matrix
    A = np.empty(n_nodes, dtype=object)
    inertia = list()

    # LIL seems to the best format to access the rows quickly,
    # without the numpy overhead of slicing CSR indices and data.
    connectivity = connectivity.tolil()
    # We are storing the graph in a list of IntFloatDict
    for ind, (data, row) in enumerate(zip(connectivity.data,
                                          connectivity.rows)):
        A[ind] = IntFloatDict(np.asarray(row, dtype=np.intp),
                              np.asarray(data, dtype=np.float64))
        # We keep only the upper triangular for the heap
        # Generator expressions are faster than arrays on the following
        inertia.extend(_hierarchical.WeightedEdge(d, ind, r)
                       for r, d in zip(row, data) if r < ind)
    del connectivity

    heapify(inertia)

    # prepare the main fields
    parent = np.arange(n_nodes, dtype=np.intp)
    used_node = np.ones(n_nodes, dtype=np.intp)
    children = []

    # recursive merge loop
    # BUG FIX: ``range`` instead of the Python-2-only ``xrange`` (consistent
    # with the merge loop in ward_tree).
    for k in range(n_samples, n_nodes):
        # identify the merge
        while True:
            edge = heappop(inertia)
            if used_node[edge.a] and used_node[edge.b]:
                break
        i = edge.a
        j = edge.b

        if return_distance:
            # store distances
            distances[k - n_samples] = edge.weight

        parent[i] = parent[j] = k
        children.append((i, j))
        # Keep track of the number of elements per cluster
        n_i = used_node[i]
        n_j = used_node[j]
        used_node[k] = n_i + n_j
        used_node[i] = used_node[j] = False

        # update the structure matrix A and the inertia matrix
        # a clever 'min', or 'max' operation between A[i] and A[j]
        coord_col = join_func(A[i], A[j], used_node, n_i, n_j)
        for l, d in coord_col:
            A[l].append(k, d)
            # Here we use the information from coord_col (containing the
            # distances) to update the heap
            heappush(inertia, _hierarchical.WeightedEdge(d, k, l))
        A[k] = coord_col
        # Clear A[i] and A[j] to save memory
        A[i] = A[j] = 0

    # Separate leaves in children (empty lists up to now)
    n_leaves = n_samples

    # # return numpy array for efficient caching
    children = np.array(children)[:, ::-1]

    if return_distance:
        return children, n_components, n_leaves, parent, distances
    return children, n_components, n_leaves, parent
# Matching names to tree-building strategies
def _complete_linkage(*args, **kwargs):
    """Delegate to :func:`linkage_tree`, forcing complete linkage."""
    kwargs.update(linkage='complete')
    return linkage_tree(*args, **kwargs)
def _average_linkage(*args, **kwargs):
    """Delegate to :func:`linkage_tree`, forcing average linkage."""
    kwargs.update(linkage='average')
    return linkage_tree(*args, **kwargs)
# Dispatch table mapping the public ``linkage`` option to its tree-building
# function; ``AgglomerativeClustering.fit`` looks the builder up by name.
_TREE_BUILDERS = dict(
    ward=ward_tree,
    complete=_complete_linkage,
    average=_average_linkage)
###############################################################################
# Functions for cutting hierarchical clustering tree
def _hc_cut(n_clusters, children, n_leaves):
    """Function cutting the ward tree for a given number of clusters.

    Parameters
    ----------
    n_clusters : int or ndarray
        The number of clusters to form.

    children : 2D array, shape (n_nodes-1, 2)
        The children of each non-leaf node. Values less than `n_samples`
        correspond to leaves of the tree which are the original samples.
        A node `i` greater than or equal to `n_samples` is a non-leaf
        node and has children `children_[i - n_samples]`. Alternatively
        at the i-th iteration, children[i][0] and children[i][1]
        are merged to form node `n_samples + i`

    n_leaves : int
        Number of leaves of the tree.

    Returns
    -------
    labels : array [n_samples]
        cluster labels for each point
    """
    if n_clusters > n_leaves:
        raise ValueError('Cannot extract more clusters than samples: '
                         '%s clusters where given for a tree with %s leaves.'
                         % (n_clusters, n_leaves))
    # In this function, we store nodes as a heap to avoid recomputing
    # the max of the nodes: the first element is always the smallest
    # We use negated indices as heaps work on smallest elements, and we
    # are interested in largest elements
    # children[-1] is the root of the tree
    nodes = [-(max(children[-1]) + 1)]
    # BUG FIX: ``range`` instead of the Python-2-only ``xrange`` (consistent
    # with the merge loop in ward_tree).
    for i in range(n_clusters - 1):
        # As we have a heap, nodes[0] is the smallest element
        these_children = children[-nodes[0] - n_leaves]
        # Insert the 2 children and remove the largest node
        heappush(nodes, -these_children[0])
        heappushpop(nodes, -these_children[1])
    label = np.zeros(n_leaves, dtype=np.intp)
    for i, node in enumerate(nodes):
        label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i
    return label
###############################################################################
class AgglomerativeClustering(BaseEstimator, ClusterMixin):
    """
    Agglomerative Clustering

    Recursively merges the pair of clusters that minimally increases
    a given linkage distance.

    Read more in the :ref:`User Guide <hierarchical_clustering>`.

    Parameters
    ----------
    n_clusters : int, default=2
        The number of clusters to find.

    connectivity : array-like or callable, optional
        Connectivity matrix. Defines for each sample the neighboring
        samples following a given structure of the data.
        This can be a connectivity matrix itself or a callable that transforms
        the data into a connectivity matrix, such as derived from
        kneighbors_graph. Default is None, i.e, the
        hierarchical clustering algorithm is unstructured.

    affinity : string or callable, default: "euclidean"
        Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
        "manhattan", "cosine", or 'precomputed'.
        If linkage is "ward", only "euclidean" is accepted.

    memory : Instance of joblib.Memory or string (optional)
        Used to cache the output of the computation of the tree.
        By default, no caching is done. If a string is given, it is the
        path to the caching directory.

    compute_full_tree : bool or 'auto' (optional)
        Stop early the construction of the tree at n_clusters. This is
        useful to decrease computation time if the number of clusters is
        not small compared to the number of samples. This option is
        useful only when specifying a connectivity matrix. Note also that
        when varying the number of clusters and using caching, it may
        be advantageous to compute the full tree.

    linkage : {"ward", "complete", "average"}, optional, default: "ward"
        Which linkage criterion to use. The linkage criterion determines which
        distance to use between sets of observation. The algorithm will merge
        the pairs of cluster that minimize this criterion.

        - ward minimizes the variance of the clusters being merged.
        - average uses the average of the distances of each observation of
          the two sets.
        - complete or maximum linkage uses the maximum distances between
          all observations of the two sets.

    pooling_func : callable, default=np.mean
        This combines the values of agglomerated features into a single
        value, and should accept an array of shape [M, N] and the keyword
        argument ``axis=1``, and reduce it to an array of size [M].

    Attributes
    ----------
    labels_ : array [n_samples]
        cluster labels for each point

    n_leaves_ : int
        Number of leaves in the hierarchical tree.

    n_components_ : int
        The estimated number of connected components in the graph.

    children_ : array-like, shape (n_nodes-1, 2)
        The children of each non-leaf node. Values less than `n_samples`
        correspond to leaves of the tree which are the original samples.
        A node `i` greater than or equal to `n_samples` is a non-leaf
        node and has children `children_[i - n_samples]`. Alternatively
        at the i-th iteration, children[i][0] and children[i][1]
        are merged to form node `n_samples + i`
    """

    def __init__(self, n_clusters=2, affinity="euclidean",
                 memory=Memory(cachedir=None, verbose=0),
                 connectivity=None, compute_full_tree='auto',
                 linkage='ward', pooling_func=np.mean):
        # NOTE(review): the default ``memory`` is a single Memory instance
        # created at import time and shared between instances. It performs no
        # caching (cachedir=None) so this is currently harmless, but a None
        # default resolved inside ``fit`` would be cleaner. Left unchanged to
        # keep the constructor signature and get_params output identical.
        self.n_clusters = n_clusters
        self.memory = memory
        self.connectivity = connectivity
        self.compute_full_tree = compute_full_tree
        self.linkage = linkage
        self.affinity = affinity
        self.pooling_func = pooling_func

    def fit(self, X, y=None):
        """Fit the hierarchical clustering on the data

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            The samples a.k.a. observations.

        Returns
        -------
        self
        """
        X = check_array(X, ensure_min_samples=2, estimator=self)
        memory = self.memory
        # A string ``memory`` is treated as a caching directory path.
        if isinstance(memory, six.string_types):
            memory = Memory(cachedir=memory, verbose=0)

        if self.n_clusters <= 0:
            raise ValueError("n_clusters should be an integer greater than 0."
                             " %s was provided." % str(self.n_clusters))

        if self.linkage == "ward" and self.affinity != "euclidean":
            raise ValueError("%s was provided as affinity. Ward can only "
                             "work with euclidean distances." %
                             (self.affinity, ))

        if self.linkage not in _TREE_BUILDERS:
            # BUG FIX: the two adjacent string literals previously
            # concatenated without a separator, producing
            # "Unknown linkage type x.Valid options are ...".
            raise ValueError("Unknown linkage type %s. "
                             "Valid options are %s" % (self.linkage,
                                                       _TREE_BUILDERS.keys()))
        tree_builder = _TREE_BUILDERS[self.linkage]

        connectivity = self.connectivity
        if self.connectivity is not None:
            if callable(self.connectivity):
                connectivity = self.connectivity(X)
            connectivity = check_array(
                connectivity, accept_sparse=['csr', 'coo', 'lil'])

        n_samples = len(X)
        compute_full_tree = self.compute_full_tree
        if self.connectivity is None:
            compute_full_tree = True
        if compute_full_tree == 'auto':
            # Early stopping is likely to give a speed up only for
            # a large number of clusters. The actual threshold
            # implemented here is heuristic
            compute_full_tree = self.n_clusters < max(100, .02 * n_samples)
        n_clusters = self.n_clusters
        if compute_full_tree:
            n_clusters = None

        # Construct the tree
        kwargs = {}
        if self.linkage != 'ward':
            kwargs['linkage'] = self.linkage
            kwargs['affinity'] = self.affinity
        self.children_, self.n_components_, self.n_leaves_, parents = \
            memory.cache(tree_builder)(X, connectivity,
                                       n_clusters=n_clusters,
                                       **kwargs)
        # Cut the tree
        if compute_full_tree:
            self.labels_ = _hc_cut(self.n_clusters, self.children_,
                                   self.n_leaves_)
        else:
            labels = _hierarchical.hc_get_heads(parents, copy=False)
            # copy to avoid holding a reference on the original array
            labels = np.copy(labels[:n_samples])
            # Reassign cluster numbers
            self.labels_ = np.searchsorted(np.unique(labels), labels)
        return self
class FeatureAgglomeration(AgglomerativeClustering, AgglomerationTransform):
    """Agglomerate features.

    Similar to AgglomerativeClustering, but recursively merges features
    instead of samples.

    Read more in the :ref:`User Guide <hierarchical_clustering>`.

    Parameters
    ----------
    n_clusters : int, default 2
        The number of clusters to find.

    connectivity : array-like or callable, optional
        Connectivity matrix. Defines for each feature the neighboring
        features following a given structure of the data.
        This can be a connectivity matrix itself or a callable that transforms
        the data into a connectivity matrix, such as derived from
        kneighbors_graph. Default is None, i.e, the
        hierarchical clustering algorithm is unstructured.

    affinity : string or callable, default "euclidean"
        Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
        "manhattan", "cosine", or 'precomputed'.
        If linkage is "ward", only "euclidean" is accepted.

    memory : Instance of joblib.Memory or string, optional
        Used to cache the output of the computation of the tree.
        By default, no caching is done. If a string is given, it is the
        path to the caching directory.

    compute_full_tree : bool or 'auto', optional, default "auto"
        Stop early the construction of the tree at n_clusters. This is
        useful to decrease computation time if the number of clusters is
        not small compared to the number of features. This option is
        useful only when specifying a connectivity matrix. Note also that
        when varying the number of clusters and using caching, it may
        be advantageous to compute the full tree.

    linkage : {"ward", "complete", "average"}, optional, default "ward"
        Which linkage criterion to use. The linkage criterion determines which
        distance to use between sets of features. The algorithm will merge
        the pairs of cluster that minimize this criterion.

        - ward minimizes the variance of the clusters being merged.
        - average uses the average of the distances of each feature of
          the two sets.
        - complete or maximum linkage uses the maximum distances between
          all features of the two sets.

    pooling_func : callable, default np.mean
        This combines the values of agglomerated features into a single
        value, and should accept an array of shape [M, N] and the keyword
        argument `axis=1`, and reduce it to an array of size [M].

    Attributes
    ----------
    labels_ : array-like, (n_features,)
        cluster labels for each feature.

    n_leaves_ : int
        Number of leaves in the hierarchical tree.

    n_components_ : int
        The estimated number of connected components in the graph.

    children_ : array-like, shape (n_nodes-1, 2)
        The children of each non-leaf node. Values less than `n_features`
        correspond to leaves of the tree which are the original samples.
        A node `i` greater than or equal to `n_features` is a non-leaf
        node and has children `children_[i - n_features]`. Alternatively
        at the i-th iteration, children[i][0] and children[i][1]
        are merged to form node `n_features + i`
    """

    def fit(self, X, y=None, **params):
        """Fit the hierarchical clustering on the data

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            The data

        Returns
        -------
        self
        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        ensure_min_features=2, estimator=self)
        # Cluster the transposed matrix: each feature plays the role of a
        # sample for the parent class's clustering algorithm.
        return AgglomerativeClustering.fit(self, X.T, **params)

    @property
    def fit_predict(self):
        # Raising AttributeError from a property shadows the ``fit_predict``
        # method inherited from AgglomerativeClustering, so
        # ``hasattr(obj, 'fit_predict')`` is False for this transformer.
        raise AttributeError
| |
import logging
import ntpath
import os
import shutil
from functools import partial
from tempfile import mkstemp
import requests
from django.apps.registry import AppRegistryNotReady
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.files.uploadedfile import UploadedFile
from django.core.management import call_command
from django.core.management.base import CommandError
from django.http.response import Http404
from django.http.response import HttpResponseBadRequest
from django.utils.translation import get_language_from_request
from django.utils.translation import gettext_lazy as _
from morango.models import ScopeDefinition
from morango.sync.controller import MorangoProfileController
from requests.exceptions import HTTPError
from rest_framework import decorators
from rest_framework import serializers
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import BasicAuthentication
from rest_framework.authentication import SessionAuthentication
from rest_framework.exceptions import APIException
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.exceptions import ParseError
from rest_framework.exceptions import PermissionDenied
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from six import string_types
from .permissions import FacilitySyncPermissions
from kolibri.core.auth.constants.morango_sync import PROFILE_FACILITY_DATA
from kolibri.core.auth.constants.morango_sync import State as FacilitySyncState
from kolibri.core.auth.management.utils import get_client_and_server_certs
from kolibri.core.auth.management.utils import get_facility_dataset_id
from kolibri.core.auth.models import Facility
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.permissions import CanExportLogs
from kolibri.core.content.permissions import CanImportUsers
from kolibri.core.content.permissions import CanManageContent
from kolibri.core.content.utils.channels import get_mounted_drive_by_id
from kolibri.core.content.utils.channels import get_mounted_drives_with_channel_info
from kolibri.core.content.utils.channels import read_channel_metadata_from_db_file
from kolibri.core.content.utils.paths import get_channel_lookup_url
from kolibri.core.content.utils.paths import get_content_database_file_path
from kolibri.core.content.utils.upgrade import diff_stats
from kolibri.core.device.permissions import IsSuperuser
from kolibri.core.device.permissions import NotProvisionedCanPost
from kolibri.core.discovery.models import NetworkLocation
from kolibri.core.discovery.utils.network.client import NetworkClient
from kolibri.core.discovery.utils.network.errors import NetworkLocationNotFound
from kolibri.core.discovery.utils.network.errors import URLParseError
from kolibri.core.logger.csv_export import CSV_EXPORT_FILENAMES
from kolibri.core.tasks.exceptions import JobNotFound
from kolibri.core.tasks.exceptions import JobNotRestartable
from kolibri.core.tasks.exceptions import UserCancelledError
from kolibri.core.tasks.job import JobRegistry
from kolibri.core.tasks.job import State
from kolibri.core.tasks.main import facility_queue
from kolibri.core.tasks.main import job_storage
from kolibri.core.tasks.main import priority_queue
from kolibri.core.tasks.main import queue
from kolibri.core.tasks.utils import get_current_job
from kolibri.utils import conf
# Ensure the Django app registry is fully initialized before this module is
# used; if it is not ready yet, bootstrap Django explicitly.
try:
    from django.apps import apps

    apps.check_apps_ready()
except AppRegistryNotReady:
    import django

    django.setup()

logger = logging.getLogger(__name__)

# Lazily-translated, user-facing error strings for task failure reporting.
NETWORK_ERROR_STRING = _("There was a network error.")
DISK_IO_ERROR_STRING = _("There was a disk access error.")
CATCHALL_SERVER_ERROR_STRING = _("There was an unknown error.")
def get_channel_name(channel_id, require_channel=False):
    """Return the name of the channel with ``channel_id``.

    If no such channel exists, return "" — unless ``require_channel`` is
    True, in which case a ValidationError is raised.
    """
    try:
        return ChannelMetadata.objects.get(id=channel_id).name
    except ChannelMetadata.DoesNotExist:
        if require_channel:
            raise serializers.ValidationError("This channel does not exist")
        return ""
def validate_content_task(request, task_description, require_channel=False):
    """Validate the fields shared by all content tasks.

    Returns the metadata dict passed to the task, raising a ValidationError
    when a required field is missing or malformed.
    """
    if "channel_id" not in task_description:
        raise serializers.ValidationError("The channel_id field is required.")
    channel_id = task_description["channel_id"]

    # Fall back to a database lookup when the caller did not supply a name.
    channel_name = task_description.get(
        "channel_name", get_channel_name(channel_id, require_channel)
    )

    node_ids = task_description.get("node_ids", None)
    exclude_node_ids = task_description.get("exclude_node_ids", None)
    for field_name, value in (("node_ids", node_ids),
                              ("exclude_node_ids", exclude_node_ids)):
        if value and not isinstance(value, list):
            raise serializers.ValidationError("%s must be a list." % field_name)

    return {
        "channel_id": channel_id,
        "channel_name": channel_name,
        "exclude_node_ids": exclude_node_ids,
        "node_ids": node_ids,
        "started_by": request.user.pk,
        "started_by_username": request.user.username,
    }
def validate_remote_import_task(request, task_description):
    """Validate a remote-import task, resolving its base URL.

    With a ``peer_id``, the URL comes from that NetworkLocation; otherwise the
    central content server is used.
    """
    import_task = validate_content_task(request, task_description)
    if "peer_id" in task_description:
        peer_id = task_description["peer_id"]
        try:
            baseurl = NetworkLocation.objects.values_list(
                "base_url", flat=True
            ).get(id=peer_id)
        except NetworkLocation.DoesNotExist:
            raise serializers.ValidationError(
                "Peer with id {} does not exist".format(peer_id)
            )
    else:
        baseurl = conf.OPTIONS["Urls"]["CENTRAL_CONTENT_BASE_URL"]
        peer_id = None

    import_task.update({"baseurl": baseurl, "peer_id": peer_id})
    return import_task
def _add_drive_info(import_task, task_description):
    """Attach the drive id and its data folder to a local task description."""
    if "drive_id" not in task_description:
        raise serializers.ValidationError("The drive_id field is required.")
    drive_id = task_description["drive_id"]

    try:
        drive = get_mounted_drive_by_id(drive_id)
    except KeyError:
        raise serializers.ValidationError(
            "That drive_id was not found in the list of drives."
        )

    import_task.update({"drive_id": drive_id, "datafolder": drive.datafolder})
    return import_task
def validate_local_import_task(request, task_description):
    """Validate an import-from-drive task description."""
    return _add_drive_info(
        validate_content_task(request, task_description), task_description
    )
def validate_local_export_task(request, task_description):
    """Validate an export-to-drive task description; the channel must exist."""
    return _add_drive_info(
        validate_content_task(request, task_description, require_channel=True),
        task_description,
    )
def validate_deletion_task(request, task_description):
    """Validate a channel-deletion task; the channel must exist."""
    # Any truthy value (or absence) of force_delete is normalized to a bool.
    force_delete = bool(task_description.get("force_delete"))
    task = validate_content_task(request, task_description, require_channel=True)
    task["force_delete"] = force_delete
    return task
class BaseViewSet(viewsets.ViewSet):
    """Shared machinery for the /api/tasks/ viewsets.

    Subclasses configure ``queues`` (the job queues to operate on) and may
    override ``default_permission_classes``.
    """

    # Job queues searched/operated on by the endpoints below.
    queues = []
    # Empty here; filled in lazily by ``initial`` from
    # ``default_permission_classes`` when a subclass sets nothing explicit.
    permission_classes = []
    # Adding auth classes explicitly until we find a fix for BasicAuth not
    # working on tasks API (in dev settings)
    authentication_classes = [SessionAuthentication, BasicAuthentication]
    def initial(self, request, *args, **kwargs):
        """Resolve permission classes before DRF's standard initialization."""
        # No explicit permissions configured: fall back to the default set.
        if len(self.permission_classes) == 0:
            self.permission_classes = self.default_permission_classes()
        # A default of None means "no permission checks at all".
        if self.permission_classes is None:
            self.permission_classes = []
        return super(BaseViewSet, self).initial(request, *args, **kwargs)
    def default_permission_classes(self):
        """Permissions applied when a subclass sets no explicit ones."""
        # For all /api/tasks/ endpoints
        return [CanManageContent]
def validate_create_req_data(self, request):
"""
Validates the request data received on POST /api/tasks/.
If `request.user` is authorized to initiate the `task` function, this returns
a list of `request.data` otherwise raises PermissionDenied.
"""
if isinstance(request.data, list):
request_data_list = request.data
else:
request_data_list = [request.data]
for request_data in request_data_list:
if "task" not in request_data:
raise serializers.ValidationError("The 'task' field is required.")
if not isinstance(request_data["task"], string_types):
raise serializers.ValidationError("The 'task' value must be a string.")
funcstr = request_data.get("task")
# Make sure the task is registered
try:
registered_job = JobRegistry.REGISTERED_JOBS[funcstr]
except KeyError:
raise serializers.ValidationError(
"'{funcstr}' is not registered.".format(funcstr=funcstr)
)
# Check permissions the DRF way
for permission in registered_job.permissions:
if not permission.has_permission(request, self):
self.permission_denied(request)
return request_data_list
def list(self, request):
jobs_response = [
_job_to_response(j) for _queue in self.queues for j in _queue.jobs
]
return Response(jobs_response)
def create(self, request):
"""
Enqueue a task for async processing.
API endpoint:
POST /api/tasks/
Request payload parameters:
- `task` (required): a string representing the dotted path to task function.
- all other key value pairs are passed to the validator if the
task function has one otherwise they are passed to the task function itself
as keyword args.
Keep in mind:
If a task function has a validator then dict returned by the validator
is passed to the task function as keyword args.
The validator can add `extra_metadata` in the returning dict to set `extra_metadata`
in the enqueued task.
"""
request_data_list = self.validate_create_req_data(request)
enqueued_jobs_response = []
# Once we have validated all the tasks, we are good to go!
for request_data in request_data_list:
funcstr = request_data.pop("task")
registered_job = JobRegistry.REGISTERED_JOBS[funcstr]
# Run validator with request and request_data as its argument
if registered_job.validator is not None:
try:
validator_result = registered_job.validator(request, request_data)
except Exception as e:
raise e
if not isinstance(validator_result, dict):
raise serializers.ValidationError("Validator must return a dict.")
extra_metadata = validator_result.get("extra_metadata")
if extra_metadata is not None and not isinstance(extra_metadata, dict):
raise serializers.ValidationError(
"In the dict returned by validator, 'extra_metadata' must be a dict."
)
request_data = validator_result
job_id = registered_job.enqueue(**request_data)
enqueued_jobs_response.append(_job_to_response(job_storage.get_job(job_id)))
if len(enqueued_jobs_response) == 1:
enqueued_jobs_response = enqueued_jobs_response[0]
return Response(enqueued_jobs_response)
def retrieve(self, request, pk=None):
for _queue in self.queues:
try:
task = _job_to_response(_queue.fetch_job(pk))
break
except JobNotFound:
continue
else:
raise Http404("Task with {pk} not found".format(pk=pk))
return Response(task)
@decorators.action(methods=["post"], detail=False)
def restarttask(self, request):
    """
    Restart a task with its task id given in the task_id parameter.
    """
    if "task_id" not in request.data:
        raise serializers.ValidationError("The 'task_id' field is required.")
    if not isinstance(request.data["task_id"], string_types):
        raise serializers.ValidationError("The 'task_id' should be a string.")
    response_payload = {}
    requested_id = request.data["task_id"]
    for q in self.queues:
        try:
            # restart_job returns the id of the newly-enqueued replacement.
            new_job_id = q.restart_job(requested_id)
            response_payload = _job_to_response(q.fetch_job(new_job_id))
            break
        except JobNotFound:
            continue
        except JobNotRestartable as e:
            raise serializers.ValidationError(str(e))
    return Response(response_payload)
def destroy(self, request, pk=None):
    # unimplemented for now.
    # NOTE(review): returning None here makes the framework emit an empty
    # response body; confirm whether a 405/501 would be more appropriate.
    pass
@decorators.action(methods=["post"], detail=False)
def canceltask(self, request):
    """
    Cancel a task with its task id given in the task_id parameter.
    """
    if "task_id" not in request.data:
        raise serializers.ValidationError("The 'task_id' field is required.")
    if not isinstance(request.data["task_id"], string_types):
        raise serializers.ValidationError("The 'task_id' should be a string.")
    for q in self.queues:
        try:
            q.cancel(request.data["task_id"])
        except JobNotFound:
            # Not on this queue; keep looking.
            continue
        # Cancelled successfully on this queue; stop searching.
        break
    return Response({})
@decorators.action(methods=["post"], detail=False)
def cleartasks(self, request):
    """
    Cancels all running tasks.
    """
    # Empty every queue this viewset manages; nothing is returned to clients.
    for q in self.queues:
        q.empty()
    return Response({})
@decorators.action(methods=["post"], detail=False)
def cleartask(self, request):
    """Clear a single task, identified by ``task_id``, from every queue."""
    task_id = request.data.get("task_id")
    # A missing/empty id is a silent no-op, mirroring the original behavior.
    if not task_id:
        return Response({})
    for q in self.queues:
        q.clear_job(task_id)
    return Response({"task_id": task_id})
@decorators.action(methods=["post"], detail=False)
def deletefinishedtasks(self, request):
    """
    Delete all tasks that have succeeded, failed, or been cancelled.
    """
    task_id = request.data.get("task_id")
    if task_id:
        # A specific id was supplied: clear just that job everywhere.
        for q in self.queues:
            q.clear_job(task_id)
    else:
        # No id supplied: clear every finished job on every queue.
        for q in self.queues:
            q.clear()
    return Response({})
class TasksViewSet(BaseViewSet):
    """Endpoints for content import/export/deletion and CSV import/export tasks."""

    @property
    def queues(self):
        # Tasks managed here may live on the regular or the priority queue.
        return [queue, priority_queue]

    def default_permission_classes(self):
        """Return per-action permission classes; content management by default."""
        if self.action in ["list", "deletefinishedtasks"]:
            return [CanManageContent | CanExportLogs]
        elif self.action == "startexportlogcsv":
            return [CanExportLogs]
        elif self.action in ["importusersfromcsv", "exportuserstocsv"]:
            return [CanImportUsers]
        # For all other tasks
        return [CanManageContent]

    @decorators.action(methods=["post"], detail=False)
    def startchannelupdate(self, request):
        """Update an imported channel to a new version from a remote peer or local drive."""
        sourcetype = request.data.get("sourcetype", None)
        new_version = request.data.get("new_version", None)
        if sourcetype == "remote":
            task = validate_remote_import_task(request, request.data)
            task.update({"type": "UPDATECHANNEL", "new_version": new_version})
            job_id = queue.enqueue(
                _remoteimport,
                task["channel_id"],
                task["baseurl"],
                peer_id=task["peer_id"],
                node_ids=task["node_ids"],
                is_updating=True,
                extra_metadata=task,
                track_progress=True,
                cancellable=True,
            )
        elif sourcetype == "local":
            task = validate_local_import_task(request, request.data)
            task.update({"type": "UPDATECHANNEL", "new_version": new_version})
            job_id = queue.enqueue(
                _diskimport,
                task["channel_id"],
                task["datafolder"],
                drive_id=task["drive_id"],
                node_ids=task["node_ids"],
                is_updating=True,
                extra_metadata=task,
                track_progress=True,
                cancellable=True,
            )
        else:
            raise serializers.ValidationError("sourcetype must be 'remote' or 'local'")
        resp = _job_to_response(queue.fetch_job(job_id))
        return Response(resp)

    @decorators.action(methods=["post"], detail=False)
    def startremotebulkimport(self, request):
        """Enqueue a full remote import (DB + content) for each task description."""
        if not isinstance(request.data, list):
            raise serializers.ValidationError(
                "POST data must be a list of task descriptions"
            )
        tasks = map(partial(validate_remote_import_task, request), request.data)
        job_ids = []
        for task in tasks:
            # database_ready flips to True once the channel DB has downloaded.
            task.update({"type": "REMOTEIMPORT", "database_ready": False})
            import_job_id = queue.enqueue(
                _remoteimport,
                task["channel_id"],
                task["baseurl"],
                peer_id=task["peer_id"],
                extra_metadata=task,
                cancellable=True,
                track_progress=True,
            )
            job_ids.append(import_job_id)
        resp = [_job_to_response(queue.fetch_job(job_id)) for job_id in job_ids]
        return Response(resp)

    @decorators.action(methods=["post"], detail=False)
    def startremotechannelimport(self, request):
        """Import only the channel database from a network peer (priority queue)."""
        task = validate_remote_import_task(request, request.data)
        task.update({"type": "REMOTECHANNELIMPORT"})
        job_id = priority_queue.enqueue(
            call_command,
            "importchannel",
            "network",
            task["channel_id"],
            baseurl=task["baseurl"],
            peer_id=task["peer_id"],
            extra_metadata=task,
            cancellable=True,
        )
        resp = _job_to_response(priority_queue.fetch_job(job_id))
        return Response(resp)

    @decorators.action(methods=["post"], detail=False)
    def startremotecontentimport(self, request):
        """Import channel content (not the DB) from a network peer."""
        task = validate_remote_import_task(request, request.data)
        task.update({"type": "REMOTECONTENTIMPORT"})
        job_id = queue.enqueue(
            call_command,
            "importcontent",
            "network",
            task["channel_id"],
            baseurl=task["baseurl"],
            peer_id=task["peer_id"],
            node_ids=task["node_ids"],
            exclude_node_ids=task["exclude_node_ids"],
            extra_metadata=task,
            track_progress=True,
            cancellable=True,
        )
        resp = _job_to_response(queue.fetch_job(job_id))
        return Response(resp)

    @decorators.action(methods=["post"], detail=False)
    def startdiskbulkimport(self, request):
        """Enqueue a full disk import (DB + content) for each task description."""
        if not isinstance(request.data, list):
            raise serializers.ValidationError(
                "POST data must be a list of task descriptions"
            )
        tasks = map(partial(validate_local_import_task, request), request.data)
        job_ids = []
        for task in tasks:
            task.update({"type": "DISKIMPORT", "database_ready": False})
            import_job_id = queue.enqueue(
                _diskimport,
                task["channel_id"],
                task["datafolder"],
                drive_id=task["drive_id"],
                extra_metadata=task,
                track_progress=True,
                cancellable=True,
            )
            job_ids.append(import_job_id)
        resp = [_job_to_response(queue.fetch_job(job_id)) for job_id in job_ids]
        return Response(resp)

    @decorators.action(methods=["post"], detail=False)
    def startdiskchannelimport(self, request):
        """Import only the channel database from a local drive (priority queue)."""
        task = validate_local_import_task(request, request.data)
        task.update({"type": "DISKCHANNELIMPORT"})
        job_id = priority_queue.enqueue(
            call_command,
            "importchannel",
            "disk",
            task["channel_id"],
            task["datafolder"],
            drive_id=task["drive_id"],
            extra_metadata=task,
            cancellable=True,
        )
        resp = _job_to_response(priority_queue.fetch_job(job_id))
        return Response(resp)

    @decorators.action(methods=["post"], detail=False)
    def startdiskcontentimport(self, request):
        """Import channel content (not the DB) from a local drive."""
        task = validate_local_import_task(request, request.data)
        task.update({"type": "DISKCONTENTIMPORT"})
        job_id = queue.enqueue(
            call_command,
            "importcontent",
            "disk",
            task["channel_id"],
            task["datafolder"],
            drive_id=task["drive_id"],
            node_ids=task["node_ids"],
            exclude_node_ids=task["exclude_node_ids"],
            extra_metadata=task,
            track_progress=True,
            cancellable=True,
        )
        resp = _job_to_response(queue.fetch_job(job_id))
        return Response(resp)

    @decorators.action(methods=["post"], detail=False)
    def startbulkdelete(self, request):
        """Enqueue a content-deletion task for each task description."""
        if not isinstance(request.data, list):
            raise serializers.ValidationError(
                "POST data must be a list of task descriptions"
            )
        tasks = map(partial(validate_deletion_task, request), request.data)
        job_ids = []
        for task in tasks:
            task.update({"type": "DELETECHANNEL"})
            if task["node_ids"] or task["exclude_node_ids"]:
                # Partial deletions: sizes are unknown until the task runs.
                task["file_size"] = None
                task["total_resources"] = None
            delete_job_id = queue.enqueue(
                call_command,
                "deletecontent",
                task["channel_id"],
                track_progress=True,
                extra_metadata=task,
            )
            job_ids.append(delete_job_id)
        resp = [_job_to_response(queue.fetch_job(job_id)) for job_id in job_ids]
        return Response(resp)

    @decorators.action(methods=["post"], detail=False)
    def startdeletechannel(self, request):
        """
        Delete a channel and all its associated content from the server
        """
        task = validate_deletion_task(request, request.data)
        task.update({"type": "DELETECONTENT"})
        if task["node_ids"] or task["exclude_node_ids"]:
            # Partial deletions: sizes are unknown until the task runs.
            task["file_size"] = None
            task["total_resources"] = None
        task_id = queue.enqueue(
            call_command,
            "deletecontent",
            task["channel_id"],
            node_ids=task["node_ids"],
            exclude_node_ids=task["exclude_node_ids"],
            force_delete=task["force_delete"],
            track_progress=True,
            extra_metadata=task,
        )
        # attempt to get the created Task, otherwise return pending status
        resp = _job_to_response(queue.fetch_job(task_id))
        return Response(resp)

    @decorators.action(methods=["post"], detail=False)
    def startdiskbulkexport(self, request):
        """Enqueue a channel+content export to a local drive per task description."""
        if not isinstance(request.data, list):
            raise serializers.ValidationError(
                "POST data must be a list of task descriptions"
            )
        tasks = map(partial(validate_local_export_task, request), request.data)
        job_ids = []
        for task in tasks:
            task.update({"type": "DISKEXPORT"})
            export_job_id = queue.enqueue(
                _localexport,
                task["channel_id"],
                task["drive_id"],
                track_progress=True,
                cancellable=True,
                extra_metadata=task,
            )
            job_ids.append(export_job_id)
        resp = [_job_to_response(queue.fetch_job(job_id)) for job_id in job_ids]
        return Response(resp)

    @decorators.action(methods=["post"], detail=False)
    def startdiskexport(self, request):
        """
        Export a channel to a local drive, and copy content to the drive.
        """
        task = validate_local_export_task(request, request.data)
        task.update({"type": "DISKCONTENTEXPORT"})
        task_id = queue.enqueue(
            _localexport,
            task["channel_id"],
            task["drive_id"],
            track_progress=True,
            cancellable=True,
            node_ids=task["node_ids"],
            exclude_node_ids=task["exclude_node_ids"],
            extra_metadata=task,
        )
        # attempt to get the created Task, otherwise return pending status
        resp = _job_to_response(queue.fetch_job(task_id))
        return Response(resp)

    @decorators.action(methods=["get"], detail=False)
    def localdrive(self, request):
        """List mounted local drives with their channel info."""
        drives = get_mounted_drives_with_channel_info()
        # make sure everything is a dict, before converting to JSON
        if not isinstance(drives, dict):
            raise AssertionError
        out = [mountdata._asdict() for mountdata in drives.values()]
        return Response(out)

    @decorators.action(methods=["post"], detail=False)
    def importusersfromcsv(self, request):
        """
        Import users, classes, roles and roles assignemnts from a csv file.
        :param: FILE: file dictionary with the file object
        :param: csvfile: filename of the file stored in kolibri temp folder
        :param: dryrun: validate the data but don't modify the database
        :param: delete: Users not in the csv will be deleted from the facility, and classes cleared
        :returns: An object with the job information
        """

        def manage_fileobject(request, temp_dir):
            # Persist the uploaded CSV to temp_dir and return its path.
            upload = UploadedFile(request.FILES["csvfile"])
            # Django uses InMemoryUploadedFile for files less than 2.5Mb
            # and TemporaryUploadedFile for bigger files:
            # (isinstance, not type equality, so subclasses are handled too)
            if isinstance(upload.file, InMemoryUploadedFile):
                _, filepath = mkstemp(dir=temp_dir, suffix=".upload")
                with open(filepath, "w+b") as dest:
                    filepath = dest.name
                    for chunk in upload.file.chunks():
                        dest.write(chunk)
            else:
                tmpfile = upload.file.temporary_file_path()
                filename = ntpath.basename(tmpfile)
                filepath = os.path.join(temp_dir, filename)
                shutil.copy(tmpfile, filepath)
            return filepath

        temp_dir = os.path.join(conf.KOLIBRI_HOME, "temp")
        if not os.path.isdir(temp_dir):
            os.mkdir(temp_dir)
        locale = get_language_from_request(request)
        # the request must contain either an object file
        # or the filename of the csv stored in Kolibri temp folder
        # Validation will provide the file object, while
        # Importing will provide the filename, previously validated
        if not request.FILES:
            filename = request.data.get("csvfile", None)
            if filename:
                filepath = os.path.join(temp_dir, filename)
            else:
                return HttpResponseBadRequest("The request must contain a file object")
        else:
            if "csvfile" not in request.FILES:
                return HttpResponseBadRequest("Wrong file object")
            filepath = manage_fileobject(request, temp_dir)
        delete = request.data.get("delete", None)
        dryrun = request.data.get("dryrun", None)
        userid = request.user.pk
        facility_id = request.data.get("facility_id", None)
        job_type = "IMPORTUSERSFROMCSV"
        job_metadata = {"type": job_type, "started_by": userid, "facility": facility_id}
        job_args = ["bulkimportusers"]
        if dryrun:
            job_args.append("--dryrun")
        if delete:
            job_args.append("--delete")
        job_args.append(filepath)
        job_kwd_args = {
            "facility": facility_id,
            "userid": userid,
            "locale": locale,
            "extra_metadata": job_metadata,
            "track_progress": True,
        }
        job_id = priority_queue.enqueue(call_command, *job_args, **job_kwd_args)
        resp = _job_to_response(priority_queue.fetch_job(job_id))
        return Response(resp)

    @decorators.action(methods=["post"], detail=False)
    def exportuserstocsv(self, request):
        """
        Export users, classes, roles and roles assignemnts to a csv file.
        :param: facility_id
        :returns: An object with the job information
        """
        facility_id = request.data.get("facility_id", None)
        try:
            if facility_id:
                # The lookup both validates existence and normalizes the id.
                facility = Facility.objects.get(pk=facility_id).id
            else:
                facility = request.user.facility
        except Facility.DoesNotExist:
            raise serializers.ValidationError(
                "Facility with ID {} does not exist".format(facility_id)
            )
        job_type = "EXPORTUSERSTOCSV"
        job_metadata = {
            "type": job_type,
            "started_by": request.user.pk,
            "facility": facility,
        }
        locale = get_language_from_request(request)
        job_id = priority_queue.enqueue(
            call_command,
            "bulkexportusers",
            facility=facility,
            locale=locale,
            overwrite="true",
            extra_metadata=job_metadata,
            track_progress=True,
        )
        resp = _job_to_response(priority_queue.fetch_job(job_id))
        return Response(resp)

    @decorators.action(methods=["post"], detail=False)
    def startexportlogcsv(self, request):
        """
        Dumps in csv format the required logs.
        By default it will be dump contentsummarylog.
        :param: logtype: Kind of log to dump, summary or session
        :param: facility
        :returns: An object with the job information
        """
        facility_id = request.data.get("facility", None)
        if facility_id:
            facility = Facility.objects.get(pk=facility_id)
        else:
            facility = request.user.facility
        log_type = request.data.get("logtype", "summary")
        if log_type in CSV_EXPORT_FILENAMES.keys():
            logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export")
            filepath = os.path.join(
                logs_dir,
                # Fix: use facility.id (always set) instead of the raw request
                # value, which is None — and unsliceable — when the requester's
                # own facility is used.
                CSV_EXPORT_FILENAMES[log_type].format(facility.name, facility.id[:4]),
            )
        else:
            raise Http404(
                "Impossible to create a csv export file for {}".format(log_type)
            )
        if not os.path.isdir(logs_dir):
            os.mkdir(logs_dir)
        job_type = (
            "EXPORTSUMMARYLOGCSV" if log_type == "summary" else "EXPORTSESSIONLOGCSV"
        )
        job_metadata = {
            "type": job_type,
            "started_by": request.user.pk,
            "facility": facility.id,
        }
        job_id = priority_queue.enqueue(
            call_command,
            "exportlogs",
            log_type=log_type,
            output_file=filepath,
            facility=facility.id,
            overwrite="true",
            extra_metadata=job_metadata,
            track_progress=True,
        )
        resp = _job_to_response(priority_queue.fetch_job(job_id))
        return Response(resp)

    @decorators.action(methods=["post"], detail=False)
    def channeldiffstats(self, request):
        """Enqueue computation of diff stats between installed and new channel versions."""
        job_metadata = {}
        channel_id = request.data.get("channel_id")
        method = request.data.get("method")
        drive_id = request.data.get("drive_id")
        baseurl = request.data.get("baseurl")
        # request validation and job metadata info
        if not channel_id:
            raise serializers.ValidationError("The channel_id field is required.")
        if not method:
            raise serializers.ValidationError("The method field is required.")
        if method == "network":
            baseurl = baseurl or conf.OPTIONS["Urls"]["CENTRAL_CONTENT_BASE_URL"]
            job_metadata["baseurl"] = baseurl
            # get channel version metadata
            url = get_channel_lookup_url(baseurl=baseurl, identifier=channel_id)
            resp = requests.get(url)
            channel_metadata = resp.json()
            job_metadata["new_channel_version"] = channel_metadata[0]["version"]
        elif method == "disk":
            if not drive_id:
                raise serializers.ValidationError(
                    "The drive_id field is required when using 'disk' method."
                )
            job_metadata = _add_drive_info(job_metadata, request.data)
            # get channel version metadata
            drive = get_mounted_drive_by_id(drive_id)
            channel_metadata = read_channel_metadata_from_db_file(
                get_content_database_file_path(channel_id, drive.datafolder)
            )
            job_metadata["new_channel_version"] = channel_metadata["version"]
        else:
            raise serializers.ValidationError(
                "'method' field should either be 'network' or 'disk'."
            )
        job_metadata.update(
            {
                "type": "CHANNELDIFFSTATS",
                "started_by": request.user.pk,
                "channel_id": channel_id,
            }
        )
        job_id = priority_queue.enqueue(
            diff_stats,
            channel_id,
            method,
            drive_id=drive_id,
            baseurl=baseurl,
            extra_metadata=job_metadata,
            track_progress=False,
            cancellable=True,
        )
        resp = _job_to_response(priority_queue.fetch_job(job_id))
        return Response(resp)
class FacilityTasksViewSet(BaseViewSet):
    """Endpoints for facility sync (data portal and peer) and facility deletion."""

    @property
    def queues(self):
        # All facility sync/delete work runs on the dedicated facility queue.
        return [facility_queue]

    def default_permission_classes(self):
        # NOTE(review): returns None for actions other than list/retrieve —
        # presumably BaseViewSet falls back to per-action permission_classes.
        if self.action in ["list", "retrieve"]:
            return [FacilitySyncPermissions]

    @decorators.action(
        methods=["post"], detail=False, permission_classes=[FacilitySyncPermissions]
    )
    def startdataportalsync(self, request):
        """
        Initiate a PUSH sync with Kolibri Data Portal.
        """
        facility_id = validate_facility(request)
        sync_args = validate_sync_task(request)
        job_data = prepare_sync_job(
            facility=facility_id,
            extra_metadata=prepare_sync_task(*sync_args, type="SYNCDATAPORTAL"),
        )
        job_id = facility_queue.enqueue(call_command, "sync", **job_data)
        resp = _job_to_response(facility_queue.fetch_job(job_id))
        return Response(resp)

    @decorators.action(methods=["post"], detail=False, permission_classes=[IsSuperuser])
    def startdataportalbulksync(self, request):
        """
        Initiate a PUSH sync with Kolibri Data Portal for ALL registered facilities.
        """
        responses = []
        facilities = Facility.objects.filter(dataset__registered=True).values_list(
            "id", "name"
        )
        for id, name in facilities:
            # Reuse the single-facility action per facility.
            request.data.update(facility=id, facility_name=name)
            responses.append(self.startdataportalsync(request).data)
        return Response(responses)

    # Method needs to be available in Setup Wizard as well
    @decorators.action(
        methods=["post"],
        detail=False,
        permission_classes=[IsSuperuser | NotProvisionedCanPost],
    )
    def startpeerfacilityimport(self, request):
        """
        Initiate a PULL of a specific facility from another device.
        """
        baseurl, facility_id, username, password = validate_peer_sync_job(request)
        validate_and_create_sync_credentials(baseurl, facility_id, username, password)
        sync_args = validate_sync_task(request)
        job_data = prepare_peer_sync_job(
            baseurl,
            facility_id,
            no_push=True,
            no_provision=True,
            extra_metadata=prepare_sync_task(*sync_args, type="SYNCPEER/PULL"),
        )
        job_id = facility_queue.enqueue(call_command, "sync", **job_data)
        resp = _job_to_response(facility_queue.fetch_job(job_id))
        return Response(resp)

    @decorators.action(
        methods=["post"], detail=False, permission_classes=[FacilitySyncPermissions]
    )
    def startpeerfacilitysync(self, request):
        """
        Initiate a SYNC (PULL + PUSH) of a specific facility from another device.
        """
        baseurl, facility_id, username, password = validate_peer_sync_job(request)
        validate_and_create_sync_credentials(baseurl, facility_id, username, password)
        sync_args = validate_sync_task(request)
        job_data = prepare_peer_sync_job(
            baseurl,
            facility_id,
            extra_metadata=prepare_sync_task(*sync_args, type="SYNCPEER/FULL"),
        )
        job_id = facility_queue.enqueue(call_command, "sync", **job_data)
        resp = _job_to_response(facility_queue.fetch_job(job_id))
        return Response(resp)

    @decorators.action(methods=["post"], detail=False, permission_classes=[IsSuperuser])
    def startdeletefacility(self, request):
        """
        Initiate a task to delete a facility
        """
        # dict.get never raises KeyError, so a direct falsy check replaces the
        # original raise-KeyError/except round trip.
        facility_id = request.data.get("facility")
        if not facility_id:
            raise ParseError(
                dict(code="INVALID_FACILITY", message="Missing `facility` parameter")
            )
        if not Facility.objects.filter(id=facility_id).exists():
            raise ValidationError(
                dict(code="INVALID_FACILITY", message="Facility doesn't exist")
            )
        if not Facility.objects.exclude(id=facility_id).exists():
            raise ValidationError(
                dict(
                    code="SOLE_FACILITY",
                    message="Cannot delete the sole facility on the device",
                )
            )
        if request.user.is_facility_user and request.user.facility_id == facility_id:
            raise ValidationError(
                dict(code="FACILITY_MEMBER", message="User is member of facility")
            )
        facility_name = Facility.objects.get(id=facility_id).name
        job_id = facility_queue.enqueue(
            call_command,
            "deletefacility",
            facility=facility_id,
            track_progress=True,
            noninteractive=True,
            cancellable=False,
            extra_metadata=dict(
                facility=facility_id,
                facility_name=facility_name,
                started_by=request.user.pk,
                started_by_username=request.user.username,
                type="DELETEFACILITY",
            ),
        )
        resp = _job_to_response(facility_queue.fetch_job(job_id))
        return Response(resp)
class ResourceGoneError(APIException):
    """
    API error raised when a peer is no longer online (HTTP 410 Gone).
    """

    status_code = status.HTTP_410_GONE
    default_detail = "Unable to connect"
def prepare_sync_task(
    facility_id,
    user_id,
    username,
    facility_name,
    device_name,
    device_id,
    baseurl,
    **kwargs
):
    """Build the ``extra_metadata`` dict for a facility sync job.

    Peer syncs (SYNCPEER/PULL, SYNCPEER/FULL) additionally record the remote
    device details; data portal syncs record only the facility name. Any extra
    keyword arguments (including ``type``) are merged in last.
    """
    task_data = {
        "facility": facility_id,
        "started_by": user_id,
        "started_by_username": username,
        "sync_state": FacilitySyncState.PENDING,
        "bytes_sent": 0,
        "bytes_received": 0,
    }
    task_type = kwargs.get("type")
    if task_type in ("SYNCPEER/PULL", "SYNCPEER/FULL"):
        # Peer syncs carry client-supplied details about the remote device.
        task_data["facility_name"] = facility_name
        task_data["device_name"] = device_name
        task_data["device_id"] = device_id
        task_data["baseurl"] = baseurl
    elif task_type == "SYNCDATAPORTAL":
        # Data portal syncs only need the facility name.
        task_data["facility_name"] = facility_name
    task_data.update(kwargs)
    return task_data
def validate_facility(request):
    """Return the ``facility`` id from the request body.

    :raises ParseError: when the ``facility`` parameter is missing or falsy.
    """
    # dict.get never raises KeyError, so the original try/raise-KeyError/except
    # dance collapses to a plain falsy check with identical behavior.
    facility_id = request.data.get("facility")
    if not facility_id:
        raise ParseError("Missing `facility` parameter")
    return facility_id
def validate_sync_task(request):
    """Collect the positional arguments for :func:`prepare_sync_task`.

    Validates that a facility id is present, then gathers the requester's
    identity and optional peer-device metadata (defaulting to empty strings).
    """
    facility_id = validate_facility(request)
    user = request.user
    data = request.data
    return (
        facility_id,
        user.pk,
        user.username,
        data.get("facility_name", ""),
        data.get("device_name", ""),
        data.get("device_id", ""),
        data.get("baseurl", ""),
    )
def prepare_sync_job(**kwargs):
    """Return the keyword arguments for a ``sync`` management-command job.

    Starts from the standard defaults (chunked, non-interactive, progress
    tracked, not cancellable) and overlays any caller-supplied overrides.
    """
    defaults = {
        "chunk_size": 200,
        "noninteractive": True,
        "extra_metadata": {},
        "track_progress": True,
        "cancellable": False,
    }
    defaults.update(kwargs)
    return defaults
def validate_peer_sync_job(request):
    """Validate the peer-sync parameters in the request body.

    :returns: tuple of ``(baseurl, facility_id, username, password)``
    :raises ParseError: when ``baseurl`` is missing or unparseable, or when
        the ``facility`` parameter is missing.
    :raises ResourceGoneError: when the peer cannot be reached.
    """
    # dict.get never raises KeyError, so the missing-parameter case is checked
    # directly instead of via the original raise-KeyError/except round trip.
    address = request.data.get("baseurl")
    if not address:
        raise ParseError("Missing `baseurl` parameter")
    try:
        # Normalizes the address and probes reachability of the peer.
        baseurl = NetworkClient(address=address).base_url
    except URLParseError:
        raise ParseError("Invalid URL")
    except NetworkLocationNotFound:
        raise ResourceGoneError()
    facility_id = validate_facility(request)
    username = request.data.get("username", None)
    password = request.data.get("password", None)
    return (baseurl, facility_id, username, password)
def validate_and_create_sync_credentials(
    baseurl, facility_id, username, password, user_id=None
):
    """
    Validates user credentials for syncing by performing certificate verification, which will also
    save any certificates after successful authentication

    :param user_id: Optional user ID for SoUD use case
    """
    # call this in case user directly syncs without migrating database
    if not ScopeDefinition.objects.filter():
        call_command("loaddata", "scopedefinitions")
    controller = MorangoProfileController(PROFILE_FACILITY_DATA)
    network_connection = controller.create_network_connection(baseurl)
    # try to get the certificate, which will save it if successful
    try:
        # make sure we get the dataset ID
        facility_id, dataset_id = get_facility_dataset_id(
            baseurl, identifier=facility_id, noninteractive=True
        )
        # username and password are not required for this to succeed unless there is no cert
        get_client_and_server_certs(
            username,
            password,
            dataset_id,
            network_connection,
            user_id=user_id,
            facility_id=facility_id,
            noninteractive=True,
        )
    except (CommandError, HTTPError) as e:
        # No stored certificate and no credentials supplied: deny outright;
        # otherwise surface the failure as an authentication error.
        if not username and not password:
            raise PermissionDenied()
        else:
            raise AuthenticationFailed(e)
def prepare_peer_sync_job(baseurl, facility_id, **kwargs):
    """
    Initializes and validates connection to peer with username and password for the sync command. If
    already initialized, the username and password do not need to be supplied

    :param baseurl: the peer device's base URL
    :param facility_id: id of the facility to sync
    :returns: job kwargs dict built on :func:`prepare_sync_job` defaults
    """
    return prepare_sync_job(facility=facility_id, baseurl=baseurl, **kwargs)
def prepare_soud_sync_job(baseurl, facility_id, user_id, **kwargs):
    """
    A SoUD sync requires that the device is already "registered" with the server, so there
    shouldn't be a need for username/password and the verification of those. This eliminates the
    validation to keep overhead low for automated single-user syncing. To initialize with a peer
    for a SoUD, use `prepare_peer_sync_job` with `user` keyword argument

    :param user_id: the single user on whose behalf the sync runs
    :returns: job kwargs dict built on :func:`prepare_sync_job` defaults
    """
    return prepare_sync_job(
        baseurl=baseurl, facility=facility_id, user=user_id, **kwargs
    )
def prepare_soud_resume_sync_job(baseurl, sync_session_id, user_id, **kwargs):
    """
    Resuming a SoUD sync requires that a normal sync has occurred and the `SyncSession` is still
    active

    :param sync_session_id: id of the still-active SyncSession to resume
    :returns: job kwargs dict built on :func:`prepare_sync_job` defaults
    """
    return prepare_sync_job(baseurl=baseurl, id=sync_session_id, user=user_id, **kwargs)
def _remoteimport(
    channel_id,
    baseurl,
    peer_id=None,
    update_progress=None,
    check_for_cancel=None,
    node_ids=None,
    is_updating=False,
    exclude_node_ids=None,
    extra_metadata=None,
):
    """Import a channel database and then its content from a network peer.

    Runs ``importchannel`` followed by ``importcontent``, updating the current
    job's metadata in between so the UI can reflect progress.
    """
    call_command(
        "importchannel",
        "network",
        channel_id,
        baseurl=baseurl,
        update_progress=update_progress,
        check_for_cancel=check_for_cancel,
    )
    # Make some real-time updates to the metadata
    job = get_current_job()
    # Fix: the original dereferenced `job` before its truthiness check below;
    # guard every access consistently so a missing current job cannot crash.
    if job:
        # Signal to UI that the DB-downloading step is done so it knows to
        # display progress correctly
        job.update_progress(0, 1.0)
        job.extra_metadata["database_ready"] = True
        # Add the channel name if it wasn't added initially
        if job.extra_metadata.get("channel_name", "") == "":
            job.extra_metadata["channel_name"] = get_channel_name(channel_id)
        job.save_meta()
    call_command(
        "importcontent",
        "network",
        channel_id,
        baseurl=baseurl,
        peer_id=peer_id,
        node_ids=node_ids,
        exclude_node_ids=exclude_node_ids,
        import_updates=is_updating,
        update_progress=update_progress,
        check_for_cancel=check_for_cancel,
    )
def _diskimport(
    channel_id,
    directory,
    drive_id=None,
    update_progress=None,
    check_for_cancel=None,
    node_ids=None,
    is_updating=False,
    exclude_node_ids=None,
    extra_metadata=None,
):
    """Import a channel database and then its content from a local directory.

    Runs ``importchannel`` followed by ``importcontent``, updating the current
    job's metadata in between. The content step is skipped when updating and
    no nodes changed.
    """
    call_command(
        "importchannel",
        "disk",
        channel_id,
        directory,
        update_progress=update_progress,
        check_for_cancel=check_for_cancel,
    )
    # Make some real-time updates to the metadata
    job = get_current_job()
    # Fix: the original dereferenced `job` before its truthiness check below;
    # guard every access consistently so a missing current job cannot crash.
    if job:
        # Signal to UI that the DB-downloading step is done so it knows to
        # display progress correctly
        job.update_progress(0, 1.0)
        job.extra_metadata["database_ready"] = True
        # Add the channel name if it wasn't added initially
        if job.extra_metadata.get("channel_name", "") == "":
            job.extra_metadata["channel_name"] = get_channel_name(channel_id)
        job.save_meta()
    # Skip importcontent step if updating and no nodes have changed
    if is_updating and (node_ids is not None) and len(node_ids) == 0:
        pass
    else:
        call_command(
            "importcontent",
            "disk",
            channel_id,
            directory,
            drive_id=drive_id,
            node_ids=node_ids,
            exclude_node_ids=exclude_node_ids,
            update_progress=update_progress,
            check_for_cancel=check_for_cancel,
        )
def _localexport(
    channel_id,
    drive_id,
    update_progress=None,
    check_for_cancel=None,
    node_ids=None,
    exclude_node_ids=None,
    extra_metadata=None,
):
    """Export a channel database and its content to a mounted drive.

    On user cancellation, the partially-exported channel database file is
    removed from the drive before the cancellation is re-raised.
    """
    drive = get_mounted_drive_by_id(drive_id)
    call_command(
        "exportchannel",
        channel_id,
        drive.datafolder,
        update_progress=update_progress,
        check_for_cancel=check_for_cancel,
    )
    try:
        call_command(
            "exportcontent",
            channel_id,
            drive.datafolder,
            node_ids=node_ids,
            exclude_node_ids=exclude_node_ids,
            update_progress=update_progress,
            check_for_cancel=check_for_cancel,
        )
    except UserCancelledError:
        # Best-effort cleanup of the exported DB file; ignore if already gone.
        try:
            os.remove(
                get_content_database_file_path(channel_id, datafolder=drive.datafolder)
            )
        except OSError:
            pass
        # Re-raise so the job is marked as cancelled.
        raise
def _job_to_response(job):
    """Serialize a queue job into the task-API response dict.

    A falsy ``job`` yields a placeholder marked SCHEDULED so clients can keep
    polling; otherwise the job's state plus its ``extra_metadata`` is returned.
    """
    if not job:
        return {
            "type": None,
            "started_by": None,
            "status": State.SCHEDULED,
            "percentage": 0,
            "progress": [],
            "id": None,
            "cancellable": False,
            "clearable": False,
        }
    finished_states = (State.FAILED, State.CANCELED, State.COMPLETED)
    response = {
        "status": job.state,
        "exception": str(job.exception),
        "traceback": str(job.traceback),
        "percentage": job.percentage_progress,
        "id": job.job_id,
        "cancellable": job.cancellable,
        "clearable": job.state in finished_states,
    }
    response.update(job.extra_metadata)
    return response
| |
#!python
# coding: utf-8
"""This module contains utilities which based on the Pipe mechanism provided
by cmdlet.
"""
import sys
import os
import io
import re
import types
import subprocess
import string
import locale
import codecs
import functools
from six import PY3, StringIO, text_type, string_types
from cmdlet import Pipe, PipeFunction, register_type, unregister_type
#: Alias of cmdlet.PipeFunction.
pipe = PipeFunction

#: data type of file for different python version.
if PY3:
    # Python 3 removed the ``file`` builtin; io.IOBase is the common base
    # class of all file objects there.
    file = io.IOBase
file_type = file
#: data type of string for different python version.
string_type = string_types
#: data type of unicode for different python version.
unicode_type = text_type
#: Check if is string or unicode
is_str_type = lambda x: isinstance(x, (string_type, unicode_type))
def run(cmd):
    """Run pipe object and return its last result.

    :param cmd: The Pipe object to be executed.
    :type cmd: Pipe
    :returns: The last result.

    .. seealso::
        :py:meth:`cmdlet.Pipe.run`
    """
    return cmd.run()
def result(cmd):
    """Run pipe object and return its result in a list.

    :param cmd: The Pipe object to be executed.
    :type cmd: Pipe
    :returns: The list which contains the result of pipe.

    .. seealso::
        :py:meth:`cmdlet.Pipe.result`
    """
    return cmd.result()
@pipe.func
def seq(prev, sequence):
    """Wrap any iterable object as a pipe source.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param sequence: The iterable object to be wrapped.
    :type sequence: iterator
    :returns: generator
    """
    for item in sequence:
        yield item
@pipe.func
def items(prev, dict_obj):
    """Wrap a dict object as a pipe of its (key, value) pairs.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param dict_obj: The dict object to be wrapped.
    :type dict_obj: dict
    :returns: generator
    """
    for pair in dict_obj.items():
        yield pair
@pipe.func
def attr(prev, attr_name):
    """Extract a named attribute's value from each incoming object.

    Objects that lack the attribute are silently skipped.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param attr_name: The name of attribute
    :type attr_name: str
    :returns: generator
    """
    for item in prev:
        if hasattr(item, attr_name):
            yield getattr(item, attr_name)
@pipe.func
def attrs(prev, attr_names):
    """Extract several attribute values from each incoming object.

    Names that are not valid attributes of an object are simply omitted
    from that object's yielded list.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param attr_names: The list of attribute names
    :type attr_names: str of list
    :returns: generator
    """
    for item in prev:
        yield [getattr(item, name) for name in attr_names if hasattr(item, name)]
@pipe.func
def attrdict(prev, attr_names):
    """Extract attribute values of each incoming object into a dict.

    ``attr_names`` may be a list or a dict. With a list, names missing on an
    object are omitted from its yielded dict. With a dict, the corresponding
    value from ``attr_names`` is used as the fallback for missing attributes.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param attr_names: The list or dict of attribute names
    :type attr_names: str of list or dict
    :returns: generator
    """
    if isinstance(attr_names, dict):
        for item in prev:
            yield {
                name: getattr(item, name) if hasattr(item, name) else fallback
                for name, fallback in attr_names.items()
            }
    else:
        for item in prev:
            yield {
                name: getattr(item, name)
                for name in attr_names
                if hasattr(item, name)
            }
@pipe.func
def flatten(prev, depth=sys.maxsize):
    """Extract nested items from the previous pipe.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param depth: The deepest nested level to be extracted. 0 means no extraction.
    :type depth: integer
    :returns: generator
    """
    def _expand(item, level):
        # Unpack `item` while it is iterable and the depth budget allows.
        if hasattr(item, '__iter__') and level <= depth:
            for sub in item:
                for leaf in _expand(sub, level + 1):
                    yield leaf
        else:
            yield item

    for data in prev:
        for out in _expand(data, 1):
            yield out
@pipe.func
def values(prev, *keys, **kw):
    """values pipe extracts values from items of the previous pipe.

    If the previous pipe sends dictionaries, *keys* should contain the
    dictionary keys to extract. If it sends lists or tuples, *keys* are
    treated as integer indexes. Missing keys/indexes are skipped.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :returns: generator
    """
    # Peek at the first item to decide between dict-key and index access.
    # BUGFIX: an empty upstream previously leaked StopIteration out of this
    # generator, which raises RuntimeError under PEP 479 (Python 3.7+).
    try:
        first = next(prev)
    except StopIteration:
        return
    if isinstance(first, dict):
        yield [first[k] for k in keys if k in first]
        for d in prev:
            yield [d[k] for k in keys if k in d]
    else:
        yield [first[i] for i in keys if 0 <= i < len(first)]
        for d in prev:
            yield [d[i] for i in keys if 0 <= i < len(d)]
@pipe.func
def counter(prev):
    """counter pipe counts how many items arrive from the previous pipe.

    All received data is consumed and dropped; a single count value is
    yielded after the last item.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :returns: generator
    """
    total = sum(1 for _ in prev)
    yield total
@pipe.func
def enum(prev, start=0):
    """enum pipe wraps the built-in *enumerate*. It passes a tuple to the
    next pipe containing a count (beginning at *start*, default 0) and the
    value received from the previous pipe.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param start: The start value of enumeration.
    :type start: integer
    :returns: generator
    """
    index = start
    for data in prev:
        yield (index, data)
        index += 1
@pipe.func
def pack(prev, n, rest=False, **kw):
    """pack pipe takes *n* elements from the previous generator and yields
    them as one list to the next pipe.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param n: Number of elements per packed list.
    :type n: integer
    :param rest: Set True to also yield the (shorter) final group.
    :type rest: boolean
    :param padding: If given, pad the final group up to length n with
        this element (only meaningful together with rest=True).
    :returns: generator

    :Example:

    >>> result([1,2,3,4,5,6,7] | pack(3))
    [[1, 2, 3], [4, 5, 6]]

    >>> result([1,2,3,4,5,6,7] | pack(3, rest=True))
    [[1, 2, 3], [4, 5, 6], [7,]]

    >>> result([1,2,3,4,5,6,7] | pack(3, rest=True, padding=None))
    [[1, 2, 3], [4, 5, 6], [7, None, None]]
    """
    use_padding = 'padding' in kw
    padding = kw.get('padding')

    group = []
    count = 0
    for data in prev:
        group.append(data)
        count += 1
        if len(group) == n:
            yield group
            group = []
    if group and rest:
        if use_padding:
            # Fill the short final group up to exactly n elements.
            group.extend([padding] * (n - (count % n)))
        yield group
@pipe.func
def fmt(prev, format_string):
    """fmt pipe formats each item from the previous generator according to
    the given format_string.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param format_string: Format string applied to each item via
        ``format_string.format(item)``.
    :type format_string: str
    :returns: generator
    """
    for i in prev:
        # BUGFIX: a leftover debug print() was removed here; it wrote every
        # item to stdout as an unwanted side effect of formatting.
        yield format_string.format(i)
@pipe.func
def grep(prev, *patterns, **kw):
    """grep pipe filters the data from the previous generator by regular
    expression.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param patterns: Patterns used to filter data. With several patterns,
        data passes if it matches any of them.
    :type patterns: str|unicode|re pattern object
    :param inv: If true, invert the match condition.
    :type inv: boolean
    :param kw: Remaining keyword arguments are passed to re.compile.
    :type kw: dict
    :returns: generator
    """
    invert = kw.pop('inv', False)
    compiled = [re.compile(pattern, **kw) for pattern in patterns]
    for data in prev:
        matched = any(p.search(data) for p in compiled)
        if bool(invert) ^ matched:
            yield data
@pipe.func
def match(prev, *patterns, **kw):
    """match pipe filters the data from the previous generator by regular
    expression. The value passed to the next pipe is a MatchObject, dict,
    tuple or list, selected by the 'to' keyword argument.

    By default a MatchObject is yielded.
    If 'to' is dict, MatchObject.groupdict() is yielded.
    If 'to' is tuple, MatchObject.groups() is yielded as a tuple.
    If 'to' is list, list(MatchObject.groups()) is yielded.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param patterns: Patterns used to filter data. With several patterns,
        data passes if it matches any of them.
    :type patterns: str|unicode
    :param to: The type the match result should be converted to.
    :type to: type
    :returns: generator
    """
    invert = kw.pop('inv', False)
    to = kw.pop('to', False)
    compiled = [re.compile(pattern, **kw) for pattern in patterns]
    for data in prev:
        found = None
        for pattern_obj in compiled:
            found = pattern_obj.match(data)
            if found is not None:
                break
        if not (bool(invert) ^ (found is not None)):
            continue
        if to is dict:
            yield found.groupdict()
        elif to is tuple:
            yield tuple(found.groups())
        elif to is list:
            yield list(found.groups())
        else:
            yield found
@pipe.func
def resplit(prev, pattern, *args, **kw):
    """resplit pipe splits each input from the previous pipe by a regular
    expression. Use the 'maxsplit' keyword argument to limit the number of
    splits.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The pattern used to split each string.
    :type pattern: str|unicode
    """
    limit = kw.pop('maxsplit', 0)
    splitter = re.compile(pattern, *args, **kw)
    for text in prev:
        yield splitter.split(text, maxsplit=limit)
@pipe.func
def sub(prev, pattern, repl, *args, **kw):
    """sub pipe is a wrapper of re.sub. Use the 'count' keyword argument to
    limit the number of substitutions per string.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The pattern string.
    :type pattern: str|unicode
    :param repl: See the repl argument of re.sub.
    :type repl: str|unicode|callable
    """
    max_subs = kw.pop('count', 0)
    compiled = re.compile(pattern, *args, **kw)
    for text in prev:
        yield compiled.sub(repl, text, count=max_subs)
@pipe.func
def subn(prev, pattern, repl, *args, **kw):
    """subn pipe is a wrapper of re.subn: like sub, but yields a
    (new_string, number_of_subs) tuple. Use the 'count' keyword argument to
    limit the number of substitutions per string.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The pattern string.
    :type pattern: str|unicode
    :param repl: See the repl argument of re.sub.
    :type repl: str|unicode|callable
    """
    max_subs = kw.pop('count', 0)
    compiled = re.compile(pattern, *args, **kw)
    for text in prev:
        yield compiled.subn(repl, text, count=max_subs)
@pipe.func
def wildcard(prev, *patterns, **kw):
    """wildcard pipe filters data from the previous generator using shell
    wildcard patterns (translated to regular expressions).

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param patterns: Wildcard strings used to filter data. With several
        patterns, data passes if it matches any of them.
    :type patterns: str|unicode
    :param inv: If true, invert the match condition.
    :type inv: boolean
    :returns: generator
    """
    import fnmatch

    invert = kw.pop('inv', False)
    compiled = [re.compile(fnmatch.translate(pattern), **kw)
                for pattern in patterns]
    for data in prev:
        matched = any(p.match(data) for p in compiled)
        if bool(invert) ^ matched:
            yield data
@pipe.func
def stdout(prev, endl='\n', thru=False):
    """stdout pipe reads data from the previous iterator and writes it to
    standard output.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param endl: The end-of-line symbol for each output.
    :type endl: str
    :param thru: If true, data is passed on to the next generator;
        otherwise it is dropped.
    :type thru: bool
    :returns: generator
    """
    write = sys.stdout.write
    for data in prev:
        write(str(data) + endl)
        if thru:
            yield data
@pipe.func
def stderr(prev, endl='\n', thru=False):
    """stderr pipe reads data from the previous iterator and writes it to
    standard error.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param endl: The end-of-line symbol for each output.
    :type endl: str
    :param thru: If true, data is passed on to the next generator;
        otherwise it is dropped.
    :type thru: bool
    :returns: generator
    """
    write = sys.stderr.write
    for data in prev:
        write(str(data) + endl)
        if thru:
            yield data
@pipe.func
def readline(prev, filename=None, mode='r', trim=None, start=1, end=sys.maxsize):
    """readline pipe takes filenames or file objects (from *filename* or
    from the previous pipe) and sends the file contents to the next pipe
    line by line. The start and end parameters limit the range of lines
    read.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param filename: The files to read. If None, the previous pipe input
        is used as the list of filenames.
    :type filename: None|str|unicode|list|tuple
    :param mode: The mode used to open each file. Default is 'r'.
    :type mode: str
    :param trim: Function applied to each line before it is yielded.
        Defaults to stripping trailing whitespace.
    :type trim: callable
    :param start: First line number (1-based) to yield.
    :type start: integer
    :param end: Last line number to yield.
    :type end: integer
    :returns: generator
    """
    if trim is None:
        trim = lambda s: s.rstrip()

    # Work out where the list of files comes from.
    if prev is not None:
        sources = prev
    elif filename is None:
        raise Exception('No input available for readline.')
    elif is_str_type(filename):
        sources = [filename]
    else:
        sources = filename

    for src in sources:
        # Accept either an already-open file object or a path to open.
        if isinstance(src, file_type):
            fd = src
        else:
            fd = open(src, mode)
        try:
            if start <= 1 and end == sys.maxsize:
                # Fast path: no line-range bookkeeping needed.
                for line in fd:
                    yield trim(line)
            else:
                for line_no, line in enumerate(fd, 1):
                    if line_no < start:
                        continue
                    yield trim(line)
                    if line_no >= end:
                        break
        finally:
            # Only close files we opened ourselves.
            if fd != src:
                fd.close()
@pipe.func
def fileobj(prev, file_handle, endl='', thru=False):
    """fileobj pipe reads data from or writes data to the file object given
    by file_handle. With a previous pipe it writes; standalone it reads.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param file_handle: The file object to read or write.
    :type file_handle: file object
    :param endl: The end-of-line symbol appended to each written item.
    :type endl: str
    :param thru: If true, written data is also passed on to the next
        generator; otherwise it is dropped.
    :type thru: bool
    :returns: generator
    """
    if prev is None:
        # Source mode: emit the file's contents line by line.
        for data in file_handle:
            yield data
    else:
        # Sink mode: write everything from the previous pipe.
        for data in prev:
            file_handle.write(str(data) + endl)
            if thru:
                yield data
@pipe.func
def sh(prev, *args, **kw):
    """sh pipe executes the shell command given by args. If a previous pipe
    exists, its data is written to stdin of the shell process. The stdout of
    the shell process is passed to the next pipe line by line.

    Optional keyword arguments:

    - trim: function used to trim each output line from the shell process.
      The default removes trailing whitespace (str.rstrip).
    - endl: string appended to each input item taken from the previous pipe.
    - returncode: expected return code. If the process exits with a
      different code, subprocess.CalledProcessError is raised.
    - decode: codec callable used to decode the shell output.

    For example::

        py_files = result(sh('ls') | strip | wildcard('*.py'))

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param args: The command line arguments. They are joined by spaces.
    :type args: list of string.
    :param kw: Remaining keyword arguments are passed to subprocess.Popen.
    :type kw: dictionary of options.
    :returns: generator
    """
    endl = '\n' if 'endl' not in kw else kw.pop('endl')
    returncode = None if 'returncode' not in kw else kw.pop('returncode')
    if PY3:
        decode = functools.partial(codecs.decode, encoding=locale.getdefaultlocale()[1]) if 'decode' not in kw else kw.pop('decode')
    else:
        decode = (lambda ln: codecs.decode(ln, locale.getdefaultlocale()[1])) if 'decode' not in kw else kw.pop('decode')
    trim = (lambda s: s.rstrip()) if 'trim' not in kw else kw.pop('trim')
    cmdline = ' '.join(args)
    if not cmdline:
        # No command given: behave as a pass-through of the previous pipe.
        if prev is not None:
            for i in prev:
                yield i
        else:
            while True:
                yield None
        # BUGFIX: previously control fell through here and spawned a shell
        # with an empty command line after the pass-through was exhausted.
        return
    process = subprocess.Popen(cmdline, shell=True,
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                               **kw)
    if prev is not None:
        # Buffer all stdin data first, then write it in one go.
        stdin_buffer = StringIO()
        for i in prev:
            stdin_buffer.write(i)
            if endl:
                stdin_buffer.write(endl)
        if PY3:
            process.stdin.write(stdin_buffer.getvalue().encode('utf-8'))
        else:
            process.stdin.write(stdin_buffer.getvalue())
        process.stdin.flush()
        process.stdin.close()
        stdin_buffer.close()
    for line in process.stdout:
        yield trim(decode(line))
    process.wait()
    if returncode is not None and returncode != process.returncode:
        raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmdline)
@pipe.func
def execmd(prev, *args, **kw):
    """execmd pipe executes each shell command received from the previous
    pipe and yields the command's stdout line by line.

    For example::

        py_files = result(readline("dir_list.txt", trim=str.strip) | fmt("ls {}") | execmd )

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param kw: Remaining keyword arguments are passed to subprocess.Popen.
    :type kw: dictionary of options.
    :returns: generator
    """
    expected_rc = kw.pop('returncode', None)
    if 'decode' in kw:
        decode = kw.pop('decode')
    elif PY3:
        decode = functools.partial(codecs.decode, encoding=locale.getdefaultlocale()[1])
    else:
        decode = lambda ln: codecs.decode(ln, locale.getdefaultlocale()[1])
    trim = kw.pop('trim', lambda s: s.rstrip())

    for cmdline in prev:
        proc = subprocess.Popen(cmdline, shell=True,
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                **kw)
        for line in proc.stdout:
            yield trim(decode(line))
        proc.wait()
        if expected_rc is not None and expected_rc != proc.returncode:
            raise subprocess.CalledProcessError(returncode=proc.returncode, cmd=cmdline)
@pipe.func
def walk(prev, inital_path, *args, **kw):
    """walk pipe wraps os.walk and yields the path of every file found
    under inital_path, one by one.

    :param prev: The previous iterator of pipe (unused).
    :type prev: Pipe
    :param inital_path: The root directory to walk. (Parameter name kept,
        including its historical spelling, for backward compatibility.)
    :type inital_path: str
    :param args: Extra positional arguments forwarded to os.walk.
    :param kw: Extra keyword arguments forwarded to os.walk
        (e.g. topdown, onerror, followlinks). Previously these were
        silently ignored; they are now honoured.
    :returns: generator
    """
    for dir_path, dir_names, filenames in os.walk(inital_path, *args, **kw):
        for filename in filenames:
            yield os.path.join(dir_path, filename)
#: alias of str.upper
upper = pipe.map(lambda s, *args, **kw: s.upper(*args, **kw))
#: alias of str.lower
lower = pipe.map(lambda s, *args, **kw: s.lower(*args, **kw))
#: alias of string.capwords
# BUGFIX: str has no capwords method; capwords lives in the string module.
capwords = pipe.map(lambda s, *args, **kw: string.capwords(s, *args, **kw))
#: alias of str.capitalize
capitalize = pipe.map(lambda s, *args, **kw: s.capitalize(*args, **kw))
#: alias of str.lstrip
lstrip = pipe.map(lambda s, *args, **kw: s.lstrip(*args, **kw))
#: alias of str.rstrip
rstrip = pipe.map(lambda s, *args, **kw: s.rstrip(*args, **kw))
#: alias of str.strip (duplicate definition removed)
strip = pipe.map(lambda s, *args, **kw: s.strip(*args, **kw))
#: alias of str.expandtabs
expandtabs = pipe.map(lambda s, *args, **kw: s.expandtabs(*args, **kw))
#: alias of str.find
find = pipe.map(lambda s, *args, **kw: s.find(*args, **kw))
#: alias of str.rfind
rfind = pipe.map(lambda s, *args, **kw: s.rfind(*args, **kw))
#: alias of str.count
count = pipe.map(lambda s, *args, **kw: s.count(*args, **kw))
#: alias of str.split
split = pipe.map(lambda s, *args, **kw: s.split(*args, **kw))
#: alias of str.rsplit
rsplit = pipe.map(lambda s, *args, **kw: s.rsplit(*args, **kw))
#: alias of str.swapcase
swapcase = pipe.map(lambda s, *args, **kw: s.swapcase(*args, **kw))
#: alias of str.translate
translate = pipe.map(lambda s, *args, **kw: s.translate(*args, **kw))
#: alias of str.ljust
ljust = pipe.map(lambda s, *args, **kw: s.ljust(*args, **kw))
#: alias of str.rjust
rjust = pipe.map(lambda s, *args, **kw: s.rjust(*args, **kw))
#: alias of str.center
center = pipe.map(lambda s, *args, **kw: s.center(*args, **kw))
#: alias of str.zfill
zfill = pipe.map(lambda s, *args, **kw: s.zfill(*args, **kw))
#: alias of str.replace
replace = pipe.map(lambda s, *args, **kw: s.replace(*args, **kw))
@pipe.func
def join(prev, sep, *args, **kw):
    '''alias of str.join: concatenate all items from the previous pipe into
    a single string separated by *sep*, and yield that one string.'''
    yield sep.join(prev, *args, **kw)
@pipe.func
def substitute(prev, *args, **kw):
    '''alias of string.Template.substitute: build one Template from the
    arguments and apply it to every mapping from the previous pipe.'''
    template = string.Template(*args, **kw)
    for mapping in prev:
        yield template.substitute(mapping)
@pipe.func
def safe_substitute(prev, *args, **kw):
    '''alias of string.Template.safe_substitute: like substitute, but
    missing placeholders are left intact instead of raising.'''
    template = string.Template(*args, **kw)
    for mapping in prev:
        yield template.safe_substitute(mapping)
@pipe.func
def to_str(prev, encoding=None):
    """Convert data from the previous pipe with the specified encoding.

    str input is passed through (or encoded to bytes when *encoding* is
    given); bytes input is decoded (defaulting to sys.stdout.encoding or
    'utf-8').

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param encoding: Codec name, or None for the default behaviour above.
    :type encoding: str|None
    :returns: generator
    """
    # Peek at the first item to decide between the str and bytes paths.
    # BUGFIX: an empty upstream previously leaked StopIteration out of this
    # generator, which raises RuntimeError under PEP 479 (Python 3.7+).
    try:
        first = next(prev)
    except StopIteration:
        return
    if isinstance(first, str):
        if encoding is None:
            yield first
            for s in prev:
                yield s
        else:
            yield first.encode(encoding)
            for s in prev:
                yield s.encode(encoding)
    else:
        if encoding is None:
            encoding = sys.stdout.encoding or 'utf-8'
        yield first.decode(encoding)
        for s in prev:
            yield s.decode(encoding)
def register_default_types():
    """Register all default type-to-pipe convertors."""
    defaults = [
        (type, pipe.map),
        (types.FunctionType, pipe.map),
        (types.MethodType, pipe.map),
        (tuple, seq),
        (list, seq),
        (types.GeneratorType, seq),
        (string_type, sh),
        (unicode_type, sh),
        (file_type, fileobj),
    ]
    for data_type, convertor in defaults:
        register_type(data_type, convertor)
    # range and map are lazy types that only exist as such on Python 3.
    if PY3:
        register_type(range, seq)
        register_type(map, seq)

register_default_types()
| |
"""
Autopsy Forensic Browser
Copyright 2019-2021 Basis Technology Corp.
Contact: carrier <at> sleuthkit <dot> org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from java.io import File
from java.lang import Class
from java.lang import ClassNotFoundException
from java.lang import Long
from java.lang import String
from java.sql import ResultSet
from java.sql import SQLException
from java.sql import Statement
from java.util.logging import Level
from java.util import ArrayList
from org.apache.commons.codec.binary import Base64
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule import NoCurrentCaseException
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import MessageNotifyUtil
from org.sleuthkit.autopsy.coreutils import AppSQLiteDB
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestJobContext
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Content
from org.sleuthkit.datamodel import TskCoreException
from org.sleuthkit.datamodel.Blackboard import BlackboardException
from org.sleuthkit.datamodel import Account
from org.sleuthkit.datamodel.blackboardutils.attributes import MessageAttachments
from org.sleuthkit.datamodel.blackboardutils.attributes.MessageAttachments import FileAttachment
from org.sleuthkit.datamodel.blackboardutils.attributes.MessageAttachments import URLAttachment
from org.sleuthkit.datamodel.blackboardutils import CommunicationArtifactsHelper
from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import MessageReadStatus
from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import CommunicationDirection
import json
import traceback
import general
class IMOAnalyzer(general.AndroidComponentAnalyzer):
    """
    Finds the SQLite DB for IMO, parses the DB for contacts & messages,
    and adds artifacts to the case.
    IMO version 9.8.0 has the following database structure:
        - accountdb.db
            -- A 'account' table with the id/name of the IMO account of the owner - used as the self account
        - imofriends.db - a database with contacts and messages
            -- A friends table, with id and name of the friends
                --- buid - application specific unique id
                --- name of contact
            -- A messages table which stores the message details
                --- sender/receiver buid, timestamp, message_type (1: incoming, 0: outgoing), message_read...
                --- 'imdata' column stores a json structure with all the message details, including attachments
                    ---- attachment file path may be specified in local_path or original_path. Original path, if available is a better candidate.
                    ---- For sent files, files seem to get uploaded to IMO Servers. There is no URL available in the imdata though.
    """

    def __init__(self):
        self._logger = Logger.getLogger(self.__class__.__name__)
        # Android package whose databases this analyzer looks for.
        self._PACKAGE_NAME = "com.imo.android.imous"
        # Module name recorded on the artifacts created below.
        self._PARSER_NAME = "IMO Parser"
        self._MESSAGE_TYPE = "IMO Message"
        # IMO app version this parser was written against.
        self._VERSION = "9.8.0"

    def analyze(self, dataSource, fileManager, context):
        """Find the IMO databases in dataSource and add contact/message
        artifacts (plus any attachments) to the current case."""
        selfAccountId = None
        # First pass: read the device owner's IMO user id from accountdb.db.
        accountDbs = AppSQLiteDB.findAppDatabases(dataSource, "accountdb.db", True, self._PACKAGE_NAME)
        for accountDb in accountDbs:
            try:
                accountResultSet = accountDb.runQuery("SELECT uid, name FROM account")
                if accountResultSet:
                    # We can determine the IMO user ID of the device owner.
                    # Therefore we can create and use a app account and use that
                    # as a 'self' account instead of a Device account
                    if not selfAccountId:
                        selfAccountId = accountResultSet.getString("uid")
            except SQLException as ex:
                self._logger.log(Level.WARNING, "Error processing query result for account", ex)
                self._logger.log(Level.WARNING, traceback.format_exc())
            finally:
                accountDb.close()

        # Second pass: pull contacts and messages out of imofriends.db.
        friendsDbs = AppSQLiteDB.findAppDatabases(dataSource, "imofriends.db", True, self._PACKAGE_NAME)
        for friendsDb in friendsDbs:
            try:
                current_case = Case.getCurrentCaseThrows()
                # Use the owner's app account as 'self' when we found one,
                # otherwise fall back to a device account.
                if selfAccountId is not None:
                    friendsDBHelper = CommunicationArtifactsHelper(current_case.getSleuthkitCase(),
                                                                  self._PARSER_NAME,
                                                                  friendsDb.getDBFile(),
                                                                  Account.Type.IMO, Account.Type.IMO, selfAccountId, context.getJobId())
                else:
                    friendsDBHelper = CommunicationArtifactsHelper(current_case.getSleuthkitCase(),
                                                                  self._PARSER_NAME,
                                                                  friendsDb.getDBFile(),
                                                                  Account.Type.IMO, context.getJobId())
                contactsResultSet = friendsDb.runQuery("SELECT buid, name FROM friends")
                if contactsResultSet is not None:
                    while contactsResultSet.next():
                        contactId = contactsResultSet.getString("buid")
                        ## add a TSK_ID attribute with contact's IMO Id
                        additionalAttributes = ArrayList()
                        additionalAttributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ID, self._PARSER_NAME, contactId))
                        friendsDBHelper.addContact( contactsResultSet.getString("name"),     ## contact name
                                                    "",     ## phone
                                                    "",     ## home phone
                                                    "",     ## mobile
                                                    "",     ## email
                                                    additionalAttributes)
                # Join messages to friends so each message row carries the
                # contact name alongside the sender/receiver buid.
                queryString = """
                                SELECT messages.buid AS buid, imdata, last_message, timestamp, message_type, message_read, name
                                FROM messages
                                INNER JOIN friends ON friends.buid = messages.buid
                              """
                messagesResultSet = friendsDb.runQuery(queryString)
                if messagesResultSet is not None:
                    while messagesResultSet.next():
                        direction = ""
                        fromId = None
                        toId = None
                        name = messagesResultSet.getString("name")
                        uniqueId = messagesResultSet.getString("buid")
                        # message_type == 1 marks incoming messages.
                        if (messagesResultSet.getInt("message_type") == 1):
                            direction = CommunicationDirection.INCOMING
                            fromId = uniqueId
                        else:
                            direction = CommunicationDirection.OUTGOING
                            toId = uniqueId
                        message_read = messagesResultSet.getInt("message_read")
                        if (message_read == 1):
                            msgReadStatus = MessageReadStatus.READ
                        elif (message_read == 0):
                            msgReadStatus = MessageReadStatus.UNREAD
                        else:
                            msgReadStatus = MessageReadStatus.UNKNOWN
                        # Timestamps appear to be stored in nanoseconds;
                        # convert to seconds. TODO confirm against schema.
                        timeStamp = messagesResultSet.getLong("timestamp") / 1000000000
                        msgBody = messagesResultSet.getString("last_message")
                        messageArtifact = friendsDBHelper.addMessage(
                                                            self._MESSAGE_TYPE,
                                                            direction,
                                                            fromId,
                                                            toId,
                                                            timeStamp,
                                                            msgReadStatus,
                                                            "",     # subject
                                                            msgBody,
                                                            "")     # thread id
                        # Parse the imdata JSON structure to check if there is an attachment.
                        # If one exists, create an attachment and add to the message.
                        fileAttachments = ArrayList()
                        urlAttachments = ArrayList()
                        imdataJsonStr = messagesResultSet.getString("imdata")
                        if imdataJsonStr is not None:
                            imdata_dict = json.loads(imdataJsonStr)
                            # set to none if the key doesn't exist in the dict
                            attachmentOriginalPath = imdata_dict.get('original_path', None)
                            attachmentLocalPath = imdata_dict.get('local_path', None)
                            # Prefer original_path over local_path when both exist.
                            if attachmentOriginalPath:
                                attachmentPath = attachmentOriginalPath
                            else:
                                attachmentPath = attachmentLocalPath
                            if attachmentPath:
                                # Create a file attachment with given path
                                fileAttachment = FileAttachment(current_case.getSleuthkitCase(), friendsDb.getDBFile().getDataSource(), attachmentPath)
                                fileAttachments.add(fileAttachment)
                                msgAttachments = MessageAttachments(fileAttachments, [])
                                attachmentArtifact = friendsDBHelper.addAttachments(messageArtifact, msgAttachments)
            except SQLException as ex:
                self._logger.log(Level.WARNING, "Error processing query result for IMO friends", ex)
                self._logger.log(Level.WARNING, traceback.format_exc())
            except TskCoreException as ex:
                self._logger.log(Level.SEVERE, "Failed to add IMO message artifacts.", ex)
                self._logger.log(Level.SEVERE, traceback.format_exc())
            except BlackboardException as ex:
                self._logger.log(Level.WARNING, "Failed to post artifacts.", ex)
                self._logger.log(Level.WARNING, traceback.format_exc())
            except NoCurrentCaseException as ex:
                self._logger.log(Level.WARNING, "No case currently open.", ex)
                self._logger.log(Level.WARNING, traceback.format_exc())
            finally:
                friendsDb.close()
| |
# Copyright 2010 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import errno
import signal
import socket
import time
import urllib2
import hashlib
from threading import Lock, Thread, Event
from Queue import Queue
from optparse import OptionParser
from xmlrpclib import ServerProxy
import random
from mule import config, log, util, rls, server
from mule import bdb as db
# Chunk size used by copyobj() when moving file data; overridable via env.
BLOCK_SIZE = int(os.getenv("MULE_BLOCK_SIZE", 64*1024))
# Local directory where cached files are stored.
DEFAULT_DIR = os.getenv("MULE_CACHE_DIR", "/tmp/mule")
# Default RLS server (None if MULE_RLS is not set in the environment).
DEFAULT_RLS = os.getenv("MULE_RLS")
# TCP port the cache's XML-RPC/HTTP server listens on.
CACHE_PORT = 3881
def connect(host='localhost', port=CACHE_PORT):
    """
    Connect to the cache server running at host:port.

    Returns an XML-RPC proxy; no network traffic occurs until a method
    is invoked on it.
    """
    return ServerProxy("http://%s:%s" % (host, port), allow_none=True)
def num_cpus():
    """
    Return the number of CPUs on this host (best effort; at least 1).
    """
    # Preferred: multiprocessing (Python 2.6+).
    try:
        import multiprocessing
        return multiprocessing.cpu_count()
    except (ImportError, NotImplementedError):
        pass
    # Fallback: POSIX sysconf.
    try:
        cpus = int(os.sysconf('SC_NPROCESSORS_ONLN'))
        if cpus > 0:
            return cpus
    except (AttributeError, ValueError):
        pass
    # Last resort: assume a single CPU.
    return 1
def fqdn():
    """
    Get the fully-qualified domain name of this host.
    """
    return socket.getfqdn(socket.gethostname())
def copy(src, dest):
    """
    Copy file src to file dest.
    """
    f = open(src, "rb")
    try:
        g = open(dest, "wb")
        try:
            copyobj(f, g)
        finally:
            g.close()
    finally:
        f.close()
def copyobj(src, dest):
    """
    Copy file-like object src to file-like object dest in
    BLOCK_SIZE-sized chunks.
    """
    buf = src.read(BLOCK_SIZE)
    while buf:
        dest.write(buf)
        buf = src.read(BLOCK_SIZE)
def download(url, path):
    """
    Download url and store it at path.
    """
    f = urllib2.urlopen(url)
    try:
        g = open(path, 'wb')
        try:
            copyobj(f, g)
        finally:
            g.close()
    finally:
        f.close()
def ensure_path(path):
"""
Create path if it doesn't exist
"""
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
class Statistic(object):
    """A thread-safe integer counter."""

    def __init__(self, value=0):
        self.lock = Lock()
        self._value = value

    def increment(self, i=1):
        # Serialize the read-modify-write so concurrent increments
        # are not lost.
        with self.lock:
            self._value += i

    def value(self):
        return self._value
class Statistics(object):
    """Aggregates the cache's operational counters."""

    # All counters tracked; also used to build the stats map.
    _COUNTERS = ('gets', 'puts', 'hits', 'misses',
                 'near_misses', 'failures', 'duplicates')

    def __init__(self):
        self.since = time.ctime()
        for name in self._COUNTERS:
            setattr(self, name, Statistic())

    def get_map(self):
        result = {'since': self.since}
        for name in self._COUNTERS:
            result[name] = getattr(self, name).value()
        return result
class DownloadRequest(object):
    """A pending request to fetch *lfn* from one of the *pfns*."""

    def __init__(self, lfn, pfns):
        self.lfn = lfn
        self.pfns = pfns
        # Set by the worker thread when the download finishes (or fails).
        self.event = Event()
        # Populated by the worker thread on failure.
        self.exception = None
class DownloadThread(Thread):
    """Daemon worker that services DownloadRequests from the cache queue."""

    # Class-wide counter used to give each worker a unique log name.
    num = 1

    def __init__(self, cache):
        Thread.__init__(self)
        self.log = log.get_log("downloader %d" % DownloadThread.num)
        DownloadThread.num += 1
        self.setDaemon(True)
        self.cache = cache

    def run(self):
        # Loop forever pulling requests off the shared queue; the thread is
        # a daemon, so it dies with the process.
        while True:
            req = self.cache.queue.get()
            try:
                self.cache.fetch(req.lfn, req.pfns)
                self.cache.db.update(req.lfn, 'ready')
            except Exception, e:
                # Record the failure on the request so the waiting caller
                # can re-raise it.
                req.exception = e
                self.cache.db.update(req.lfn, 'failed')
            finally:
                # Always wake up the caller, success or failure.
                req.event.set()
class CacheHandler(server.MuleRequestHandler):
    """HTTP handler that serves cached files by uuid from the local cache."""

    def do_GET(self):
        # The last path component of the request URL is the file's uuid.
        head, uuid = os.path.split(self.path)
        path = self.server.cache.get_cfn(uuid)
        f = None
        try:
            f = open(path, 'rb')
            fs = os.fstat(f.fileno())
            self.send_response(200)
            self.send_header("Content-type", "application/octet-stream")
            # fs[6] is st_size.
            self.send_header("Content-Length", str(fs[6]))
            self.send_header("Last-Modified",
                             self.date_time_string(fs.st_mtime))
            self.end_headers()
            # Stream the file body to the client.
            copyobj(f, self.wfile)
        except IOError:
            # Missing/unreadable cache file maps to 404.
            self.send_error(404, "File not found")
        finally:
            if f: f.close()
class Cache(object):
def __init__(self, rls_host, cache_dir, threads, hostname=fqdn()):
self.log = log.get_log("cache")
self.rls_host = rls_host
self.cache_dir = cache_dir
self.hostname = hostname
self.st = Statistics()
self.server = server.MuleServer('', CACHE_PORT,
requestHandler=CacheHandler)
self.server.cache = self
self.lock = Lock()
self.queue = Queue()
for i in range(0, threads):
t = DownloadThread(self)
t.start()
    def stop(self, signum=None, frame=None):
        """Shut down the cache; also installed as the SIGTERM handler."""
        self.log.info("Stopping cache...")
        self.db.close()
        sys.exit(0)
def run(self):
try:
self.log.info("Starting cache...")
self.db = db.CacheDatabase()
signal.signal(signal.SIGTERM, self.stop)
self.server.register_function(self.get)
self.server.register_function(self.multiget)
self.server.register_function(self.put)
self.server.register_function(self.multiput)
self.server.register_function(self.remove)
self.server.register_function(self.list)
self.server.register_function(self.rls_delete)
self.server.register_function(self.rls_add)
self.server.register_function(self.rls_lookup)
self.server.register_function(self.get_bloom_filter)
self.server.register_function(self.stats)
self.server.register_function(self.rls_clear)
self.server.register_function(self.clear)
self.server.serve_forever()
except KeyboardInterrupt:
self.stop()
    def get_uuid(self, lfn):
        """
        Generate a unique ID for lfn
        """
        # SHA-1 hex digest of the logical file name; also used as the
        # cache file name and the public URL path component.
        return hashlib.sha1(lfn).hexdigest()
def get_cfn(self, uuid):
"""
Generate a path for a given uuid in the cache
"""
l1 = uuid[0:2]
l2 = uuid[2:4]
return os.path.join(self.cache_dir, l1, l2, uuid)
    def get_pfn(self, uuid):
        """
        Get a pfn for the given uuid
        """
        # Files are served over HTTP by CacheHandler on CACHE_PORT.
        return "http://%s:%s/%s" % (self.hostname, CACHE_PORT, uuid)
    def get(self, lfn, path, symlink=True):
        """
        Get lfn and store it at path
        """
        self.log.debug("get %s %s" % (lfn, path))
        # Delegate to multiget with a single [lfn, path] pair.
        self.multiget([[lfn, path]], symlink)
def multiget(self, pairs, symlink=True):
"""
For each [lfn, path] pair get lfn and store at path
"""
created = []
ready = []
unready = []
for lfn, path in pairs:
self.st.gets.increment()
rec = self.db.get(lfn)
if rec is None:
self.lock.acquire()
try:
rec = self.db.get(lfn)
if rec is None:
self.db.put(lfn)
created.append((lfn,path))
self.st.misses.increment()
else:
unready.append((lfn,path))
self.st.near_misses.increment()
finally:
self.lock.release()
elif rec['status'] == 'ready':
ready.append((lfn,path))
self.st.hits.increment()
elif rec['status'] == 'unready':
unready.append((lfn,path))
self.st.near_misses.increment()
elif rec['status'] == 'failed':
self.st.failures.increment()
raise Exception("Unable to get %s: failed" % lfn)
else:
raise Exception("Unrecognized status: %s" % rec['status'])
conn = rls.connect(self.rls_host)
if len(created) > 0:
requests = []
mappings = conn.multilookup([i[0] for i in created])
for lfn, path in created:
req = DownloadRequest(lfn, mappings[lfn])
self.queue.put(req)
requests.append(req)
for lfn, path in ready:
self.get_cached(lfn, path, symlink)
if len(created) > 0:
mappings = []
for req in requests:
req.event.wait()
if req.exception is None:
uuid = self.get_uuid(req.lfn)
pfn = self.get_pfn(uuid)
mappings.append([req.lfn, pfn])
if len(mappings) > 0:
conn.multiadd(mappings)
for req in requests:
if req.exception:
raise req.exception
for lfn, path in created:
unready.append((lfn, path))
while len(unready) > 0:
u = unready[:]
unready = []
for lfn, path in u:
rec = self.db.get(lfn)
if rec is None:
raise Exception("Record disappeared for %s" % lfn)
elif rec['status'] == 'ready':
self.get_cached(lfn, path, symlink)
elif rec['status'] == 'failed':
self.st.failures.increment()
raise Exception("Unable to get %s: failed" % lfn)
else:
unready.append((lfn, path))
if len(unready) > 0:
time.sleep(5)
def get_cached(self, lfn, path, symlink=True):
uuid = self.get_uuid(lfn)
cfn = self.get_cfn(uuid)
if not os.path.exists(cfn):
raise Exception("%s was not found in cache" % (lfn))
# This is to support nested directories inside working dirs
ensure_path(os.path.dirname(path))
if symlink:
os.symlink(cfn, path)
else:
copy(cfn, path)
def fetch(self, lfn, pfns):
# Add some randomness so not all files are fetched
# from the same server.
random.shuffle(pfns)
# Also try the lfn if it is a URL
for protocol in ['http://','https://','ftp://']:
if lfn.startswith(protocol):
pfns.append(lfn)
# Try and prefer lfns that are 'file://' URLs
if lfn.startswith('file://'):
pfns.insert(0,lfn)
if len(pfns) == 0:
raise Exception("%s not found in RLS" % lfn)
# Create new name
uuid = self.get_uuid(lfn)
cfn = self.get_cfn(uuid)
if os.path.exists(cfn):
self.log.warning("Duplicate uuid detected: %s" % uuid)
# Create dir if needed
d = os.path.dirname(cfn)
ensure_path(d)
# Download the file
success = False
for p in pfns:
try:
download(p, cfn)
success = True
break
except Exception, e:
self.log.exception(e)
if not success:
raise Exception('Unable to get %s: all pfns failed' % lfn)
def put(self, path, lfn, smart_move=True):
"""
Put path into cache as lfn
"""
self.log.debug("put %s %s" % (path, lfn))
self.multiput([[path, lfn]], smart_move)
def multiput(self, pairs, smart_move=True):
"""
For all [path, lfn] pairs put path into the cache as lfn
"""
# Make sure the files exist
for path, lfn in pairs:
if not os.path.exists(path):
raise Exception("%s does not exist", path)
# Add them to the cache
mappings = []
for path, lfn in pairs:
self.st.puts.increment()
# If its already in cache, then skip it
if self.db.get(lfn) is not None:
self.log.warning("%s already cached" % lfn)
self.st.duplicates.increment()
continue
# Create new names
uuid = self.get_uuid(lfn)
cfn = self.get_cfn(uuid)
pfn = self.get_pfn(uuid)
if os.path.exists(cfn):
self.log.warning("Possible duplicate uuid detected: %s" % uuid)
# Create dir if needed
d = os.path.dirname(cfn)
ensure_path(d)
# Create an entry in the cache db
self.db.put(lfn)
# Move path to cache
if smart_move:
try:
os.rename(path, cfn)
os.symlink(cfn, path)
except OSError:
#Looks like we can't rename, probably because the files are on different volumes
self.log.warning("Simple rename failed, falling back to copy")
copy(path, cfn)
else:
copy(path, cfn)
# Update the cache db
self.db.update(lfn, 'ready')
mappings.append([lfn, pfn])
# Register lfn->pfn mappings
conn = rls.connect(self.rls_host)
conn.multiadd(mappings)
def remove(self, lfn, force=False):
"""
Remove lfn from cache
"""
self.log.debug("remove %s" % lfn)
rec = self.db.get(lfn)
if rec is None:
return
if not force and rec['status'] != 'ready':
raise Exception('Cannot remove %s' % lfn)
# Remove from database
self.db.remove(lfn)
if rec['status'] == 'ready':
uuid = self.get_uuid(lfn)
# Remove RLS mapping
pfn = self.get_pfn(uuid)
conn = rls.connect(self.rls_host)
conn.delete(lfn, pfn)
# Remove cached copy
cfn = self.get_cfn(uuid)
if os.path.isfile(cfn):
os.unlink(cfn)
def list(self):
"""
List all cached files
"""
self.log.debug("list")
return self.db.list()
def rls_delete(self, lfn, pfn=None):
"""
Delete lfn->pfn mapping
"""
self.log.debug("delete %s %s" % (lfn, pfn))
conn = rls.connect(self.rls_host)
conn.delete(lfn, pfn)
def rls_add(self, lfn, pfn):
"""
Add lfn->pfn mapping to rls
"""
self.log.debug("add %s %s" % (lfn, pfn))
conn = rls.connect(self.rls_host)
conn.add(lfn, pfn)
def rls_lookup(self, lfn):
"""
Lookup RLS mappings for lfn
"""
self.log.debug("lookup %s" % lfn)
conn = rls.connect(self.rls_host)
return conn.lookup(lfn)
def get_bloom_filter(self, m, k):
"""
Return a bloom filter containing all the lfns in the cache
"""
return self.db.get_bloom_filter(m, k).tobase64()
def stats(self):
"""
Return the statistics for this cache
"""
return self.st.get_map()
def clear(self):
# Clear database
self.db.clear()
# Remove files in cache
def remove_all(directory):
for i in os.listdir(directory):
path = os.path.join(directory, i)
if os.path.isdir(path):
remove_all(path)
else:
os.unlink(path)
remove_all(self.cache_dir)
# Clear stats
self.st = Statistics()
def rls_clear(self):
self.log.debug("rls clear")
conn = rls.connect(self.rls_host)
conn.clear()
def main():
parser = OptionParser()
parser.add_option("-f", "--foreground", action="store_true",
dest="foreground", default=False,
help="Do not fork [default: fork]")
parser.add_option("-r", "--rls", action="store", dest="rls",
default=DEFAULT_RLS, metavar="HOST",
help="RLS host [default: %default]")
parser.add_option("-d", "--dir", action="store", dest="cache_dir",
default=DEFAULT_DIR, metavar="DIR",
help="Cache directory [default: %default]")
parser.add_option("-t", "--threads", action="store", dest="threads",
default=num_cpus(), metavar="N",
help="Number of download threads [default: %default]")
(options, args) = parser.parse_args()
if len(args) > 0:
parser.error("Invalid argument")
if not options.rls:
parser.error("Specify --rls or MULE_RLS environment")
if os.path.isfile(options.cache_dir):
parser.error("--directory argument is a file")
if not os.path.isdir(options.cache_dir):
os.makedirs(options.cache_dir)
# See if RLS is ready
try:
conn = rls.connect(options.rls)
conn.ready()
except Exception, e:
print "WARNING: RLS is not ready"
# Fork
if not options.foreground:
util.daemonize()
os.chdir(config.get_home())
# Configure logging (after the fork)
log.configure()
l = log.get_log("cache")
try:
a = Cache(options.rls, options.cache_dir, options.threads)
a.run()
except Exception, e:
l.exception(e)
sys.exit(1)
# Script entry point: run the cache daemon.
if __name__ == '__main__':
    main()
| |
from numpy import array, compress, zeros
import wx
from wx.lib.mixins.listctrl import ListCtrlAutoWidthMixin
from spacq.interface.list_columns import ListParser
"""
Embeddable, generic, virtual, tabular display.
"""
class VirtualListCtrl(wx.ListCtrl, ListCtrlAutoWidthMixin):
    """
    A generic virtual list.

    Rows are not stored in the widget itself: wx requests cell text on
    demand via OnGetItemText, which reads from a pre-truncated string
    copy of the data (display_data).
    """

    # Maximum number of characters of a value shown per cell.
    max_value_len = 250 # Characters.

    @staticmethod
    def find_type(value):
        """
        Determine the type of a column based on a single value.

        The type is one of: scalar, list, string.
        """
        # Scalar: anything float() accepts.
        try:
            float(value)
        except ValueError:
            pass
        else:
            return 'scalar'

        # List: anything the ListParser grammar accepts.
        try:
            ListParser()(value)
        except ValueError:
            pass
        else:
            return 'list'

        # Fallback: treat everything else as a plain string.
        return 'string'

    def __init__(self, parent, *args, **kwargs):
        wx.ListCtrl.__init__(self, parent,
            style=wx.LC_REPORT|wx.LC_VIRTUAL|wx.LC_HRULES|wx.LC_VRULES,
            *args, **kwargs)
        ListCtrlAutoWidthMixin.__init__(self)

        self.reset()

    def reset(self):
        # Drop all data and metadata; widget columns are cleared
        # separately (see SetValue's ClearAll call).
        self.headings = []
        self.data = array([])
        self.filtered_data = None
        self.display_data = array([])
        self.types = []

    def refresh_with_values(self, data):
        """Rebuild the truncated display copy of data and repaint."""
        self.ItemCount = len(data)

        if self.ItemCount > 0:
            # Fixed-width byte-string array sized to max_value_len.
            self.display_data = zeros(data.shape, dtype='|S{0}'.format(self.max_value_len))

            for i, _ in enumerate(self.headings):
                # Truncate for display.
                self.display_data[:,i] = [x[:self.max_value_len] for x in data[:,i]]

        self.Refresh()

    def apply_filter(self, f, afresh=False):
        """
        Set the data to be the old data, along with the application of a filter.

        f is a function of two parameters: the index of the row and the row itself.
        f must return True if the row is to be kept and False otherwise.

        If afresh is True, all old filtered data is discarded.
        Otherwise, a new filter can be quickly applied.
        """
        if afresh:
            self.filtered_data = None

        # Filters compose: apply on top of the previous result when present.
        if self.filtered_data is not None:
            original_set = self.filtered_data
        else:
            original_set = self.data

        self.filtered_data = compress([f(i, x) for i, x in enumerate(original_set)], original_set, axis=0)

        self.refresh_with_values(self.filtered_data)

    def GetValue(self, types=None):
        """Return (headings, data, types) restricted to the given types."""
        # Get all types by default.
        if types is None:
            types = set(self.types)
        else:
            types = set(types)

        # Find column indices of the correct type.
        idxs = [i for i, t in enumerate(self.types) if t in types]

        # Use the filtered view when a filter is active.
        if self.filtered_data is not None:
            data = self.filtered_data
        else:
            data = self.data

        return ([self.headings[i] for i in idxs], data[:,idxs], [self.types[i] for i in idxs])

    def SetValue(self, headings, data):
        """
        headings: A list of strings.
        data: A 2D NumPy array.
        """
        self.ClearAll()
        self.reset()

        self.headings = headings
        self.data = data
        self.refresh_with_values(self.data)

        if self.ItemCount > 0:
            width, height = self.GetSize()
            # Give some room for the scrollbar.
            # NOTE(review): integer division under Python 2, so columns may
            # collectively lose a few pixels.
            col_width = (width - 50) / len(self.headings)

            for i, heading in enumerate(self.headings):
                self.InsertColumn(i, heading, width=col_width)
                # Column type is inferred from the first row only.
                type = self.find_type(data[0,i])  # NOTE(review): shadows builtin
                self.types.append(type)

    def OnGetItemText(self, item, col):
        """
        Return cell value for LC_VIRTUAL.
        """
        return self.display_data[item,col]
class TabularDisplayPanel(wx.Panel):
    """
    A panel wrapping a VirtualListCtrl to show arbitrary tabular data.
    """

    def __init__(self, parent, *args, **kwargs):
        wx.Panel.__init__(self, parent, *args, **kwargs)

        # A single vertical sizer holding the table, which fills the panel.
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.table = VirtualListCtrl(self)
        sizer.Add(self.table, proportion=1, flag=wx.EXPAND)
        self.SetSizer(sizer)

    def __len__(self):
        return self.table.ItemCount

    def from_csv_data(self, has_header, values):
        """
        Import the given CSV data into the table.

        If has_header is True, the first row is treated specially.
        """
        if has_header:
            headers = values[0]
            rows = array(values[1:])
        else:
            headers = [''] * len(values[0])
            rows = array(values)

        # Give every blank heading a default label (in place).
        for idx, header in enumerate(headers):
            if not header:
                headers[idx] = 'Column {0}'.format(idx + 1)

        self.SetValue(headers, rows)

    def GetValue(self, *args, **kwargs):
        # Delegate straight to the table.
        return self.table.GetValue(*args, **kwargs)

    def SetValue(self, headings, values):
        # Delegate straight to the table.
        self.table.SetValue(headings, values)
class TabularDisplayFrame(wx.Frame):
    """
    A top-level frame hosting a single TabularDisplayPanel.
    """

    def __init__(self, parent, *args, **kwargs):
        wx.Frame.__init__(self, parent, *args, **kwargs)

        # The display panel fills the entire frame.
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.display_panel = TabularDisplayPanel(self)
        sizer.Add(self.display_panel, proportion=1, flag=wx.EXPAND)
        self.SetSizer(sizer)
| |
#!/usr/bin/env python
"""
Sign a user's SSH public key.
Copyright 2017-2022 Nicolas BEGUIER
Licensed under the Apache License, Version 2.0
Written by Nicolas BEGUIER (nicolas_beguier@hotmail.com)
"""
# pylint: disable=invalid-name,too-many-return-statements,no-self-use,too-many-locals
# pylint: disable=too-many-branches,too-few-public-methods,too-many-statements
# pylint: disable=too-many-nested-blocks,arguments-differ,W1113
from json import dumps
from os import remove
from tempfile import NamedTemporaryFile
from urllib.parse import unquote_plus
# Third party library imports
from cheroot.server import HTTPServer
from cheroot.ssl.builtin import BuiltinSSLAdapter
import web
# Own library
from ssh_utils import get_fingerprint
import lib.constants as constants
import lib.tools as tools
# DEBUG
# from pdb import set_trace as st
# Application version; reported by the /health endpoint below.
VERSION = '2.3.1'

# Load configuration, CLI arguments and the helper toolbox once at import
# time; these module-level globals are shared by every handler class.
SERVER_OPTS, ARGS, TOOLS = tools.loadconfig(version=VERSION)
class Admin():
    """
    Admin endpoint: activate, revoke, inspect or delete user keys.
    """

    def POST(self, username):
        """
        Revoke or Active keys.

        /admin/<username>
            revoke=true/false => Revoke user
            status=true/false => Display status
        """
        # LDAP authentication (admin credentials required).
        is_admin_auth, message = tools.ldap_authentification(SERVER_OPTS, admin=True)
        if not is_admin_auth:
            return tools.response_render(message, http_code='401 Unauthorized')
        payload, message = tools.data2map()
        if message:
            return tools.response_render(message, http_code='400 Bad Request')
        # Both flags default to False when absent from the payload.
        if 'revoke' in payload:
            do_revoke = payload['revoke'].lower() == 'true'
        else:
            do_revoke = False
        if 'status' in payload:
            do_status = payload['status'].lower() == 'true'
        else:
            do_status = False
        pg_conn, message = TOOLS.pg_connection()
        if pg_conn is None:
            return tools.response_render(message, http_code='503 Service Unavailable')
        cur = pg_conn.cursor()
        # 'all' + status: dump every user's status.
        # NOTE(review): this return path leaves cur/pg_conn open.
        if username == 'all' and do_status:
            return tools.response_render(
                TOOLS.list_keys(),
                content_type='application/json')
        # Search if key already exists
        cur.execute(
            """
            SELECT STATE FROM USERS WHERE NAME=(%s)
            """, (username,))
        user_state = cur.fetchone()
        # If user dont exist
        if user_state is None:
            cur.close()
            pg_conn.close()
            message = 'User does not exists.'
        elif do_revoke:
            # Mark the user revoked (STATE=1)...
            cur.execute(
                """
                UPDATE USERS SET STATE=1 WHERE NAME=(%s)
                """, (username,))
            pg_conn.commit()
            # ...and record the key in the revocation table exactly once.
            pubkey = tools.get_pubkey(username, pg_conn)
            cur.execute(
                """
                SELECT 1 FROM REVOCATION WHERE SSH_KEY=(%s)
                """, (pubkey,))
            if cur.fetchone() is None:
                cur.execute(
                    """
                    INSERT INTO REVOCATION VALUES ((%s), (%s), (%s))
                    """, (pubkey, tools.timestamp(), username))
                pg_conn.commit()
                message = 'Revoke user={}.'.format(username)
            else:
                message = 'user {} already revoked.'.format(username)
            cur.close()
            pg_conn.close()
        # Display status
        elif do_status:
            # NOTE(review): this return path leaves cur/pg_conn open.
            return tools.response_render(
                TOOLS.list_keys(username=username),
                content_type='application/json')
        # If user is in PENDING state
        elif user_state[0] == constants.STATES['PENDING']:
            cur.execute(
                """
                UPDATE USERS SET STATE=0 WHERE NAME=(%s)
                """, (username,))
            pg_conn.commit()
            cur.close()
            pg_conn.close()
            message = 'Active user=%s. SSH Key active but need to be signed.' % username
        # If user is in REVOKED state
        elif user_state[0] == constants.STATES['REVOKED']:
            cur.execute('UPDATE USERS SET STATE=0 WHERE NAME=(%s)', (username,))
            pg_conn.commit()
            cur.close()
            pg_conn.close()
            message = 'Active user=%s. SSH Key active but need to be signed.' % username
        else:
            cur.close()
            pg_conn.close()
            message = 'user=%s already active. Nothing done.' % username
        return tools.response_render(message)

    def PATCH(self, username):
        """
        Set the first matching key to the given value.

        /admin/<username>
            key=value => Set the key value. Keys are in status output.
        """
        # LDAP authentication (admin credentials required).
        is_admin_auth, message = tools.ldap_authentification(SERVER_OPTS, admin=True)
        if not is_admin_auth:
            return tools.response_render(message, http_code='401 Unauthorized')
        pg_conn, message = TOOLS.pg_connection()
        if pg_conn is None:
            return tools.response_render(message, http_code='503 Service Unavailable')
        cur = pg_conn.cursor()
        payload, message = tools.data2map()
        if message:
            return tools.response_render(message, http_code='400 Bad Request')
        # Only 'expiry' is currently handled; the first match wins.
        # NOTE(review): when no key matches, cur/pg_conn are left open.
        for key, value in payload.items():
            if key == 'expiry':
                cur.execute(
                    """
                    UPDATE USERS SET EXPIRY=(%s) WHERE NAME=(%s)
                    """, (value, username))
                pg_conn.commit()
                cur.close()
                pg_conn.close()
                return tools.response_render(
                    'OK: %s=%s for %s' % (key, value, username))
        return tools.response_render('WARNING: No key found...')

    def DELETE(self, username):
        """
        Delete keys (but DOESN'T REVOKE)

        /admin/<username>
        """
        # LDAP authentication (admin credentials required).
        is_admin_auth, message = tools.ldap_authentification(SERVER_OPTS, admin=True)
        if not is_admin_auth:
            return tools.response_render(message, http_code='401 Unauthorized')
        pg_conn, message = TOOLS.pg_connection()
        if pg_conn is None:
            return tools.response_render(message, http_code='503 Service Unavailable')
        cur = pg_conn.cursor()
        # Drop the user row entirely; the key is NOT added to REVOCATION.
        cur.execute(
            """
            DELETE FROM USERS WHERE NAME=(%s)
            """, (username,))
        pg_conn.commit()
        cur.close()
        pg_conn.close()
        return tools.response_render('OK')
class Ca():
    """
    Serve the CA public key.
    """

    def GET(self):
        """
        Return ca.
        """
        # The CA public key sits next to the private key, with a .pub suffix.
        ca_pub_path = SERVER_OPTS['ca'] + '.pub'
        return tools.response_render(
            open(ca_pub_path, 'rb'),
            content_type='application/octet-stream')
class ClientStatus():
    """
    ClientStatus main class.
    """

    def POST(self):
        """
        Get client key status.

        /client/status
        """
        # LDAP authentication
        is_auth, message = tools.ldap_authentification(SERVER_OPTS)
        if not is_auth:
            return tools.response_render(message, http_code='401 Unauthorized')

        payload, message = tools.data2map()
        if message:
            return tools.response_render(message, http_code='400 Bad Request')

        # realname is mandatory for a status lookup.
        if 'realname' not in payload:
            return tools.response_render(
                'Error: No realname option given.',
                http_code='400 Bad Request')
        realname = unquote_plus(payload['realname'])

        return tools.response_render(
            TOOLS.list_keys(realname=realname),
            content_type='application/json')
class Client():
    """
    Client endpoint: register/update a public key (PUT) and have it
    signed by the CA (POST).
    """

    def POST(self):
        """
        Ask to sign pub key.

        /client
            username=xxxxxx => Unique username. Used by default to connect on server.
            realname=xxxxx@domain.fr => This LDAP/AD user.
            # Optional
            admin_force=true|false
        """
        # LDAP authentication
        is_auth, message = tools.ldap_authentification(SERVER_OPTS)
        if not is_auth:
            return tools.response_render(message, http_code='401 Unauthorized')
        # Check if user is an admin and want to force signature when db fail
        force_sign = False
        # LDAP ADMIN authentication
        is_admin_auth, message = tools.ldap_authentification(
            SERVER_OPTS, admin=True)
        payload, message = tools.data2map()
        if message:
            return tools.response_render(message, http_code='400 Bad Request')
        # Forced signing needs admin auth AND the server-side failover
        # switch AND an explicit admin_force=true in the request.
        if is_admin_auth and SERVER_OPTS['admin_db_failover'] \
            and 'admin_force' in payload and payload['admin_force'].lower() == 'true':
            force_sign = True
        # Get username
        if 'username' in payload:
            username = payload['username']
        else:
            return tools.response_render(
                'Error: No username option given.',
                http_code='400 Bad Request')
        # 'all' is reserved by the admin API.
        if username == 'all':
            return tools.response_render(
                "Error: username not valid.",
                http_code='400 Bad Request')
        # Get realname
        if 'realname' in payload:
            realname = unquote_plus(payload['realname'])
        else:
            return tools.response_render(
                'Error: No realname option given.',
                http_code='400 Bad Request')
        # Get public key
        if 'pubkey' in payload:
            pubkey = tools.unquote_custom(payload['pubkey'])
        else:
            return tools.response_render(
                'Error: No pubkey given.',
                http_code='400 Bad Request')
        # Write the submitted key to a temp file so ssh-keygen-based
        # helpers can read it; delete=False because it is reopened by name.
        with NamedTemporaryFile(delete=False) as tmp_pubkey:
            tmp_pubkey.write(bytes(pubkey, 'utf-8'))
        pubkey_fingerprint = get_fingerprint(tmp_pubkey.name)
        if pubkey_fingerprint == 'Unknown':
            remove(tmp_pubkey.name)
            return tools.response_render(
                'Error : Public key unprocessable',
                http_code='422 Unprocessable Entity')
        pg_conn, message = TOOLS.pg_connection()
        # Admin force signature case: sign with default expiry/principal
        # even though the database is down.
        if pg_conn is None and force_sign:
            cert_contents = TOOLS.sign_key(tmp_pubkey.name, username, '+12h', username)
            remove(tmp_pubkey.name)
            return tools.response_render(cert_contents, content_type='application/octet-stream')
        # Check if db is up
        if pg_conn is None:
            remove(tmp_pubkey.name)
            return tools.response_render(message, http_code='503 Service Unavailable')
        cur = pg_conn.cursor()
        # Search if user already exists
        cur.execute(
            """
            SELECT NAME,REALNAME,STATE,EXPIRY,PRINCIPALS,SSH_KEY FROM USERS
            WHERE NAME=lower(%s)
            """, (username,))
        user = cur.fetchone()
        if user is None:
            cur.close()
            pg_conn.close()
            remove(tmp_pubkey.name)
            return tools.response_render(
                'Error : User absent, please create an account.',
                http_code='400 Bad Request')
        # Get database key fingerprint (user[5] is the stored SSH_KEY).
        with NamedTemporaryFile(delete=False) as db_pubkey:
            db_pubkey.write(bytes(user[5], 'utf-8'))
        db_pubkey_fingerprint = get_fingerprint(db_pubkey.name)
        remove(db_pubkey.name)
        if db_pubkey_fingerprint == 'Unknown':
            remove(tmp_pubkey.name)
            return tools.response_render(
                'Error : Public key from database unprocessable',
                http_code='422 Unprocessable Entity')
        # The submitted (username, realname, key) must match the stored row.
        if username != user[0] or \
            realname != user[1] or \
            db_pubkey_fingerprint != pubkey_fingerprint:
            cur.close()
            pg_conn.close()
            remove(tmp_pubkey.name)
            return tools.response_render(
                'Error : (username, realname, pubkey) triple mismatch.',
                http_code='401 Unauthorized')
        status = user[2]
        expiry = user[3]
        # Merge the stored custom principals with LDAP group memberships.
        custom_principals = tools.clean_principals_output(user[4], username, shell=True)
        list_membership, _ = tools.get_memberof(
            realname,
            SERVER_OPTS)
        full_principals = tools.merge_principals(custom_principals, list_membership, SERVER_OPTS)
        # Any non-zero state (PENDING/REVOKED/...) blocks signing.
        if status > 0:
            cur.close()
            pg_conn.close()
            remove(tmp_pubkey.name)
            return tools.response_render("Status: %s" % constants.STATES[user[2]])
        cert_contents = TOOLS.sign_key(
            tmp_pubkey.name, username, expiry, full_principals, db_cursor=cur)
        remove(tmp_pubkey.name)
        pg_conn.commit()
        cur.close()
        pg_conn.close()
        return tools.response_render(
            cert_contents,
            content_type='application/octet-stream')

    def PUT(self):
        """
        This function permit to add or update a ssh public key.

        /client
            username=xxxxxx => Unique username. Used by default to connect on server.
            realname=xxxxx@domain.fr => This LDAP/AD user.
        """
        # LDAP authentication
        is_auth, message = tools.ldap_authentification(SERVER_OPTS)
        if not is_auth:
            return tools.response_render(message, http_code='401 Unauthorized')
        payload, message = tools.data2map()
        if message:
            return tools.response_render(message, http_code='400 Bad Request')
        if 'username' in payload:
            username = payload['username']
        else:
            return tools.response_render(
                'Error: No username option given.',
                http_code='400 Bad Request')
        # 'all' is reserved by the admin API.
        if username == 'all':
            return tools.response_render(
                "Error: username not valid.",
                http_code='400 Bad Request')
        if 'realname' in payload:
            realname = unquote_plus(payload['realname'])
        else:
            return tools.response_render(
                'Error: No realname option given.',
                http_code='400 Bad Request')
        if constants.PATTERN_REALNAME.match(realname) is None:
            return tools.response_render(
                "Error: realname doesn't match pattern",
                http_code='400 Bad Request')
        # Get public key
        if 'pubkey' in payload:
            pubkey = tools.unquote_custom(payload['pubkey'])
        else:
            return tools.response_render(
                'Error: No pubkey given.',
                http_code='400 Bad Request')
        # Validate the key by computing its fingerprint from a temp file.
        with NamedTemporaryFile(delete=False) as tmp_pubkey:
            tmp_pubkey.write(bytes(pubkey, 'utf-8'))
        pubkey_fingerprint = get_fingerprint(tmp_pubkey.name)
        if pubkey_fingerprint == 'Unknown':
            remove(tmp_pubkey.name)
            return tools.response_render(
                'Error : Public key unprocessable',
                http_code='422 Unprocessable Entity')
        pg_conn, message = TOOLS.pg_connection()
        if pg_conn is None:
            remove(tmp_pubkey.name)
            return tools.response_render(message, http_code='503 Service Unavailable')
        cur = pg_conn.cursor()
        # Search if key already exists
        cur.execute(
            """
            SELECT 1 FROM USERS WHERE NAME=(%s)
            """, (username,))
        user = cur.fetchone()
        # CREATE NEW USER
        if user is None:
            # Column order assumed: NAME, REALNAME, STATE, EXPIRATION,
            # SSH_KEY_HASH, SSH_KEY, EXPIRY, PRINCIPALS -- TODO confirm
            # against schema.
            cur.execute(
                """
                INSERT INTO USERS VALUES ((%s), (%s), (%s), (%s), (%s), (%s), (%s), (%s))
                """, (
                    username, realname, constants.STATES['PENDING'],
                    0, pubkey_fingerprint, pubkey, '+12h', username))
            pg_conn.commit()
            cur.close()
            pg_conn.close()
            remove(tmp_pubkey.name)
            return tools.response_render(
                'Create user=%s. Pending request.' % username,
                http_code='201 Created')
        # Check if realname is the same
        cur.execute(
            """
            SELECT 1 FROM USERS WHERE NAME=(%s) AND REALNAME=lower((%s))
            """, (username, realname))
        if cur.fetchone() is None:
            pg_conn.commit()
            cur.close()
            pg_conn.close()
            remove(tmp_pubkey.name)
            return tools.response_render(
                'Error : (username, realname) couple mismatch.',
                http_code='401 Unauthorized')
        # Update entry into database: new key resets the user to PENDING.
        # NOTE(review): this writes EXPIRATION while POST reads EXPIRY --
        # presumably two distinct columns; verify against the schema.
        cur.execute(
            """
            UPDATE USERS
            SET SSH_KEY=(%s),SSH_KEY_HASH=(%s), STATE=(%s), EXPIRATION=0
            WHERE NAME=(%s)
            """, (pubkey, pubkey_fingerprint, constants.STATES['PENDING'], username))
        pg_conn.commit()
        cur.close()
        pg_conn.close()
        remove(tmp_pubkey.name)
        return tools.response_render('Update user=%s. Pending request.' % username)
class ClusterStatus():
    """
    ClusterStatus main class.
    """

    def GET(self):
        """
        /cluster/status
        """
        alive_nodes, dead_nodes = TOOLS.cluster_alived()

        # One entry per node; dead nodes override alive ones on overlap,
        # matching the original update order.
        message = {node: {'status': 'OK'} for node in alive_nodes}
        message.update({node: {'status': 'KO'} for node in dead_nodes})

        return tools.response_render(
            dumps(message),
            content_type='application/json')
class Health():
    """
    Class Health
    """

    def GET(self):
        """
        Return a health check
        """
        # Static service identity; useful for monitoring probes.
        health = {
            'name': 'cassh',
            'version': VERSION,
        }
        return tools.response_render(
            dumps(health, indent=4, sort_keys=True),
            content_type='application/json')
class Krl():
    """
    Class KRL.
    """

    def GET(self):
        """
        Return krl.
        """
        # The key revocation list is produced by the toolbox.
        latest_krl = TOOLS.get_last_krl()
        return latest_krl
class Ping():
    """
    Class Ping
    """

    def GET(self):
        """
        Return a pong
        """
        # Trivial liveness probe.
        reply = 'pong'
        return tools.response_render(reply)
class Principals():
    """
    Manage the custom SSH principals attached to a user.
    """

    def POST(self, username):
        """
        Manage user principals.

        /admin/<username>
            add=a,b    => append principals
            remove=a,b => delete principals
            update=a,b => replace principals
            purge=true => reset principals to the username
        """
        # LDAP authentication (admin credentials required).
        is_admin_auth, message = tools.ldap_authentification(SERVER_OPTS, admin=True)
        if not is_admin_auth:
            return tools.response_render(message, http_code='401 Unauthorized')
        pg_conn, message = TOOLS.pg_connection()
        if pg_conn is None:
            return tools.response_render(message, http_code='503 Service Unavailable')
        cur = pg_conn.cursor()
        payload, message = tools.data2map()
        if message:
            # BUG FIX: close the connection before bailing out
            # (the original leaked cur/pg_conn on this path).
            cur.close()
            pg_conn.close()
            return tools.response_render(message, http_code='400 Bad Request')
        if 'add' not in payload and \
            'remove' not in payload and \
            'update' not in payload and \
            'purge' not in payload:
            # BUG FIX: close the connection before bailing out.
            cur.close()
            pg_conn.close()
            return tools.response_render(
                '[ERROR] Unknown action',
                http_code='400 Bad Request')
        # Search if username exists
        values = {'username': username}
        cur.execute(
            """
            SELECT NAME,PRINCIPALS,REALNAME FROM USERS WHERE NAME=(%(username)s)
            """, values)
        user = cur.fetchone()
        # If user dont exist
        if user is None:
            cur.close()
            pg_conn.close()
            return tools.response_render(
                "ERROR: {} doesn't exist".format(username),
                http_code='400 Bad Request')
        values['principals'] = user[1]
        for key, value in payload.items():
            value = unquote_plus(value)
            if key == 'add':
                # Validate every submitted principal before appending.
                for principal in value.split(','):
                    if constants.PATTERN_PRINCIPALS.match(principal) is None:
                        # BUG FIX: close the connection before bailing out.
                        cur.close()
                        pg_conn.close()
                        return tools.response_render(
                            "Error: principal doesn't match pattern {}".format(
                                constants.PATTERN_PRINCIPALS.pattern),
                            http_code='400 Bad Request')
                if values['principals']:
                    values['principals'] += ',' + value
                else:
                    values['principals'] = value
            elif key == 'remove':
                principals = values['principals'].split(',')
                for principal in value.split(','):
                    if constants.PATTERN_PRINCIPALS.match(principal) is None:
                        # BUG FIX: close the connection before bailing out.
                        cur.close()
                        pg_conn.close()
                        return tools.response_render(
                            "Error: principal doesn't match pattern {}".format(
                                constants.PATTERN_PRINCIPALS.pattern),
                            http_code='400 Bad Request')
                    if principal in principals:
                        principals.remove(principal)
                values['principals'] = ','.join(principals)
            elif key == 'update':
                for principal in value.split(','):
                    if constants.PATTERN_PRINCIPALS.match(principal) is None:
                        # BUG FIX: close the connection before bailing out.
                        cur.close()
                        pg_conn.close()
                        return tools.response_render(
                            "Error: principal doesn't match pattern {}".format(
                                constants.PATTERN_PRINCIPALS.pattern),
                            http_code='400 Bad Request')
                values['principals'] = value
            elif key == 'purge':
                values['principals'] = username
        # Strip principals that are managed via LDAP group membership
        # before storing; they are re-merged for display below.
        list_membership, _ = tools.get_memberof(
            user[2],
            SERVER_OPTS)
        values['principals'] = tools.truncate_principals(
            values['principals'],
            list_membership,
            SERVER_OPTS)
        cur.execute(
            """
            UPDATE USERS SET PRINCIPALS=(%(principals)s) WHERE NAME=(%(username)s)
            """, values)
        pg_conn.commit()
        cur.close()
        pg_conn.close()
        # Add LDAP principals
        values['principals'] = tools.merge_principals(
            values['principals'],
            list_membership,
            SERVER_OPTS)
        return tools.response_render(
            "OK: {} principals are '{}'".format(username, values['principals']))
class PrincipalsSearch():
    """
    Search users' principals by filter.
    """

    def POST(self):
        """
        Search user's principals by filter.

        /admin/principals/search
            filter=a,b => only users holding one of these principals
            filter=    => every user's full principal list
        """
        # LDAP authentication (admin credentials required).
        is_admin_auth, message = tools.ldap_authentification(SERVER_OPTS, admin=True)
        if not is_admin_auth:
            return tools.response_render(message, http_code='401 Unauthorized')
        pg_conn, message = TOOLS.pg_connection()
        if pg_conn is None:
            return tools.response_render(message, http_code='503 Service Unavailable')
        cur = pg_conn.cursor()
        payload, message = tools.data2map()
        if message:
            # BUG FIX: close the connection before bailing out
            # (the original leaked cur/pg_conn on this path).
            cur.close()
            pg_conn.close()
            return tools.response_render(message, http_code='400 Bad Request')
        if 'filter' not in payload:
            # BUG FIX: close the connection before bailing out.
            cur.close()
            pg_conn.close()
            return tools.response_render(
                '[ERROR] Unknown action',
                http_code='400 Bad Request')
        cur.execute(
            """
            SELECT NAME,PRINCIPALS,REALNAME FROM USERS
            """)
        all_principals = cur.fetchall()
        pg_conn.commit()
        cur.close()
        pg_conn.close()
        # BUG FIX: ldap_conn was only assigned when LDAP is enabled, but is
        # referenced unconditionally below -> NameError with LDAP disabled.
        # Default to None (assumed: get_memberof opens its own connection
        # when reuse is None -- TODO confirm).
        ldap_conn = None
        if SERVER_OPTS['ldap']:
            # Reuse one LDAP connection across all membership lookups.
            ldap_conn, _ = tools.get_ldap_conn(
                SERVER_OPTS['ldap_host'],
                SERVER_OPTS['ldap_username'],
                SERVER_OPTS['ldap_password'],
                SERVER_OPTS['ldap_protocol'])
        result = dict()
        for key, value in payload.items():
            value = unquote_plus(value)
            if key == 'filter' and value == '':
                # Empty filter: return every user's merged principal list.
                for name, custom_principals, realname in all_principals:
                    if not isinstance(custom_principals, str):
                        continue
                    list_membership, _ = tools.get_memberof(
                        realname,
                        SERVER_OPTS,
                        reuse=ldap_conn)
                    result[name] = tools.merge_principals(
                        custom_principals,
                        list_membership,
                        SERVER_OPTS).split(',')
            elif key == 'filter':
                # Keep only users holding at least one requested principal.
                for principal in value.split(','):
                    for name, custom_principals, realname in all_principals:
                        if not isinstance(custom_principals, str):
                            continue
                        list_membership, _ = tools.get_memberof(
                            realname,
                            SERVER_OPTS,
                            reuse=ldap_conn)
                        principals = tools.merge_principals(
                            custom_principals,
                            list_membership,
                            SERVER_OPTS).split(',')
                        if principal in principals:
                            if name not in result:
                                result[name] = list()
                            result[name].append(principal)
        return tools.response_render(dumps(result))
class TestAuth():
    """
    Test authentication
    """

    def POST(self):
        """
        Check the caller's LDAP credentials and report the outcome.
        """
        is_auth, message = tools.ldap_authentification(SERVER_OPTS)
        if is_auth:
            return tools.response_render('OK')
        return tools.response_render(message, http_code='401 Unauthorized')
class MyApplication(web.application):
    """
    web.application subclass that lets the listen port be configured.
    """

    def run(self, port=int(SERVER_OPTS['port']), *middleware):
        # Bind on all interfaces at the configured port.
        return web.httpserver.runsimple(
            self.wsgifunc(*middleware), ('0.0.0.0', port))
if __name__ == "__main__":
    # Wrap the listening socket in TLS when configured.
    if SERVER_OPTS['ssl']:
        HTTPServer.ssl_adapter = BuiltinSSLAdapter(
            certificate=SERVER_OPTS['ssl_public_key'],
            private_key=SERVER_OPTS['ssl_private_key'])
    if ARGS.verbose:
        print('SSL: %s' % SERVER_OPTS['ssl'])
        print('LDAP: %s' % SERVER_OPTS['ldap'])
        print('Admin DB Failover: %s' % SERVER_OPTS['admin_db_failover'])
    # Map URL patterns to the handler classes defined in this module
    # (web.py convention: globals() resolves the class names).
    APP = MyApplication(constants.URLS, globals(), autoreload=False)
    web.config.debug = SERVER_OPTS['debug']
    if SERVER_OPTS['debug']:
        print('Debug mode on')
    APP.run()
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a Bokeh Application Handler to build up documents by running
the code from ``main.py`` or ``main.ipynb`` files in specified directories.
The directory may also optionally contain:
* A ``server_lifecycle.py`` module to provide lifecycle callbacks for the
application and sessions.
* A ``static`` subdirectory containing app-specific static resources to
serve.
* A ``theme.yaml`` file containing a Bokeh theme to automatically apply to
all new documents.
* A ``templates`` subdirectory containing templates for app display
A full directory layout might look like:
.. code-block:: none
myapp
|
+---main.py
+---server_lifecycle.py
+---static
+---theme.yaml
+---templates
+---index.html
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import sys
from os.path import (
basename,
dirname,
exists,
join,
)
from types import ModuleType
from typing import (
TYPE_CHECKING,
Any,
Coroutine,
Dict,
List,
)
# External imports
from jinja2 import Environment, FileSystemLoader, Template
# Bokeh imports
from ...core.types import PathLike
from ...document import Document
from ..application import ServerContext, SessionContext
from .code_runner import CodeRunner
from .handler import Handler
from .notebook import NotebookHandler
from .script import ScriptHandler
from .server_lifecycle import ServerLifecycleHandler
from .server_request_handler import ServerRequestHandler
if TYPE_CHECKING:
from tornado.httputil import HTTPServerRequest
from ...themes import Theme
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'DirectoryHandler',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class DirectoryHandler(Handler):
    ''' Load an application directory which modifies a Document.

    .. autoclasstoc::

    '''

    _package_runner: CodeRunner | None
    _package: ModuleType | None
    _lifecycle_handler: Handler
    _request_handler: Handler
    _theme: Theme | None
    _static: str | None
    _template: Template | None

    def __init__(self, *, filename: PathLike, argv: List[str] = []) -> None:
        '''
        Keywords:
            filename (str) : a path to an application directory with either "main.py" or "main.ipynb"

            argv (list[str], optional) : a list of string arguments to make available as sys.argv to main.py
        '''
        super().__init__()
        src_path = filename

        # If the directory is a package (__init__.py present), run it and
        # register the resulting module so the app's imports resolve.
        init_py = join(src_path, '__init__.py')
        if exists(init_py):
            self._package_runner = CodeRunner(open(init_py).read(), init_py, argv)
            self._package = self._package_runner.new_module()
            assert self._package is not None
            sys.modules[self._package.__name__] = self._package
        else:
            self._package_runner = None
            self._package = None

        # Locate the app entry point; 'main.py' wins over 'main.ipynb'.
        main_py = join(src_path, 'main.py')
        main_ipy = join(src_path, 'main.ipynb')
        if exists(main_py) and exists(main_ipy):
            log.warning(f"Found both 'main.py' and 'main.ipynb' in {src_path}, using 'main.py'")
            main = main_py
        elif exists(main_py):
            main = main_py
        elif exists(main_ipy):
            main = main_ipy
        else:
            raise ValueError(f"No 'main.py' or 'main.ipynb' in {src_path}")
        self._path = src_path
        self._main = main

        handler = NotebookHandler if main.endswith('.ipynb') else ScriptHandler
        self._main_handler = handler(filename=self._main, argv=argv, package=self._package)

        # Lifecycle hooks may come from either 'app_hooks.py' or the older
        # 'server_lifecycle.py', but never both.
        hooks = None
        app_hooks = join(src_path, 'app_hooks.py')
        lifecycle = join(src_path, 'server_lifecycle.py')
        if exists(app_hooks) and exists(lifecycle):
            raise ValueError("Directory style apps can provide either server_lifecycle.py or app_hooks.py, not both.")
        elif exists(lifecycle):
            hooks = lifecycle
        elif exists(app_hooks):
            hooks = app_hooks

        if hooks is not None:
            self._lifecycle_handler = ServerLifecycleHandler(filename=hooks, argv=argv, package=self._package)
        else:
            self._lifecycle_handler = Handler() # no-op handler

        # HTTP request hooks are only supported via 'app_hooks.py'.
        if exists(app_hooks):
            assert hooks is not None
            self._request_handler = ServerRequestHandler(filename=hooks, argv=argv, package=self._package)
        else:
            self._request_handler = Handler() # no-op handler

        self._theme = None
        themeyaml = join(src_path, 'theme.yaml')
        if exists(themeyaml):
            from bokeh.themes import Theme
            self._theme = Theme(filename=themeyaml)

        # BUG FIX: _static must always be initialized. Previously it was only
        # assigned when a 'static' directory existed, leaving the attribute
        # entirely missing (AttributeError on access) for apps without one.
        self._static = None
        appstatic = join(src_path, 'static')
        if exists(appstatic):
            self._static = appstatic

        self._template = None
        appindex = join(src_path, 'templates', 'index.html')
        if exists(appindex):
            env = Environment(loader=FileSystemLoader(dirname(appindex)))
            self._template = env.get_template('index.html')

    # Properties --------------------------------------------------------------

    @property
    def error(self) -> str | None:
        ''' If the handler fails, may contain a related error message.

        '''
        return self._main_handler.error or self._lifecycle_handler.error

    @property
    def error_detail(self) -> str | None:
        ''' If the handler fails, may contain a traceback or other details.

        '''
        return self._main_handler.error_detail or self._lifecycle_handler.error_detail

    @property
    def failed(self) -> bool:
        ''' ``True`` if the handler failed to modify the doc

        '''
        return self._main_handler.failed or self._lifecycle_handler.failed

    @property
    def safe_to_fork(self) -> bool:
        ''' Whether it is still safe for the Bokeh server to fork new workers.

        ``False`` if the configured code (script, notebook, etc.) has already
        been run.

        '''
        return self._main_handler.safe_to_fork

    # Public methods ----------------------------------------------------------

    def modify_document(self, doc: Document) -> None:
        ''' Execute the configured ``main.py`` or ``main.ipynb`` to modify the
        document.

        This method will also search the app directory for any theme or
        template files, and automatically configure the document with them
        if they are found.

        '''
        if self._lifecycle_handler.failed:
            return
        # Note: we do NOT copy self._theme, which assumes the Theme
        # class is immutable (has no setters)
        if self._theme is not None:
            doc.theme = self._theme

        if self._template is not None:
            doc.template = self._template

        # This internal handler should never add a template
        self._main_handler.modify_document(doc)

    def on_server_loaded(self, server_context: ServerContext) -> None:
        ''' Execute ``on_server_loaded`` from ``server_lifecycle.py`` (if
        it is defined) when the server is first started.

        Args:
            server_context (ServerContext) :

        '''
        # Run the package __init__ body (if any) once, at server start.
        if self._package_runner and self._package:
            self._package_runner.run(self._package)

        return self._lifecycle_handler.on_server_loaded(server_context)

    def on_server_unloaded(self, server_context: ServerContext) -> None:
        ''' Execute ``on_server_unloaded`` from ``server_lifecycle.py`` (if
        it is defined) when the server cleanly exits. (Before stopping the
        server's ``IOLoop``.)

        Args:
            server_context (ServerContext) :

        .. warning::
            In practice this code may not run, since servers are often killed
            by a signal.

        '''
        return self._lifecycle_handler.on_server_unloaded(server_context)

    def on_session_created(self, session_context: SessionContext) -> Coroutine[Any, Any, None]:
        ''' Execute ``on_session_created`` from ``server_lifecycle.py`` (if
        it is defined) when a new session is created.

        Args:
            session_context (SessionContext) :

        '''
        return self._lifecycle_handler.on_session_created(session_context)

    def on_session_destroyed(self, session_context: SessionContext) -> Coroutine[Any, Any, None]:
        ''' Execute ``on_session_destroyed`` from ``server_lifecycle.py`` (if
        it is defined) when a session is destroyed.

        Args:
            session_context (SessionContext) :

        '''
        return self._lifecycle_handler.on_session_destroyed(session_context)

    def process_request(self, request: HTTPServerRequest) -> Dict[str, Any]:
        ''' Processes incoming HTTP request returning a dictionary of
        additional data to add to the session_context.

        Args:
            request: HTTP request

        Returns:
            A dictionary of JSON serializable data to be included on
            the session context.

        '''
        return self._request_handler.process_request(request)

    def url_path(self) -> str | None:
        ''' The last path component for the basename of the path to the
        configured directory.

        '''
        if self.failed:
            return None
        else:
            # TODO should fix invalid URL characters
            return '/' + basename(self._path)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| |
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Contact(object):
    """Swagger model for a DocuSign contact record.

    NOTE: the original of this class was produced by the swagger code
    generator; it is rewritten here in a compact, data-driven style while
    keeping the exact same public attributes and behavior.
    """

    # Python attribute name -> swagger type declaration.
    swagger_types = {
        'cloud_provider': 'str',
        'cloud_provider_container_id': 'str',
        'contact_id': 'str',
        'contact_phone_numbers': 'list[ContactPhoneNumber]',
        'contact_uri': 'str',
        'emails': 'list[str]',
        'error_details': 'ErrorDetails',
        'is_owner': 'bool',
        'name': 'str',
        'organization': 'str',
        'shared': 'str',
        'signing_group': 'str',
        'signing_group_name': 'str'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'cloud_provider': 'cloudProvider',
        'cloud_provider_container_id': 'cloudProviderContainerId',
        'contact_id': 'contactId',
        'contact_phone_numbers': 'contactPhoneNumbers',
        'contact_uri': 'contactUri',
        'emails': 'emails',
        'error_details': 'errorDetails',
        'is_owner': 'isOwner',
        'name': 'name',
        'organization': 'organization',
        'shared': 'shared',
        'signing_group': 'signingGroup',
        'signing_group_name': 'signingGroupName'
    }

    def __init__(self, cloud_provider=None, cloud_provider_container_id=None, contact_id=None, contact_phone_numbers=None, contact_uri=None, emails=None, error_details=None, is_owner=None, name=None, organization=None, shared=None, signing_group=None, signing_group_name=None):  # noqa: E501
        """Contact - a model defined in Swagger"""  # noqa: E501
        # Snapshot the constructor arguments so the attributes can be
        # populated in one data-driven pass below.
        supplied = locals()
        self.discriminator = None
        for attr_name in self.swagger_types:
            # Backing slot for the property; always present on the instance.
            setattr(self, '_' + attr_name, None)
            # Route non-None constructor values through the property setter.
            if supplied[attr_name] is not None:
                setattr(self, attr_name, supplied[attr_name])

    # -- Plain pass-through accessors, one per swagger attribute -------------

    cloud_provider = property(
        lambda self: self._cloud_provider,
        lambda self, value: setattr(self, '_cloud_provider', value),
        doc="The cloud_provider of this Contact (str).")

    cloud_provider_container_id = property(
        lambda self: self._cloud_provider_container_id,
        lambda self, value: setattr(self, '_cloud_provider_container_id', value),
        doc="The cloud_provider_container_id of this Contact (str).")

    contact_id = property(
        lambda self: self._contact_id,
        lambda self, value: setattr(self, '_contact_id', value),
        doc="The contact_id of this Contact (str).")

    contact_phone_numbers = property(
        lambda self: self._contact_phone_numbers,
        lambda self, value: setattr(self, '_contact_phone_numbers', value),
        doc="The contact_phone_numbers of this Contact (list[ContactPhoneNumber]).")

    contact_uri = property(
        lambda self: self._contact_uri,
        lambda self, value: setattr(self, '_contact_uri', value),
        doc="The contact_uri of this Contact (str).")

    emails = property(
        lambda self: self._emails,
        lambda self, value: setattr(self, '_emails', value),
        doc="The emails of this Contact (list[str]).")

    error_details = property(
        lambda self: self._error_details,
        lambda self, value: setattr(self, '_error_details', value),
        doc="The error_details of this Contact (ErrorDetails).")

    is_owner = property(
        lambda self: self._is_owner,
        lambda self, value: setattr(self, '_is_owner', value),
        doc="The is_owner of this Contact (bool).")

    name = property(
        lambda self: self._name,
        lambda self, value: setattr(self, '_name', value),
        doc="The name of this Contact (str).")

    organization = property(
        lambda self: self._organization,
        lambda self, value: setattr(self, '_organization', value),
        doc="The organization of this Contact (str).")

    shared = property(
        lambda self: self._shared,
        lambda self, value: setattr(self, '_shared', value),
        doc="The shared flag of this Contact (str).")

    signing_group = property(
        lambda self: self._signing_group,
        lambda self, value: setattr(self, '_signing_group', value),
        doc="The signing_group of this Contact (str).")

    signing_group_name = property(
        lambda self: self._signing_group_name,
        lambda self, value: setattr(self, '_signing_group_name', value),
        doc="The signing_group_name of this Contact (str). "
            "Maximum length: 100 characters.")

    # -- Generic model plumbing ----------------------------------------------

    def to_dict(self):
        """Returns the model properties as a dict, recursing into models."""
        result = {}
        for attr_name in self.swagger_types:
            value = getattr(self, attr_name)
            if isinstance(value, list):
                result[attr_name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr_name] = value.to_dict()
            elif isinstance(value, dict):
                result[attr_name] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr_name] = value
        if issubclass(Contact, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, Contact) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| |
import unittest
from pyramid.compat import text_
from pyramid.security import AuthenticationAPIMixin, AuthorizationAPIMixin
from pyramid.tests.test_config import IDummy
class TestingConfiguratorMixinTests(unittest.TestCase):
    """Functional tests for the Configurator ``testing_*`` helper APIs."""
    def _makeOne(self, *arg, **kw):
        # Fresh Configurator per test; autocommit avoids explicit commit().
        from pyramid.config import Configurator
        config = Configurator(*arg, **kw)
        return config
    def test_testing_securitypolicy(self):
        """The dummy policy is registered as both authn and authz utility."""
        from pyramid.testing import DummySecurityPolicy
        config = self._makeOne(autocommit=True)
        config.testing_securitypolicy('user', ('group1', 'group2'),
                                      permissive=False)
        from pyramid.interfaces import IAuthenticationPolicy
        from pyramid.interfaces import IAuthorizationPolicy
        ut = config.registry.getUtility(IAuthenticationPolicy)
        self.assertTrue(isinstance(ut, DummySecurityPolicy))
        ut = config.registry.getUtility(IAuthorizationPolicy)
        self.assertEqual(ut.userid, 'user')
        self.assertEqual(ut.groupids, ('group1', 'group2'))
        self.assertEqual(ut.permissive, False)
    def test_testing_securitypolicy_remember_result(self):
        """remember() returns the configured result and records the userid."""
        from pyramid.security import remember
        config = self._makeOne(autocommit=True)
        pol = config.testing_securitypolicy(
            'user', ('group1', 'group2'),
            permissive=False, remember_result=True)
        request = DummyRequest()
        request.registry = config.registry
        val = remember(request, 'fred')
        self.assertEqual(pol.remembered, 'fred')
        self.assertEqual(val, True)
    def test_testing_securitypolicy_forget_result(self):
        """forget() returns the configured result and flags the policy."""
        from pyramid.security import forget
        config = self._makeOne(autocommit=True)
        pol = config.testing_securitypolicy(
            'user', ('group1', 'group2'),
            permissive=False, forget_result=True)
        request = DummyRequest()
        request.registry = config.registry
        val = forget(request)
        self.assertEqual(pol.forgotten, True)
        self.assertEqual(val, True)
    def test_testing_resources(self):
        """testing_resources installs a traverser over the given path map."""
        from pyramid.traversal import find_resource
        from pyramid.interfaces import ITraverser
        ob1 = object()
        ob2 = object()
        resources = {'/ob1':ob1, '/ob2':ob2}
        config = self._makeOne(autocommit=True)
        config.testing_resources(resources)
        adapter = config.registry.getAdapter(None, ITraverser)
        result = adapter(DummyRequest({'PATH_INFO':'/ob1'}))
        self.assertEqual(result['context'], ob1)
        self.assertEqual(result['view_name'], '')
        self.assertEqual(result['subpath'], ())
        self.assertEqual(result['traversed'], (text_('ob1'),))
        self.assertEqual(result['virtual_root'], ob1)
        self.assertEqual(result['virtual_root_path'], ())
        result = adapter(DummyRequest({'PATH_INFO':'/ob2'}))
        self.assertEqual(result['context'], ob2)
        self.assertEqual(result['view_name'], '')
        self.assertEqual(result['subpath'], ())
        self.assertEqual(result['traversed'], (text_('ob2'),))
        self.assertEqual(result['virtual_root'], ob2)
        self.assertEqual(result['virtual_root_path'], ())
        # Unregistered paths raise KeyError from the traverser.
        self.assertRaises(KeyError, adapter, DummyRequest({'PATH_INFO':'/ob3'}))
        try:
            config.begin()
            self.assertEqual(find_resource(None, '/ob1'), ob1)
        finally:
            config.end()
    def test_testing_add_subscriber_single(self):
        """Only events providing the interface are appended to the list."""
        config = self._makeOne(autocommit=True)
        L = config.testing_add_subscriber(IDummy)
        event = DummyEvent()
        config.registry.notify(event)
        self.assertEqual(len(L), 1)
        self.assertEqual(L[0], event)
        config.registry.notify(object())
        self.assertEqual(len(L), 1)
    def test_testing_add_subscriber_dottedname(self):
        """The interface may be passed as a dotted name string."""
        config = self._makeOne(autocommit=True)
        L = config.testing_add_subscriber(
            'pyramid.tests.test_config.test_init.IDummy')
        event = DummyEvent()
        config.registry.notify(event)
        self.assertEqual(len(L), 1)
        self.assertEqual(L[0], event)
        config.registry.notify(object())
        self.assertEqual(len(L), 1)
    def test_testing_add_subscriber_multiple(self):
        """A tuple of interfaces captures both members of the notification."""
        from zope.interface import Interface
        config = self._makeOne(autocommit=True)
        L = config.testing_add_subscriber((Interface, IDummy))
        event = DummyEvent()
        event.object = 'foo'
        # the below is the equivalent of z.c.event.objectEventNotify(event)
        config.registry.subscribers((event.object, event), None)
        self.assertEqual(len(L), 2)
        self.assertEqual(L[0], 'foo')
        self.assertEqual(L[1], event)
    def test_testing_add_subscriber_defaults(self):
        """With no interface argument, every notification is captured."""
        config = self._makeOne(autocommit=True)
        L = config.testing_add_subscriber()
        event = object()
        config.registry.notify(event)
        self.assertEqual(L[-1], event)
        event2 = object()
        config.registry.notify(event2)
        self.assertEqual(L[-1], event2)
    def test_testing_add_renderer(self):
        """testing_add_renderer returns a spy usable with assert_()."""
        config = self._makeOne(autocommit=True)
        renderer = config.testing_add_renderer('templates/foo.pt')
        from pyramid.testing import DummyTemplateRenderer
        self.assertTrue(isinstance(renderer, DummyTemplateRenderer))
        from pyramid.renderers import render_to_response
        # must provide request to pass in registry (this is a functest)
        request = DummyRequest()
        request.registry = config.registry
        render_to_response(
            'templates/foo.pt', {'foo':1, 'bar':2}, request=request)
        renderer.assert_(foo=1)
        renderer.assert_(bar=2)
        renderer.assert_(request=request)
    def test_testing_add_renderer_twice(self):
        """Two registered renderers record their own render calls only."""
        config = self._makeOne(autocommit=True)
        renderer1 = config.testing_add_renderer('templates/foo.pt')
        renderer2 = config.testing_add_renderer('templates/bar.pt')
        from pyramid.testing import DummyTemplateRenderer
        self.assertTrue(isinstance(renderer1, DummyTemplateRenderer))
        self.assertTrue(isinstance(renderer2, DummyTemplateRenderer))
        from pyramid.renderers import render_to_response
        # must provide request to pass in registry (this is a functest)
        request = DummyRequest()
        request.registry = config.registry
        render_to_response(
            'templates/foo.pt', {'foo':1, 'bar':2}, request=request)
        renderer1.assert_(foo=1)
        renderer1.assert_(bar=2)
        renderer1.assert_(request=request)
        render_to_response(
            'templates/bar.pt', {'foo':1, 'bar':2}, request=request)
        renderer2.assert_(foo=1)
        renderer2.assert_(bar=2)
        renderer2.assert_(request=request)
    def test_testing_add_renderer_explicitrenderer(self):
        """A custom renderer callable is invoked with the render values."""
        config = self._makeOne(autocommit=True)
        class E(Exception): pass
        def renderer(kw, system):
            self.assertEqual(kw, {'foo':1, 'bar':2})
            raise E
        renderer = config.testing_add_renderer('templates/foo.pt', renderer)
        from pyramid.renderers import render_to_response
        # must provide request to pass in registry (this is a functest)
        request = DummyRequest()
        request.registry = config.registry
        try:
            render_to_response(
                'templates/foo.pt', {'foo':1, 'bar':2}, request=request)
        except E:
            pass
        else: # pragma: no cover
            raise AssertionError
    def test_testing_add_template(self):
        """testing_add_template is an alias for testing_add_renderer."""
        config = self._makeOne(autocommit=True)
        renderer = config.testing_add_template('templates/foo.pt')
        from pyramid.testing import DummyTemplateRenderer
        self.assertTrue(isinstance(renderer, DummyTemplateRenderer))
        from pyramid.renderers import render_to_response
        # must provide request to pass in registry (this is a functest)
        request = DummyRequest()
        request.registry = config.registry
        render_to_response('templates/foo.pt', dict(foo=1, bar=2),
                           request=request)
        renderer.assert_(foo=1)
        renderer.assert_(bar=2)
        renderer.assert_(request=request)
from zope.interface import implementer
@implementer(IDummy)
class DummyEvent:
    """Minimal event object declaring the IDummy interface for subscriber tests."""
    pass
class DummyRequest(AuthenticationAPIMixin, AuthorizationAPIMixin):
    """Request stand-in carrying only a WSGI environ dict."""

    def __init__(self, environ=None):
        # Fresh dict per instance — avoids the shared-mutable-default trap.
        self.environ = {} if environ is None else environ
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import logging
from dataclasses import dataclass
from typing import Any, cast, Dict, Optional, TYPE_CHECKING
from flask import g
from sqlalchemy.orm.exc import DetachedInstanceError
from superset import is_feature_enabled
from superset.models.sql_lab import Query
from superset.sql_parse import CtasMethod
from superset.utils import core as utils
from superset.utils.core import apply_max_row_limit
from superset.utils.dates import now_as_float
from superset.views.utils import get_cta_schema_name
if TYPE_CHECKING:
from superset.connectors.sqla.models import Database
logger = logging.getLogger(__name__)
SqlResults = Dict[str, Any]
@dataclass
class SqlJsonExecutionContext:  # pylint: disable=too-many-instance-attributes
    """Mutable holder for everything needed to run one SQL Lab query.

    Built from the raw ``query_params`` of an incoming request; the target
    database, the persisted ``Query`` model, and the execution result are
    attached later via ``set_database``, ``set_query`` and
    ``set_execution_result``.
    """

    database_id: int
    schema: str
    sql: str
    template_params: Dict[str, Any]
    async_flag: bool
    limit: int
    status: str
    client_id: str
    client_id_or_short_id: str
    sql_editor_id: str
    tab_name: str
    user_id: Optional[int]
    expand_data: bool
    create_table_as_select: Optional[CreateTableAsSelect]
    database: Optional[Database]
    query: Query
    _sql_result: Optional[SqlResults]

    def __init__(self, query_params: Dict[str, Any]):
        self.create_table_as_select = None
        self.database = None
        # BUG FIX: previously never initialized, so get_execution_result()
        # raised AttributeError when called before set_execution_result().
        self._sql_result = None
        self._init_from_query_params(query_params)
        self.user_id = self._get_user_id()
        # Fall back to a 10-char generated id when the client sent none.
        self.client_id_or_short_id = cast(str, self.client_id or utils.shortid()[:10])

    def set_query(self, query: Query) -> None:
        """Attach the persisted Query model for this execution."""
        self.query = query

    def _init_from_query_params(self, query_params: Dict[str, Any]) -> None:
        """Populate the context fields from the raw request parameters."""
        self.database_id = cast(int, query_params.get("database_id"))
        self.schema = cast(str, query_params.get("schema"))
        self.sql = cast(str, query_params.get("sql"))
        self.template_params = self._get_template_params(query_params)
        self.async_flag = cast(bool, query_params.get("runAsync"))
        self.limit = self._get_limit_param(query_params)
        self.status = cast(str, query_params.get("status"))
        if cast(bool, query_params.get("select_as_cta")):
            self.create_table_as_select = CreateTableAsSelect.create_from(query_params)
        self.client_id = cast(str, query_params.get("client_id"))
        self.sql_editor_id = cast(str, query_params.get("sql_editor_id"))
        self.tab_name = cast(str, query_params.get("tab"))
        # Only expand data when the feature flag is on AND the client asked.
        self.expand_data: bool = cast(
            bool,
            is_feature_enabled("PRESTO_EXPAND_DATA")
            and query_params.get("expand_data"),
        )

    @staticmethod
    def _get_template_params(query_params: Dict[str, Any]) -> Dict[str, Any]:
        """Parse the ``templateParams`` JSON; fall back to {} on bad input."""
        try:
            template_params = json.loads(query_params.get("templateParams") or "{}")
        except json.JSONDecodeError:
            logger.warning(
                "Invalid template parameter %s" " specified. Defaulting to empty dict",
                str(query_params.get("templateParams")),
            )
            template_params = {}
        return template_params

    @staticmethod
    def _get_limit_param(query_params: Dict[str, Any]) -> int:
        """Clamp the requested row limit; 0 defers to the configured maximum."""
        limit = apply_max_row_limit(query_params.get("queryLimit") or 0)
        if limit < 0:
            logger.warning(
                "Invalid limit of %i specified. Defaulting to max limit.", limit
            )
            limit = 0
        return limit

    def _get_user_id(self) -> Optional[int]:  # pylint: disable=no-self-use
        # g.user raises RuntimeError outside a Flask request/app context.
        try:
            return g.user.get_id() if g.user else None
        except RuntimeError:
            return None

    def is_run_asynchronous(self) -> bool:
        """True when the client requested asynchronous execution."""
        return self.async_flag

    @property
    def select_as_cta(self) -> bool:
        """True when this execution is a CREATE TABLE AS SELECT."""
        return self.create_table_as_select is not None

    def set_database(self, database: Database) -> None:
        """Attach the target database and resolve the CTAS target schema."""
        self._validate_db(database)
        self.database = database
        if self.select_as_cta:
            schema_name = self._get_ctas_target_schema_name(database)
            self.create_table_as_select.target_schema_name = schema_name  # type: ignore

    def _get_ctas_target_schema_name(self, database: Database) -> Optional[str]:
        # A forced CTAS schema on the database overrides any computed name.
        if database.force_ctas_schema:
            return database.force_ctas_schema
        return get_cta_schema_name(database, g.user, self.schema, self.sql)

    def _validate_db(self, database: Database) -> None:
        # TODO validate db.id is equal to self.database_id
        pass

    def get_execution_result(self) -> Optional[SqlResults]:
        """Return the stored result set, or None if none has been set yet."""
        return self._sql_result

    def set_execution_result(self, sql_result: Optional[SqlResults]) -> None:
        """Store the result set produced by executing the query."""
        self._sql_result = sql_result

    def create_query(self) -> Query:
        """Build an unsaved Query model reflecting this execution context."""
        # pylint: disable=line-too-long
        start_time = now_as_float()
        if self.select_as_cta:
            return Query(
                database_id=self.database_id,
                sql=self.sql,
                schema=self.schema,
                select_as_cta=True,
                ctas_method=self.create_table_as_select.ctas_method,  # type: ignore
                start_time=start_time,
                tab_name=self.tab_name,
                status=self.status,
                sql_editor_id=self.sql_editor_id,
                tmp_table_name=self.create_table_as_select.target_table_name,  # type: ignore
                tmp_schema_name=self.create_table_as_select.target_schema_name,  # type: ignore
                user_id=self.user_id,
                client_id=self.client_id_or_short_id,
            )
        return Query(
            database_id=self.database_id,
            sql=self.sql,
            schema=self.schema,
            select_as_cta=False,
            start_time=start_time,
            tab_name=self.tab_name,
            status=self.status,
            sql_editor_id=self.sql_editor_id,
            user_id=self.user_id,
            client_id=self.client_id_or_short_id,
        )

    def get_query_details(self) -> str:
        """Human-readable description of the query, for logging."""
        try:
            if hasattr(self, "query"):
                if self.query.id:
                    return "query '{}' - '{}'".format(self.query.id, self.query.sql)
        except DetachedInstanceError:
            # The Query ORM object may already be detached from its session.
            pass
        return "query '{}'".format(self.sql)
class CreateTableAsSelect:  # pylint: disable=too-few-public-methods
    """Value object describing the target of a CREATE TABLE AS SELECT."""

    ctas_method: CtasMethod
    target_schema_name: Optional[str]
    target_table_name: str

    def __init__(
        self, ctas_method: CtasMethod, target_schema_name: str, target_table_name: str
    ):
        self.ctas_method = ctas_method
        self.target_schema_name = target_schema_name
        self.target_table_name = target_table_name

    @staticmethod
    def create_from(query_params: Dict[str, Any]) -> CreateTableAsSelect:
        """Build an instance from raw request parameters."""
        method = query_params.get("ctas_method", CtasMethod.TABLE)
        target_schema = cast(str, query_params.get("schema"))
        target_table = cast(str, query_params.get("tmp_table_name"))
        return CreateTableAsSelect(method, target_schema, target_table)
| |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
# NOTE(morganfainberg): import endpoint filter to populate the SQL model
from keystone.contrib import endpoint_filter # flake8: noqa
from keystone.tests import test_v3
class TestExtensionCase(test_v3.RestfulTestCase):
    """Base case that enables the OS-EP-FILTER (endpoint filter) extension.

    Switches the catalog driver to the endpoint-filter-aware SQL backend and
    precomputes the default project/endpoint association URL that concrete
    test cases below start from.
    """

    EXTENSION_NAME = 'endpoint_filter'
    EXTENSION_TO_ADD = 'endpoint_filter_extension'

    def config_overrides(self):
        # Use the endpoint-filter catalog driver so that associations
        # actually filter the catalog returned inside tokens.
        super(TestExtensionCase, self).config_overrides()
        self.config_fixture.config(
            group='catalog',
            driver='keystone.contrib.endpoint_filter.backends.catalog_sql.'
                   'EndpointFilterCatalog')

    def setUp(self):
        super(TestExtensionCase, self).setUp()
        # URL of the association between the default domain's project and the
        # test endpoint; most tests begin by PUTting to this URL.
        self.default_request_url = (
            '/OS-EP-FILTER/projects/%(project_id)s'
            '/endpoints/%(endpoint_id)s' % {
                'project_id': self.default_domain_project_id,
                'endpoint_id': self.endpoint_id})
class EndpointFilterCRUDTestCase(TestExtensionCase):
    """CRUD tests for project <-> endpoint associations (OS-EP-FILTER)."""

    def test_create_endpoint_project_association(self):
        """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
        Valid endpoint and project id test case.
        """
        self.put(self.default_request_url,
                 body='',
                 expected_status=204)

    def test_create_endpoint_project_association_with_invalid_project(self):
        """PUT OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
        Invalid project id test case.
        """
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': uuid.uuid4().hex,
                     'endpoint_id': self.endpoint_id},
                 body='',
                 expected_status=404)

    def test_create_endpoint_project_association_with_invalid_endpoint(self):
        """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
        Invalid endpoint id test case.
        """
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.default_domain_project_id,
                     'endpoint_id': uuid.uuid4().hex},
                 body='',
                 expected_status=404)

    def test_create_endpoint_project_association_with_unexpected_body(self):
        """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
        Unexpected body in request. The body should be ignored.
        """
        self.put(self.default_request_url,
                 body={'project_id': self.default_domain_project_id},
                 expected_status=204)

    def test_check_endpoint_project_association(self):
        """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
        Valid project and endpoint id test case.
        """
        self.put(self.default_request_url,
                 body='',
                 expected_status=204)
        self.head('/OS-EP-FILTER/projects/%(project_id)s'
                  '/endpoints/%(endpoint_id)s' % {
                      'project_id': self.default_domain_project_id,
                      'endpoint_id': self.endpoint_id},
                  expected_status=204)

    def test_check_endpoint_project_association_with_invalid_project(self):
        """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
        Invalid project id test case.
        """
        self.put(self.default_request_url)
        self.head('/OS-EP-FILTER/projects/%(project_id)s'
                  '/endpoints/%(endpoint_id)s' % {
                      'project_id': uuid.uuid4().hex,
                      'endpoint_id': self.endpoint_id},
                  body='',
                  expected_status=404)

    def test_check_endpoint_project_association_with_invalid_endpoint(self):
        """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
        Invalid endpoint id test case.
        """
        self.put(self.default_request_url)
        self.head('/OS-EP-FILTER/projects/%(project_id)s'
                  '/endpoints/%(endpoint_id)s' % {
                      'project_id': self.default_domain_project_id,
                      'endpoint_id': uuid.uuid4().hex},
                  body='',
                  expected_status=404)

    def test_list_endpoints_associated_with_valid_project(self):
        """GET /OS-EP-FILTER/projects/{project_id}/endpoints
        Valid project and endpoint id test case.
        """
        self.put(self.default_request_url)
        resource_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
            'project_id': self.default_domain_project_id}
        r = self.get(resource_url)
        self.assertValidEndpointListResponse(r, self.endpoint,
                                             resource_url=resource_url)

    def test_list_endpoints_associated_with_invalid_project(self):
        """GET /OS-EP-FILTER/projects/{project_id}/endpoints
        Invalid project id test case.
        """
        self.put(self.default_request_url)
        self.get('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
            'project_id': uuid.uuid4().hex},
            body='',
            expected_status=404)

    def test_list_projects_associated_with_endpoint(self):
        """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
        Valid endpoint-project association test case.
        """
        self.put(self.default_request_url)
        resource_url = '/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {
            'endpoint_id': self.endpoint_id}
        r = self.get(resource_url)
        self.assertValidProjectListResponse(r, self.default_domain_project,
                                            resource_url=resource_url)

    def test_list_projects_with_no_endpoint_project_association(self):
        """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
        Valid endpoint id but no endpoint-project associations test case.
        """
        r = self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
                     {'endpoint_id': self.endpoint_id},
                     expected_status=200)
        self.assertValidProjectListResponse(r, expected_length=0)

    def test_list_projects_associated_with_invalid_endpoint(self):
        """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
        Invalid endpoint id test case.
        """
        self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
                 {'endpoint_id': uuid.uuid4().hex},
                 expected_status=404)

    def test_remove_endpoint_project_association(self):
        """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
        Valid project id and endpoint id test case.
        """
        self.put(self.default_request_url)
        self.delete('/OS-EP-FILTER/projects/%(project_id)s'
                    '/endpoints/%(endpoint_id)s' % {
                        'project_id': self.default_domain_project_id,
                        'endpoint_id': self.endpoint_id},
                    expected_status=204)

    def test_remove_endpoint_project_association_with_invalid_project(self):
        """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
        Invalid project id test case.
        """
        self.put(self.default_request_url)
        self.delete('/OS-EP-FILTER/projects/%(project_id)s'
                    '/endpoints/%(endpoint_id)s' % {
                        'project_id': uuid.uuid4().hex,
                        'endpoint_id': self.endpoint_id},
                    body='',
                    expected_status=404)

    def test_remove_endpoint_project_association_with_invalid_endpoint(self):
        """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
        Invalid endpoint id test case.
        """
        self.put(self.default_request_url)
        self.delete('/OS-EP-FILTER/projects/%(project_id)s'
                    '/endpoints/%(endpoint_id)s' % {
                        'project_id': self.default_domain_project_id,
                        'endpoint_id': uuid.uuid4().hex},
                    body='',
                    expected_status=404)

    def test_endpoint_project_association_cleanup_when_project_deleted(self):
        # Deleting a project must also remove its endpoint associations.
        self.put(self.default_request_url)
        association_url = '/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {
            'endpoint_id': self.endpoint_id}
        r = self.get(association_url, expected_status=200)
        self.assertValidProjectListResponse(r, expected_length=1)
        self.delete('/projects/%(project_id)s' % {
            'project_id': self.default_domain_project_id})
        r = self.get(association_url, expected_status=200)
        self.assertValidProjectListResponse(r, expected_length=0)

    def test_endpoint_project_association_cleanup_when_endpoint_deleted(self):
        # Deleting an endpoint must also remove its project associations.
        self.put(self.default_request_url)
        association_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
            'project_id': self.default_domain_project_id}
        r = self.get(association_url, expected_status=200)
        self.assertValidEndpointListResponse(r, expected_length=1)
        self.delete('/endpoints/%(endpoint_id)s' % {
            'endpoint_id': self.endpoint_id})
        r = self.get(association_url, expected_status=200)
        self.assertValidEndpointListResponse(r, expected_length=0)
class EndpointFilterTokenRequestTestCase(TestExtensionCase):
    """Tests that token catalogs honor endpoint-project associations."""

    def test_project_scoped_token_using_endpoint_filter(self):
        """Verify endpoints from project scoped token filtered."""
        # create a project to work with
        ref = self.new_project_ref(domain_id=self.domain_id)
        r = self.post('/projects', body={'project': ref})
        project = self.assertValidProjectResponse(r, ref)

        # grant the user a role on the project
        self.put(
            '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
                'user_id': self.user['id'],
                'project_id': project['id'],
                'role_id': self.role['id']})

        # set the user's preferred project
        body = {'user': {'default_project_id': project['id']}}
        r = self.patch('/users/%(user_id)s' % {
            'user_id': self.user['id']},
            body=body)
        self.assertValidUserResponse(r)

        # add one endpoint to the project
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': project['id'],
                     'endpoint_id': self.endpoint_id},
                 body='',
                 expected_status=204)

        # attempt to authenticate without requesting a project; the default
        # project set above should be used and its catalog filtered.
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'])
        r = self.post('/auth/tokens', body=auth_data)
        self.assertValidProjectScopedTokenResponse(
            r,
            require_catalog=True,
            endpoint_filter=True,
            ep_filter_assoc=1)
        self.assertEqual(r.result['token']['project']['id'], project['id'])

    def test_default_scoped_token_using_endpoint_filter(self):
        """Verify endpoints from default scoped token filtered."""
        # add one endpoint to default project
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.project['id'],
                     'endpoint_id': self.endpoint_id},
                 body='',
                 expected_status=204)

        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'])
        r = self.post('/auth/tokens', body=auth_data)
        self.assertValidProjectScopedTokenResponse(
            r,
            require_catalog=True,
            endpoint_filter=True,
            ep_filter_assoc=1)
        self.assertEqual(r.result['token']['project']['id'],
                         self.project['id'])

    def test_project_scoped_token_with_no_catalog_using_endpoint_filter(self):
        """Verify endpoint filter when project scoped token returns no catalog.
        Test that the project scoped token response is valid for a given
        endpoint-project association when no service catalog is returned.
        """
        # create a project to work with
        ref = self.new_project_ref(domain_id=self.domain_id)
        r = self.post('/projects', body={'project': ref})
        project = self.assertValidProjectResponse(r, ref)

        # grant the user a role on the project
        self.put(
            '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
                'user_id': self.user['id'],
                'project_id': project['id'],
                'role_id': self.role['id']})

        # set the user's preferred project
        body = {'user': {'default_project_id': project['id']}}
        r = self.patch('/users/%(user_id)s' % {
            'user_id': self.user['id']},
            body=body)
        self.assertValidUserResponse(r)

        # add one endpoint to the project
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': project['id'],
                     'endpoint_id': self.endpoint_id},
                 body='',
                 expected_status=204)

        # attempt to authenticate without requesting a project; ?nocatalog
        # suppresses the service catalog in the response.
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'])
        r = self.post('/auth/tokens?nocatalog', body=auth_data)
        self.assertValidProjectScopedTokenResponse(
            r,
            require_catalog=False,
            endpoint_filter=True,
            ep_filter_assoc=1)
        self.assertEqual(r.result['token']['project']['id'], project['id'])

    def test_default_scoped_token_with_no_catalog_using_endpoint_filter(self):
        """Verify endpoint filter when default scoped token returns no catalog.
        Test that the default project scoped token response is valid for a
        given endpoint-project association when no service catalog is returned.
        """
        # add one endpoint to default project
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.project['id'],
                     'endpoint_id': self.endpoint_id},
                 body='',
                 expected_status=204)

        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'])
        r = self.post('/auth/tokens?nocatalog', body=auth_data)
        self.assertValidProjectScopedTokenResponse(
            r,
            require_catalog=False,
            endpoint_filter=True,
            ep_filter_assoc=1)
        self.assertEqual(r.result['token']['project']['id'],
                         self.project['id'])

    def test_project_scoped_token_with_no_endpoint_project_association(self):
        """Verify endpoint filter when no endpoint-project association.
        Test that the project scoped token response is valid when there are
        no endpoint-project associations defined.
        """
        # create a project to work with
        ref = self.new_project_ref(domain_id=self.domain_id)
        r = self.post('/projects', body={'project': ref})
        project = self.assertValidProjectResponse(r, ref)

        # grant the user a role on the project
        self.put(
            '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
                'user_id': self.user['id'],
                'project_id': project['id'],
                'role_id': self.role['id']})

        # set the user's preferred project
        body = {'user': {'default_project_id': project['id']}}
        r = self.patch('/users/%(user_id)s' % {
            'user_id': self.user['id']},
            body=body)
        self.assertValidUserResponse(r)

        # attempt to authenticate without requesting a project
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'])
        r = self.post('/auth/tokens?nocatalog', body=auth_data)
        self.assertValidProjectScopedTokenResponse(
            r,
            require_catalog=False,
            endpoint_filter=True)
        self.assertEqual(r.result['token']['project']['id'], project['id'])

    def test_default_scoped_token_with_no_endpoint_project_association(self):
        """Verify endpoint filter when no endpoint-project association.
        Test that the default project scoped token response is valid when
        there are no endpoint-project associations defined.
        """
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'])
        r = self.post('/auth/tokens?nocatalog', body=auth_data)
        self.assertValidProjectScopedTokenResponse(
            r,
            require_catalog=False,
            endpoint_filter=True,)
        self.assertEqual(r.result['token']['project']['id'],
                         self.project['id'])

    def test_invalid_endpoint_project_association(self):
        """Verify an invalid endpoint-project association is handled."""
        # add first endpoint to default project
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.project['id'],
                     'endpoint_id': self.endpoint_id},
                 body='',
                 expected_status=204)

        # create a second temporary endpoint
        self.endpoint_id2 = uuid.uuid4().hex
        self.endpoint2 = self.new_endpoint_ref(service_id=self.service_id)
        self.endpoint2['id'] = self.endpoint_id2
        self.catalog_api.create_endpoint(
            self.endpoint_id2,
            self.endpoint2.copy())

        # add second endpoint to default project
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.project['id'],
                     'endpoint_id': self.endpoint_id2},
                 body='',
                 expected_status=204)

        # remove the temporary reference
        # this will create inconsistency in the endpoint filter table
        # which is fixed during the catalog creation for token request
        self.catalog_api.delete_endpoint(self.endpoint_id2)

        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'])
        r = self.post('/auth/tokens', body=auth_data)
        # only the one surviving association should appear in the catalog
        self.assertValidProjectScopedTokenResponse(
            r,
            require_catalog=True,
            endpoint_filter=True,
            ep_filter_assoc=1)
        self.assertEqual(r.result['token']['project']['id'],
                         self.project['id'])

    def test_disabled_endpoint(self):
        """Test that a disabled endpoint is handled."""
        # Add an enabled endpoint to the default project
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.project['id'],
                     'endpoint_id': self.endpoint_id},
                 expected_status=204)

        # Add a disabled endpoint to the default project.
        # Create a disabled endpoint that's like the enabled one.
        disabled_endpoint_ref = copy.copy(self.endpoint)
        disabled_endpoint_id = uuid.uuid4().hex
        disabled_endpoint_ref.update({
            'id': disabled_endpoint_id,
            'enabled': False,
            'interface': 'internal'
        })
        self.catalog_api.create_endpoint(disabled_endpoint_id,
                                         disabled_endpoint_ref)
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.project['id'],
                     'endpoint_id': disabled_endpoint_id},
                 expected_status=204)

        # Authenticate to get token with catalog; the disabled endpoint must
        # not appear even though it is associated with the project.
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'])
        r = self.post('/auth/tokens', body=auth_data)

        endpoints = r.result['token']['catalog'][0]['endpoints']
        endpoint_ids = [ep['id'] for ep in endpoints]
        self.assertEqual([self.endpoint_id], endpoint_ids)
| |
# tictactoe.py
import random
def drawBoard(board):
    """Print the board that was passed in.

    ``board`` is a list of 10 strings; index 0 is unused, and cells 1-9 map
    onto the numeric-keypad layout (7-8-9 on top, 1-2-3 on the bottom).
    """
    rows = ((7, 8, 9), (4, 5, 6), (1, 2, 3))
    for row_num, (a, b, c) in enumerate(rows):
        if row_num:
            # horizontal divider between board rows
            print('-----------')
        print(' | |')
        print(' ' + board[a] + ' | ' + board[b] + ' | ' + board[c])
        print(' | |')
def inputPlayerLetter():
    """Prompt until the player picks 'X' or 'O' (case-insensitive).

    Returns a two-item list: [player's letter, computer's letter].
    """
    choice = ''
    while choice not in ('X', 'O'):
        print('Do you want to be X or O?')
        choice = input().upper()
    return ['X', 'O'] if choice == 'X' else ['O', 'X']
def whoGoesFirst():
    """Randomly pick which side moves first: 'computer' or 'player'."""
    return 'computer' if random.randint(0, 1) == 0 else 'player'
def playAgain():
    """Ask whether to start another game; True if the answer starts with 'y'."""
    print('Do you want to play again? (yes or no)')
    answer = input().lower()
    return answer.startswith('y')
def makeMove(board, letter, move):
    """
    Set the board at the move location to be the specified letter.
    """
    # Mutates ``board`` in place; no return value.
    board[move] = letter
def isWinner(bo, le):
    """Return True if player with letter ``le`` has a winning line on ``bo``.

    bo = board and le = letter (abbreviations kept for brevity).
    """
    winning_lines = (
        (7, 8, 9), (4, 5, 6), (1, 2, 3),   # rows: top, middle, bottom
        (7, 4, 1), (8, 5, 2), (9, 6, 3),   # columns: left, middle, right
        (7, 5, 3), (9, 5, 1),              # diagonals
    )
    return any(bo[a] == le and bo[b] == le and bo[c] == le
               for a, b, c in winning_lines)
def getBoardCopy(board):
    """Make a duplicate of the board list and return the duplicate.

    Returns a shallow copy so trial moves never mutate the real board.
    """
    # list() copies in a single C-level pass instead of a manual append loop.
    return list(board)
def isSpaceFree(board, move):
    """Return True if the passed move is a free square on the passed board."""
    return ' ' == board[move]
def getPlayerMove(board):
    """Prompt the player for a move; returns the chosen free square (1-9)."""
    valid_choices = '1 2 3 4 5 6 7 8 9'.split()
    move = ' '
    # Keep asking until the input is a digit 1-9 AND that square is free.
    while move not in valid_choices or not isSpaceFree(board, int(move)):
        print('What is your next move? (1-9)')
        move = input()
    return int(move)
def chooseRandomMoveFromList(board, movesList):
    """Return a valid move from the passed list on the passed board.

    Returns None if no move in ``movesList`` is free.
    """
    # Comprehension replaces the manual append loop (same result, idiomatic).
    possibleMoves = [i for i in movesList if isSpaceFree(board, i)]
    if possibleMoves:
        return random.choice(possibleMoves)
    return None
def getComputerMove(board, computerLetter):
    """Given a board and the computer's letter, determine where to move and
    return that move.
    """
    playerLetter = 'O' if computerLetter == 'X' else 'X'

    # Algorithm for Tic-Tac-Toe AI, in priority order:
    # 1. Win: if any move wins immediately, take it.
    for i in range(1, 10):
        boardCopy = getBoardCopy(board)
        if isSpaceFree(boardCopy, i):
            makeMove(boardCopy, computerLetter, i)
            if isWinner(boardCopy, computerLetter):
                return i

    # 2. Block: if the player could win on their next move, occupy it.
    for i in range(1, 10):
        boardCopy = getBoardCopy(board)
        if isSpaceFree(boardCopy, i):
            makeMove(boardCopy, playerLetter, i)
            if isWinner(boardCopy, playerLetter):
                return i

    # 3. Try to take one of the corners, if they are free.
    move = chooseRandomMoveFromList(board, [1, 3, 7, 9])
    if move is not None:  # 'is not None', not '!= None' (identity check)
        return move

    # 4. Take the center if it is free.
    if isSpaceFree(board, 5):
        return 5

    # 5. Otherwise move on one of the sides.
    return chooseRandomMoveFromList(board, [2, 4, 6, 8])
def isBoardFull(board):
    """Return True if every square (1-9) on the board has been taken."""
    return not any(isSpaceFree(board, i) for i in range(1, 10))
# Main game loop: each outer iteration is one full game; the inner loop
# alternates player and computer turns until a win or a full board.
print("Welcome to Tic Tac Toe!")
while True:
    # Reset the board (index 0 is unused; squares 1-9 hold ' ', 'X' or 'O').
    theBoard = [' '] * 10
    playerLetter, computerLetter = inputPlayerLetter()
    turn = whoGoesFirst()
    print('The ' + turn + ' will go first.')
    gameIsPlaying = True
    while gameIsPlaying:
        if turn == "player":
            # Player's turn.
            drawBoard(theBoard)
            move = getPlayerMove(theBoard)
            makeMove(theBoard, playerLetter, move)
            if isWinner(theBoard, playerLetter):
                drawBoard(theBoard)
                print('Hooray!!! You have won the game!')
                gameIsPlaying = False
            else:
                if isBoardFull(theBoard):
                    drawBoard(theBoard)
                    print('The game is a tie!')
                    break
                else:
                    turn = 'computer'
        else:
            # Computer's turn.
            move = getComputerMove(theBoard, computerLetter)
            makeMove(theBoard, computerLetter, move)
            if isWinner(theBoard, computerLetter):
                drawBoard(theBoard)
                print('The computer has dominated you!!! You lose! Muhahahaha!!!')
                gameIsPlaying = False
            else:
                if isBoardFull(theBoard):
                    drawBoard(theBoard)
                    print('The game is a tie!')
                    break
                else:
                    turn = 'player'
    # Offer a rematch; exit the program if the player declines.
    if not playAgain():
        break
| |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Return the git-archive substitution keywords.

    The "$Format:...$" placeholders are replaced by git during
    'git archive'. setup.py/versioneer.py greps for the variable names,
    so each one must stay defined on a line of its own.
    """
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    return {"refnames": git_refnames, "full": git_full}
class VersioneerConfig:
    """Plain attribute container for versioneer settings (see get_config)."""
    pass
def get_config():
    """Return the configuration hard-coded by 'setup.py versioneer'."""
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "v",
        "parentdir_prefix": "None",
        "versionfile_source": "ophyd/_version.py",
        "verbose": False,
    }
    for name, value in settings.items():
        setattr(cfg, name, value)
    return cfg
class NotThisMethod(Exception):
    """Raised when a version-discovery strategy does not apply; the caller
    falls through to the next strategy."""
    pass
# Template store for the long _version.py (populated by versioneer.py, not
# used at runtime here).
LONG_VERSION_PY = {}
# Registry of VCS handler functions: HANDLERS[vcs][method] -> callable,
# filled in by the @register_vcs_handler decorator below.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator factory that registers a function in HANDLERS[vcs][method]."""
    def decorate(f):
        # setdefault creates the per-VCS dict on first registration.
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Try each executable name in ``commands`` until one launches.

    Runs the first launchable command with ``args`` and returns its stripped,
    decoded stdout; returns None if no candidate could be started or the
    process exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            # sys.exc_info() instead of 'except ... as e' for py2/py3 compat.
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # This candidate isn't installed; try the next name.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None
    else:
        # for/else: loop finished without 'break', so nothing launched.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        # Popen returns bytes on py3; decode to text.
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
        return None
    return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Derive a version dict from the source tree's directory name.

    Source tarballs conventionally unpack into '<name>-<version>/'; when the
    basename of ``root`` starts with ``parentdir_prefix``, the remainder is
    the version. Raises NotThisMethod otherwise.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        return {"version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False, "error": None}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with "
              "prefix '%s'" % (root, dirname, parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract the expanded git-archive keywords from _version.py's text.

    The code embedded in _version.py can just fetch the value of these
    keywords, but when used from setup.py we don't want to import
    _version.py, so the file is grepped with a regexp instead. Returns a
    dict with whichever of "refnames"/"full" were found (possibly empty).
    """
    keywords = {}
    try:
        # 'with' guarantees the handle is closed even if a read raises
        # (the original leaked the descriptor on mid-read errors).
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        # Missing or unreadable file: no keywords available.
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Build a version dict from expanded git-archive keywords.

    Raises NotThisMethod when the keywords are absent or unexpanded
    (i.e. we are not inside a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs-tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None
                    }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Collect version "pieces" by invoking git in a checked-out tree.

    This runs 'git' from the root of the source tree. It only gets called
    if the git-archive 'subst' keywords were *not* expanded, and
    _version.py hasn't already been rewritten with a short version string,
    meaning we're inside a checked out source tree. Returns a dict with
    keys: long, short, closest-tag, distance, dirty, error.
    """
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False is used, so the .cmd/.exe wrappers must be named.
        GITS = ["git.cmd", "git.exe"]
    # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
    # if there are no tags, this yields HEX[-dirty] (no NUM)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long"],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    return pieces
def plus_or_dot(pieces):
    """Return the separator to start a local-version segment: '.' if the
    closest tag already contains a '+', else '+'.
    """
    # 'or ""' guards the case where "closest-tag" is present but None —
    # '"+" in None' would raise TypeError.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build TAG[+DISTANCE.gHEX[.dirty]] — a PEP 440 post-release local
    version identifier.

    A tagged-but-dirty build renders as TAG+0.gHEX.dirty. With no tags at
    all (git-describe yielded just HEX) the result is
    0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            # plus_or_dot, inlined: '.' if the tag already has a '+'.
            rendered += "." if "+" in tag else "+"
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1: no tags at all
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """Build TAG[.post.devDISTANCE]; never includes a -dirty marker.

    With no tags at all the result is 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tags
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """Build TAG[.postDISTANCE[.dev0]+gHEX]; '.dev0' marks a dirty tree.

    Note .dev0 sorts backwards (a dirty tree appears "older" than the
    corresponding clean one), but you shouldn't be releasing software with
    -dirty anyways. With no tags: 0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            # plus_or_dot, inlined: '.' if the tag already has a '+'.
            rendered += ("." if "+" in tag else "+") + "g%s" % pieces["short"]
    else:
        # exception #1: no tags
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
    return rendered
def render_pep440_old(pieces):
    """Build TAG[.postDISTANCE[.dev0]]; '.dev0' marks a dirty tree.

    With no tags at all: 0.postDISTANCE[.dev0].
    """
    tag = pieces["closest-tag"]
    # An exact, clean tag renders as just the tag.
    if tag and not (pieces["distance"] or pieces["dirty"]):
        return tag
    rendered = tag if tag else "0"
    rendered += ".post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """Mimic 'git describe --tags --dirty --always' output.

    TAG[-DISTANCE-gHEX][-dirty]; with no tags just HEX[-dirty]
    (note: no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: no tags
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """Render TAG-DISTANCE-gHEX[-dirty], mimicking
    'git describe --tags --dirty --always --long'.

    The distance/hash part is emitted unconditionally.
    Exception: with no tag at all this is HEX[-dirty] (note: no 'g' prefix).
    """
    if pieces["closest-tag"]:
        version = "%s-%d-g%s" % (pieces["closest-tag"], pieces["distance"],
                                 pieces["short"])
    else:
        # exception #1: bare short hash
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Turn a 'pieces' dict into the version dict expected by callers:
    {"version", "full-revisionid", "dirty", "error"}.

    Raises ValueError for an unrecognized style name.
    """
    if pieces["error"]:
        # VCS interrogation failed; propagate the error verbatim.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}

    # An empty/None style or the literal "default" means pep440.
    chosen = style if style and style != "default" else "pep440"

    if chosen == "pep440":
        version = render_pep440(pieces)
    elif chosen == "pep440-pre":
        version = render_pep440_pre(pieces)
    elif chosen == "pep440-post":
        version = render_pep440_post(pieces)
    elif chosen == "pep440-old":
        version = render_pep440_old(pieces)
    elif chosen == "git-describe":
        version = render_git_describe(pieces)
    elif chosen == "git-describe-long":
        version = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % chosen)

    return {"version": version,
            "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"],
            "error": None}
def get_versions():
    """Compute the version dict for this checkout/install.

    This file lives at ROOT/VERSIONFILE_SOURCE inside the source tree.
    Strategies are tried in order: expanded VCS keywords, a live git
    checkout located by walking up from __file__, and finally the
    parent-directory name.  Some py2exe/bbfreeze/non-CPython runtimes
    don't provide __file__, in which case only keywords can work.
    """
    cfg = get_config()
    verbose = cfg.verbose

    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is this file's path relative to the tree root
        # (where .git might live); strip one directory level per path
        # component to invert it and recover the root.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree"}

    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose),
                      cfg.style)
    except NotThisMethod:
        pass

    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root,
                                           verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version"}
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class cachepolicylabel_policybinding_binding(base_resource) :
	""" Binding class showing the policybinding that can be bound to cachepolicylabel.
	"""
	def __init__(self) :
		# Binding attributes; populated by the NITRO payload formatter
		# when a response is converted back into objects.
		self._policyname = ""
		self._priority = 0
		self._gotopriorityexpression = ""
		self._invoke = False
		self._labeltype = ""
		self._invoke_labelname = ""
		self._labelname = ""
		# Resource count returned by count/count_filtered requests
		# (triple underscore matches the NITRO wire attribute name).
		self.___count = 0
	@property
	def priority(self) :
		ur"""Specifies the priority of the policy.
		"""
		try :
			return self._priority
		except Exception as e:
			raise e
	@priority.setter
	def priority(self, priority) :
		ur"""Specifies the priority of the policy.
		"""
		try :
			self._priority = priority
		except Exception as e:
			raise e
	@property
	def gotopriorityexpression(self) :
		ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
		"""
		try :
			return self._gotopriorityexpression
		except Exception as e:
			raise e
	@gotopriorityexpression.setter
	def gotopriorityexpression(self, gotopriorityexpression) :
		ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
		"""
		try :
			self._gotopriorityexpression = gotopriorityexpression
		except Exception as e:
			raise e
	@property
	def policyname(self) :
		ur"""Name of the cache policy to bind to the policy label.
		"""
		try :
			return self._policyname
		except Exception as e:
			raise e
	@policyname.setter
	def policyname(self, policyname) :
		ur"""Name of the cache policy to bind to the policy label.
		"""
		try :
			self._policyname = policyname
		except Exception as e:
			raise e
	@property
	def labeltype(self) :
		ur"""Type of policy label to invoke: an unnamed label associated with a virtual server, or user-defined policy label.<br/>Possible values = reqvserver, resvserver, policylabel.
		"""
		try :
			return self._labeltype
		except Exception as e:
			raise e
	@labeltype.setter
	def labeltype(self, labeltype) :
		ur"""Type of policy label to invoke: an unnamed label associated with a virtual server, or user-defined policy label.<br/>Possible values = reqvserver, resvserver, policylabel
		"""
		try :
			self._labeltype = labeltype
		except Exception as e:
			raise e
	@property
	def labelname(self) :
		ur"""Name of the cache policy label to which to bind the policy.
		"""
		try :
			return self._labelname
		except Exception as e:
			raise e
	@labelname.setter
	def labelname(self, labelname) :
		ur"""Name of the cache policy label to which to bind the policy.
		"""
		try :
			self._labelname = labelname
		except Exception as e:
			raise e
	@property
	def invoke_labelname(self) :
		ur"""Name of the policy label to invoke if the current policy rule evaluates to TRUE.
		"""
		try :
			return self._invoke_labelname
		except Exception as e:
			raise e
	@invoke_labelname.setter
	def invoke_labelname(self, invoke_labelname) :
		ur"""Name of the policy label to invoke if the current policy rule evaluates to TRUE.
		"""
		try :
			self._invoke_labelname = invoke_labelname
		except Exception as e:
			raise e
	@property
	def invoke(self) :
		ur"""Invoke policies bound to a virtual server or a user-defined policy label. After the invoked policies are evaluated, the flow returns to the policy with the next-lower priority.
		"""
		try :
			return self._invoke
		except Exception as e:
			raise e
	@invoke.setter
	def invoke(self, invoke) :
		ur"""Invoke policies bound to a virtual server or a user-defined policy label. After the invoked policies are evaluated, the flow returns to the policy with the next-lower priority.
		"""
		try :
			self._invoke = invoke
		except Exception as e:
			raise e
	def _get_nitro_response(self, service, response) :
		ur""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(cachepolicylabel_policybinding_binding_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# errorcode 444 indicates a stale session on the appliance.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.cachepolicylabel_policybinding_binding
		except Exception as e :
			raise e
	def _get_object_name(self) :
		ur""" Returns the value of object identifier argument
		"""
		try :
			if self.labelname is not None :
				return str(self.labelname)
			return None
		except Exception as e :
			raise e
	@classmethod
	def get(cls, service, labelname) :
		ur""" Use this API to fetch cachepolicylabel_policybinding_binding resources.
		"""
		try :
			obj = cachepolicylabel_policybinding_binding()
			obj.labelname = labelname
			response = obj.get_resources(service)
			return response
		except Exception as e:
			raise e
	@classmethod
	def get_filtered(cls, service, labelname, filter_) :
		ur""" Use this API to fetch filtered set of cachepolicylabel_policybinding_binding resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = cachepolicylabel_policybinding_binding()
			obj.labelname = labelname
			option_ = options()
			option_.filter = filter_
			response = obj.getfiltered(service, option_)
			return response
		except Exception as e:
			raise e
	@classmethod
	def count(cls, service, labelname) :
		ur""" Use this API to count cachepolicylabel_policybinding_binding resources configured on NetScaler.
		"""
		try :
			obj = cachepolicylabel_policybinding_binding()
			obj.labelname = labelname
			option_ = options()
			option_.count = True
			response = obj.get_resources(service, option_)
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e:
			raise e
	@classmethod
	def count_filtered(cls, service, labelname, filter_) :
		ur""" Use this API to count the filtered set of cachepolicylabel_policybinding_binding resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = cachepolicylabel_policybinding_binding()
			obj.labelname = labelname
			option_ = options()
			option_.count = True
			option_.filter = filter_
			response = obj.getfiltered(service, option_)
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e:
			raise e
	# Enumeration of the valid labeltype values (see labeltype property).
	class Labeltype:
		reqvserver = "reqvserver"
		resvserver = "resvserver"
		policylabel = "policylabel"
class cachepolicylabel_policybinding_binding_response(base_response) :
	""" NITRO response wrapper that carries cachepolicylabel_policybinding_binding results. """
	def __init__(self, length=1) :
		# Standard NITRO response envelope fields.
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate `length` empty binding objects so the payload
		# formatter can fill them in place.  (The original assigned this
		# attribute twice -- first to [] and then to the comprehension --
		# the dead first assignment has been removed.)
		self.cachepolicylabel_policybinding_binding = [cachepolicylabel_policybinding_binding() for _ in range(length)]
| |
'''
Author: Hans Erik Heggem
Email: hans.erik.heggem@gmail.com
Project: Master's Thesis - Autonomous Inspection Of Wind Blades
Repository: Master's Thesis - CV (Computer Vision)
'''
import numpy as np
import timeit, time, warnings
from getpass import getpass
'''
Import bin libraries
'''
from src.bin.tools import RunThread, GetTimestamp, GetTimestampedFolder, CheckDir, RemoveDir
from Settings.Settings import Settings
from Settings.Exceptions import DroneVisionError, PtGreyError
from src.DroneVision.DroneVision_src.hardware.PyQtImage import PyQtImage
from src.DroneVision.DroneVision_src.hardware.imageTools import RealTimePlot, WriteImage, GetImage, CheckDisplayAvailable
from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import GetShape
'''
Import src libraries
'''
from DataBase.DataBase import DataBase
from MasterSlave.Master import Master
from DroneVision.DroneVision import DroneVision
'''
User input
'''
from src.bin.UserInput.UserInput import UserInput
'''
@brief Master class. Handles the master module.
@param settings_inst (Preconfigured settings instance (mainly for testing), used if settings_inst != None, (default=None))
@param calibrate_stereopsis_session (Set calibrate_stereopsis_session to True for starting a new stereopsis calibration session (default=False))
@param calibrate_blob_scale_detector_session (Set calibrate_blob_scale_detector_session to True for starting a new blob scale detector calibration session (default=False))
'''
class DroneMaster(Master, DroneVision, Settings, DataBase, UserInput):
	def __init__(self, settings_inst=None, calibrate_stereopsis_session=False, calibrate_blob_scale_detector_session=False):
		'''CONSTRUCTOR'''
		# Settings first: adopt a preconfigured instance (mainly for
		# testing) or load the initial settings from the Settings module.
		Settings.__init__(self, load_initial_settings=False)
		if not(isinstance(settings_inst, type(None))):
			self.ResetSettings(settings_inst.GetRawSettings())
		else:
			self.GetInitialSettings()
		self.__realTimePlot = None
		if self.GetSettings('REAL_TIME_PLOT', 'real_time_plot_on'):
			# Fall back to the matplotlib plotter when explicitly requested
			# or when no display is available for the Qt-based viewer.
			if self.GetSettings('REAL_TIME_PLOT', 'use_matplotlib') or not(CheckDisplayAvailable()):
				self.__realTimePlot = RealTimePlot(main_title='Master', interactive_mode=self.GetSettings('REAL_TIME_PLOT', 'use_interactive_mode'))
			else:
				self.__realTimePlot = PyQtImage(title='Master')
		Master.__init__(self, self.GetSettings('TCP'))
		DroneVision.__init__(self, True, self.GetSettings(), self.__realTimePlot)
		DataBase.__init__(self, self, self.GetSettings('DATABASE'))
		UserInput.__init__(self, self.GetSettings('USER_INPUT'))
		# State shared with the slave point-list request thread
		# (RequestPointlistThread writes these, GetProcessedFrames reads).
		self.__frame_un_r_shape = None
		self.__keypoints_r = None
		self.__descriptors_r = None
		self.__req_success = False
		self.__req_error = False
		self.__force_stereo_vision_calibration = False
		self.__force_blob_calibration = False
		self.__calibrate_stereopsis_session = calibrate_stereopsis_session
		self.__calibrate_blob_scale_detector_session = calibrate_blob_scale_detector_session
	def InitializeMaster(self):
		'''
		@brief Main initializing function for the master.
			Locks the master in a infinite loop.
		'''
		self.SetTimestamp(GetTimestamp())
		self.Connect() #Connect to slave
		self.SetTimestampedFolder(self.__timestamp)
		self.RequestSetTimestamp(self.__timestamp)
		self.InitDroneVision()
		self.SetDataBaseOutputFolder(self.GetCameraOutputFolder())
		if isinstance(self.__realTimePlot, RealTimePlot):
			# Only the matplotlib plotter supports saving figures to disk.
			self.__realTimePlot.SetSaveFigFolder(self.GetDatabaseOutputFolder() + self.GetSettings('REAL_TIME_PLOT', 'save_fig_folder'))
		self.RequestCVCalibration(self.__calibrate_stereopsis_session, self.__calibrate_blob_scale_detector_session)
		self.CheckRunMasterCalibration(self.__calibrate_stereopsis_session, self.__calibrate_blob_scale_detector_session)
		self.CalibratePointDetection(force_calibration=self.__force_stereo_vision_calibration, force_blob_calibration=self.__force_blob_calibration)
		self.WaitSlaveReady()
		self.SetDatabaseTableName(self.__timestamp, master=True, wait_for_user=not(self.GetSettings('USER_INPUT', 'automatic_mode')))
		self.SendFlagToSlave(True) # Send flag to slave commanding it to continue
	def RunMaster(self):
		'''
		@brief Run master indefinitely.
			Implement computer vision, manouvering and gimbal control here.
		'''
		# Cache database/drawing flags once before entering the main loop.
		store_to_db = self.GetSettings('DATABASE', 'store_process_data')
		store_frames = self.GetSettings('DATABASE', 'store_frames_as_video') or self.GetSettings('DATABASE', 'store_frames_as_images')
		store_drawn_frames = self.GetSettings('DATABASE', 'store_drawn_frames')
		draw_heading = self.GetSettings('DATABASE', 'draw_heading')
		draw_matches = self.GetSettings('DATABASE', 'draw_matches')
		draw_hough_lines = self.GetSettings('DATABASE', 'draw_hough_lines')
		draw_detected_points = self.GetSettings('DATABASE', 'draw_detected_points')
		print_3D_points = self.GetSettings('DATABASE', 'print_3D_points')
		if self.GetSettings('REAL_TIME_PLOT', 'real_time_plot_on'):
			self.__realTimePlot(reset=True)
		self.StartAutoHandleUserInput()
		self.ResetTermination()
		while not(self.CheckFinished()):
			##########################################
			#----------- COMPUTER VISION ------------#
			points_error, boundary_error, heading_error, stereo_error, heading_distance, heading_angle, points3D, frame_un_l, delta_frame_l, hough_frame, matches_frame = self.ProcessCV(draw_heading=draw_heading, draw_hough_lines=draw_hough_lines, draw_detected_points=draw_detected_points, draw_matches=draw_matches)
			if isinstance(points_error, PtGreyError): # Dominant error - continue with next frame
				continue
			if stereo_error == None and points_error == None:
				# Aggregate the triangulated 3D points into per-axis
				# mean/std statistics for the database record below.
				points3D_m = self.Points3DToMatrix(points3D)
				average_point3D = np.mean(points3D_m, axis=1)
				std_points3D = np.std(points3D_m, axis=1)
			#----------------------------------------#
			#----------- GIMBAL CONTROL -------------#
			# TODO (implement)
			#----------------------------------------#
			#----------- DRONE MANOUVERING ----------#
			# TODO (implement)
			#----------------------------------------#
			#----------- STORE IN DATABASE ----------#
			if points_error == None:
				if stereo_error == None:
					self.SetProcessData('X_average', average_point3D[0,0])
					self.SetProcessData('Y_average', average_point3D[1,0])
					self.SetProcessData('Z_average', average_point3D[2,0])
					self.SetProcessData('Z_std', std_points3D[2,0])
				if heading_error == None:
					self.SetProcessData('rho', heading_distance)
					self.SetProcessData('theta', heading_angle)
				if store_frames and store_drawn_frames:
					if draw_heading:
						self.SetProcessFrame('heading', frame_un_l)
					if draw_matches and not(isinstance(stereo_error, DroneVisionError)):
						self.SetProcessFrame('matches', matches_frame)
					if draw_hough_lines and not(isinstance(boundary_error, DroneVisionError)):
						self.SetProcessFrame('hough_lines', hough_frame)
					if draw_detected_points:
						self.SetProcessFrame('points', delta_frame_l)
				if not(print_3D_points) or stereo_error != None or points_error != None:
					points3D = []
				self.RecordData(record_frames=store_frames, insert_to_database=store_to_db, print_progress=True, points3D=points3D)
			#----------------------------------------#
			#-------- SHOW RESULTS REALTIME ---------#
			if self.GetSettings('REAL_TIME_PLOT', 'real_time_plot_on'):
				if not(isinstance(points_error, DroneVisionError)):
					plot_frames = []
					if draw_heading:
						plot_frames.append(('heading', frame_un_l))
					if draw_matches and not(isinstance(stereo_error, DroneVisionError)):
						plot_frames.append(('matches', matches_frame))
					if draw_hough_lines and not(isinstance(boundary_error, DroneVisionError)):
						plot_frames.append(('hough_lines', hough_frame))
					if draw_detected_points:
						plot_frames.append(('points', delta_frame_l))
					self.__realTimePlot(plot_frames)
			#----------------------------------------#
			#---- DELETE FRAMES TO FREE MEMORY ----#
			del frame_un_l
			del delta_frame_l
			del matches_frame
			#----------------------------------------#
			##########################################
		self.ForceTermination()
		self.PrintFinished()
		self.RequestStop()
		self.SendFinishRequest()
		self.CloseMaster()
	def SendFinishRequest(self):
		'''
		@brief Send finish request from master to slave
		'''
		if self.GetSettings('REAL_TIME_PLOT', 'real_time_plot_on'):
			# Keep the plot window open until the user acknowledges,
			# unless running non-interactive matplotlib.
			if (self.GetSettings('REAL_TIME_PLOT', 'use_matplotlib') and self.GetSettings('REAL_TIME_PLOT', 'use_interactive_mode')) or not(self.GetSettings('REAL_TIME_PLOT', 'use_matplotlib')):
				getpass('Hit enter to terminate..')
		self.RequestDisconnect()
	def CheckRunMasterCalibration(self, calibrate_stereopsis=False, calibrate_blob_scale_detector=False):
		'''
		@brief Check if a new calibration session should be started.
		@param calibrate_stereopsis (Set calibrate_stereopsis to True for starting a new stereopsis calibration session (default=False))
		@param calibrate_blob_scale_detector (Set calibrate_blob_scale_detector to True for starting a new blob scale detector calibration session (default=False))
		'''
		n_stereo_saved_calib_frame_sets = 0
		n_blob_saved_calib_frame_sets = 0
		if calibrate_stereopsis:
			calib_folder = self.GetSettings('CALIB', 'calib_img_folder_left_cam')
			n_stereo_saved_calib_frame_sets += self.RunMasterCalibration(calib_folder, True)
		if calibrate_blob_scale_detector:
			calib_folder = self.GetSettings('BLOB_SCALE', 'scale_calib_folder')
			n_blob_saved_calib_frame_sets += self.RunMasterCalibration(calib_folder, False)
		if (calibrate_stereopsis or calibrate_blob_scale_detector) and not(self.GetSettings('BASIC', 'reset_calibration')):
			# Ask the user whether to recalibrate from the freshly captured
			# samples; the slave must receive the same decisions in order.
			self.__force_stereo_vision_calibration = self.GetYesNoFromUser('Force new stereopsis calibration with new calibration samples?')
			self.__force_blob_calibration = self.GetYesNoFromUser('Force new blob calibration with new calibration samples?')
			self.SendFlagToSlave(self.__force_stereo_vision_calibration)
			self.SendFlagToSlave(self.__force_blob_calibration)
		if self.GetSettings('REAL_TIME_PLOT', 'real_time_plot_on'):
			self.__realTimePlot(reset=True)
	def RunMasterCalibration(self, calib_folder, stereo_or_blob_calib, calib_filename='calib_frame'):
		'''
		@brief Run master calibration.
			Manually trig new images of a chessboard to calibrate the stereopsis system.
		@param calib_folder (folder where the captured calibration frames are stored)
		@param stereo_or_blob_calib (True for stereopsis calibration, False for blob scale calibration)
		@param calib_filename (Basic filename of save frames)
		@return n_saved_calib_frame_sets
		'''
		first_image_set = True
		if self.GetSettings('REAL_TIME_PLOT', 'real_time_plot_on'):
			self.__realTimePlot(reset=True)
		self.PrintCalibrationSession(calib_folder, stereo_or_blob_calib, calib_filename)
		if stereo_or_blob_calib:
			get_normal_frame_only = True
		else:
			normal_folder = calib_folder + 'normal/'
			sl_folder = calib_folder + 'sl/'
			get_normal_frame_only = False # blob calibration demands sl frames
		frame_n = 0
		while not(self.CheckFinished()):
			self.SendFlagToSlave(True) # Send flag to slave commanding it to continue
			self.RequestFrameProcessingOnSlave() # Trigger slave to wait for new frame capturing.
			try:
				frame, sl_frame = self.GetRawFrames(get_normal_frame_only=get_normal_frame_only)
			except PtGreyError, err:
				# Camera failure: warn, restart both cameras and retry.
				warnings.simplefilter('always')
				warnings.warn(str(err), Warning)
				warnings.simplefilter('default')
				self.RestartCamera()
				self.RequestRestartPtGrey()
				continue
			self.WaitSlaveReady()
			#-------- SHOW RESULTS REALTIME ---------#
			if self.GetSettings('REAL_TIME_PLOT', 'real_time_plot_on'):
				if get_normal_frame_only:
					title = 'stereopsis_calibration_session'
					plot_frames = [('frame', frame)]
				else:
					title = 'blob_calibration_session'
					plot_frames = [('frame', frame), ('sl_frame', sl_frame)]
				self.__realTimePlot(plot_frames, title)
			#----------------------------------------#
			answer = self.GetYesNoFromUser('Store images to calibration folder?')
			self.SendFlagToSlave(answer)
			if answer:
				if first_image_set:
					# Wipe stale calibration data before saving the first set.
					first_image_set = False
					if get_normal_frame_only:
						RemoveDir(calib_folder)
						CheckDir(calib_folder)
					else:
						RemoveDir(normal_folder)
						RemoveDir(sl_folder)
						CheckDir(normal_folder)
						CheckDir(sl_folder)
				print 'Master is saving calibration frames [{0}]..'.format(frame_n+1)
				if get_normal_frame_only:
					WriteImage(frame, calib_folder + calib_filename + '_' + str(frame_n))
				else:
					WriteImage(frame, normal_folder + calib_filename + '_' + str(frame_n))
					WriteImage(sl_frame, sl_folder + calib_filename + '_' + str(frame_n))
				frame_n += 1
			else:
				print 'Master did not save calibration frames..'
		self.SendFlagToSlave(False) # Send flag to slave commanding it to break
		if get_normal_frame_only: # Transfer all stereopsis images back and forth to slave/master
			if frame_n > 0:
				RemoveDir(self.GetSettings('CALIB', 'calib_img_folder_right_cam'))
				CheckDir(self.GetSettings('CALIB', 'calib_img_folder_right_cam'))
				for n_traded_frame in range(frame_n):
					traded_frame = GetImage(calib_folder + calib_filename + '_' + str(n_traded_frame) + '.tif')
					print 'Trading frame {0}/{1} with slave..'.format(n_traded_frame+1, frame_n)
					traded_frame, valid, error = self.RequestTradeFrame(self.GetSettings('CALIB', 'calib_img_folder_right_cam') + calib_filename + '_' + str(n_traded_frame) + '.tif', traded_frame)
					if not(valid) or error:
						raise Exception('Failed trading frames with slave')
					WriteImage(traded_frame, self.GetSettings('CALIB', 'calib_img_folder_right_cam') + calib_filename + '_' + str(n_traded_frame))
			self.SendFlagToSlave(True) # Send flag to slave commanding it to break
		return frame_n
	def ProcessCV(self, draw_heading=False, draw_hough_lines=False, draw_detected_points=False, draw_matches=False):
		'''
		@brief Process computer vision steps
		@param draw_heading (default=False)
		@param draw_hough_lines (default=False) - draw_hough_lines overwrites draw_detected_points
		@param draw_detected_points (default=False)
		@param draw_matches (default=False)
		@return points_error, boundary_error, heading_error, stereo_error, cv_results (Returns: points_error, heading_error, stereo_error (None, if no error and points_error as dominant error), cv_results = tuple containing elements of desired results.)
		'''
		points_error, frame_un_l, delta_frame_l, keypoints_l, descriptors_l, und_shape_r, keypoints_r, descriptors_r = self.GetProcessedFrames(draw_detected_points=draw_detected_points)
		if points_error != None:
			return points_error, None, None, None, None, None, None, None, None, None, None # Return failed frames.
		boundary_error, heading_error, heading_distance, heading_angle, frame_un_l, hough_frame = self.ProcessHeading(frame_un_l, delta_frame_l, keypoints_l, draw_heading=draw_heading, draw_hough_lines=draw_hough_lines)
		stereo_error, points3D, matches_frame = self.ProcessStereopsis(GetShape(frame_un_l), und_shape_r, keypoints_l, descriptors_l, keypoints_r, descriptors_r, draw_matches=draw_matches)
		return None, boundary_error, heading_error, stereo_error, heading_distance, heading_angle, points3D, frame_un_l, delta_frame_l, hough_frame, matches_frame
	def GetProcessedFrames(self, draw_detected_points=False):
		'''
		@brief Get processed left and right frame simultaneously.
			Returns error if an error occurs (error=None if not).
		@param draw_detected_points (default=False)
		@return error, frame_un_l, delta_frame_l, keypoints_l, descriptors_l, frame_un_r_shape, keypoints_r, descriptors_r
		'''
		self.RequestFrameProcessingOnSlave() # Trig slave to capture new frames triggered by the master.
		try:
			original_frame_l, original_sl_frame_l = self.GetRawFrames() # Get new frames from slave, which triggers new frames to be captured on slave
			if self.GetSettings('DATABASE', 'store_frames_as_video') or self.GetSettings('DATABASE', 'store_frames_as_images'): # Store frames here to relieve memory. The frames are deleted as soon as possible.
				self.SetProcessFrame('original_left', original_frame_l)
				self.SetProcessFrame('original_sl_left', original_sl_frame_l)
				self.RecordData(record_frames=True, insert_to_database=False)
		except PtGreyError, err:
			# Camera failure: warn, restart both cameras, report the error.
			warnings.simplefilter('always')
			warnings.warn(str(err), Warning)
			warnings.simplefilter('default')
			self.RestartCamera()
			self.RequestRestartPtGrey()
			return err, None, None, None, None, None, None, None
		# Fetch the slave's (right) point list in parallel while the
		# master processes its own (left) frame.
		t = RunThread(self.RequestPointlistThread)
		try:
			original_frame_l, original_sl_frame_l, frame_un_l, delta_frame_l, keypoints_l, descriptors_l = self.GetProcessedFrame(original_frame=original_frame_l, original_sl_frame=original_sl_frame_l, draw_detected_points=draw_detected_points) #Master is positioned to the left (left frame)
			del original_frame_l
			del original_sl_frame_l
			t.join() # Wait for slave point list request to finish
			if not(self.__req_success):
				if self.__req_error:
					self.RequestRestartPtGrey()
				raise DroneVisionError('could_not_get_point_list_from_slave')
		except DroneVisionError, err:
			self.__break_req = True # Break slave point list request if it's still running.
			warnings.simplefilter('always')
			warnings.warn(str(err), Warning)
			warnings.simplefilter('default')
			t.join() # Wait for slave point list request to terminate
			return err, None, None, None, None, None, None, None
		return None, frame_un_l, delta_frame_l, keypoints_l, descriptors_l, self.__frame_un_r_shape, self.__keypoints_r, self.__descriptors_r
	def RequestPointlistThread(self):
		'''
		@brief Request point list from slave.
			Execute in thread.
		'''
		self.__break_req = False
		self.__req_success = False
		self.__req_error = False
		timeout = timeit.default_timer()
		# Retry until success, error, timeout, or an external break request.
		while (not(self.__req_success) and not(self.__req_error) and (timeit.default_timer() - timeout) < self.GetSettings('TCP', 'frame_req_timeout')) and not(self.__break_req):
			self.__frame_un_r_shape, self.__keypoints_r, self.__descriptors_r, self.__req_success, self.__req_error = self.RequestPointList() #Slave is positioned to the right (right frame)
	def CheckFinished(self):
		'''
		@Check if master is finished.
		@return True/False - True = Finished, False = Not Finished
		'''
		stop = False
		if self.CheckDroneVisionFinished() or self.CheckTerminated():
			stop = True
		return stop #else
	def SetTimestamp(self, timestamp):
		'''
		@brief Set timestamp for this session
		@param timestamp
		'''
		self.__timestamp = timestamp
	def SetTimestampedFolder(self, timestamp):
		'''
		@brief Set timestamped folder for this session.
		@param timestamp String
		'''
		self.ChangeSetting('DATABASE', 'output_folder', GetTimestampedFolder(timestamp, self.GetSettings('DATABASE', 'output_folder'), self.GetSettings('DATABASE', 'sub_output_folder')))
	def WaitSlaveReady(self):
		'''
		@brief Wait for slave to be ready.
		'''
		slave_ready = False
		timeout = timeit.default_timer()
		# Poll the slave until ready; a negative calib_timeout disables
		# the timeout entirely.
		while not(slave_ready) and ((timeit.default_timer() - timeout) < self.GetSettings('CALIB', 'calib_timeout') or self.GetSettings('CALIB', 'calib_timeout') < 0):
			slave_ready = self.RequestSlaveReady()
			if not(slave_ready):
				time.sleep(0.1)
		if not(slave_ready):
			raise Exception('Slave could not be ready within the timeout.')
	def CloseMaster(self):
		'''
		@brief Close master safely
		'''
		Master.__del__(self)
		DataBase.__del__(self)
	def __del__(self):
		'''DESTRUCTOR'''
		self.CloseMaster()
| |
from __future__ import unicode_literals
from django import forms
from django.contrib.auth.hashers import is_password_usable
from django.contrib.auth.hashers import make_password
from django.contrib.auth import get_user_model
from django.core import signing
from django.core.exceptions import ObjectDoesNotExist
from django.template import loader
try:
# SortedDict is deprecated as of Django 1.7 and will be removed in Django 1.9.
# https://code.djangoproject.com/wiki/SortedDict
from collections import OrderedDict as SortedDict
except ImportError:
from django.utils.datastructures import SortedDict
try:
from django.contrib.sites.models import get_current_site
except ImportError:
from django.contrib.sites.shortcuts import get_current_site
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.utils.translation import ugettext_lazy as _
from password_policies.conf import settings
from password_policies.forms.fields import PasswordPoliciesField
from password_policies.models import PasswordHistory
from password_policies.models import PasswordChangeRequired
class PasswordPoliciesForm(forms.Form):
    """
    A form that lets a user set his/her password without entering the
    old password.

    Has the following fields and methods:
    """
    #: This forms error messages.
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
        'password_used': _("The new password was used before. "
                           "Please enter another one."),
    }
    new_password1 = PasswordPoliciesField(label=_("New password"),
                                          max_length=settings.PASSWORD_MAX_LENGTH,
                                          min_length=settings.PASSWORD_MIN_LENGTH)
    new_password2 = forms.CharField(label=_("New password confirmation"),
                                    widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        """
        Initializes the form.

        :arg user: A :class:`~django.contrib.auth.models.User` instance.
        """
        self.user = user
        super(PasswordPoliciesForm, self).__init__(*args, **kwargs)

    def clean_new_password1(self):
        """
        Validates that a given password was not used before.
        """
        candidate = self.cleaned_data.get('new_password1')
        if not settings.PASSWORD_USE_HISTORY:
            return candidate
        # Reject the password if it matches the current one or any entry
        # in the user's stored password history.
        reused = (self.user.check_password(candidate) or
                  not PasswordHistory.objects.check_password(self.user,
                                                             candidate))
        if reused:
            raise forms.ValidationError(self.error_messages['password_used'])
        return candidate

    def clean_new_password2(self):
        """
        Validates that the two new passwords match.
        """
        pw1 = self.cleaned_data.get('new_password1')
        pw2 = self.cleaned_data.get('new_password2')
        if pw1 and pw2 and pw1 != pw2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'])
        return pw2

    def save(self, commit=True):
        """
        Sets the user's password to the new one and creates an entry
        in the user's password history,
        if :py:attr:`~password_policies.conf.Settings.PASSWORD_USE_HISTORY`
        is set to ``True``.
        """
        raw_password = self.cleaned_data['new_password1']
        self.user.set_password(raw_password)
        if commit:
            self.user.save()
            if settings.PASSWORD_USE_HISTORY:
                # Record the (hashed) password and prune expired history.
                hashed = make_password(raw_password)
                PasswordHistory.objects.create(password=hashed,
                                               user=self.user)
                PasswordHistory.objects.delete_expired(self.user)
            PasswordChangeRequired.objects.filter(user=self.user).delete()
        return self.user
class PasswordPoliciesChangeForm(PasswordPoliciesForm):
    """
    A form that lets a user change his/her password by entering
    their old password.

    Has the following fields and methods:
    """
    #: This forms error messages.
    error_messages = dict(PasswordPoliciesForm.error_messages, **{
        'password_incorrect': _("Your old password was entered incorrectly. "
                                "Please enter it again."),
        'password_similar': _("The old and the new password are too similar."),
        'password_identical': _("The old and the new password are the same."),
    })
    old_password = forms.CharField(label=_("Old password"),
                                   widget=forms.PasswordInput)

    def clean_old_password(self):
        """
        Validates the current password.
        """
        supplied = self.cleaned_data["old_password"]
        if not self.user.check_password(supplied):
            raise forms.ValidationError(
                self.error_messages['password_incorrect'])
        return supplied

    def clean(self):
        """
        Validates that old and new password are not too similar.
        """
        cleaned_data = super(PasswordPoliciesChangeForm, self).clean()
        old_password = cleaned_data.get("old_password")
        new_password1 = cleaned_data.get("new_password1")
        if old_password and new_password1:
            if (old_password == new_password1
                    and not settings.PASSWORD_USE_HISTORY):
                raise forms.ValidationError(
                    self.error_messages['password_identical'])
            elif settings.PASSWORD_DIFFERENCE_DISTANCE:
                # Similarity check is optional: it only runs when the
                # python-Levenshtein package is installed.
                try:
                    import Levenshtein
                except ImportError:
                    pass
                else:
                    edit_distance = Levenshtein.distance(old_password,
                                                         new_password1)
                    if edit_distance < settings.PASSWORD_DIFFERENCE_DISTANCE:
                        raise forms.ValidationError(
                            self.error_messages['password_similar'])
        return cleaned_data

    def save(self, commit=True):
        user = super(PasswordPoliciesChangeForm, self).save(commit=commit)
        try:
            # Checking the object id prevents an AssertionError
            # ("id is None") when deleting an unsaved instance.
            change_required = user.password_change_required
            if change_required and change_required.id:
                change_required.delete()
        except ObjectDoesNotExist:
            pass
        return user
# Reorder the form fields so the old password is asked for first.
PasswordPoliciesChangeForm.base_fields = SortedDict([
    (k, PasswordPoliciesChangeForm.base_fields[k])
    for k in ['old_password', 'new_password1', 'new_password2']
])
class PasswordResetForm(forms.Form):
    """
    A form to let a user reset his/her password.
    Has the following fields and methods:
    """
    #: This forms error messages.
    error_messages = {
        'unknown': _("That e-mail address doesn't have an associated "
                     "user account. Are you sure you've registered?"),
        'unusable': _("The user account associated with this e-mail "
                      "address cannot reset the password."),
    }
    # TODO: Help text?
    email = forms.EmailField(label=_("E-mail"), max_length=75, help_text='help')
    def clean_email(self):
        """
        Validates that an active user exists with the given email address.
        """
        email = self.cleaned_data["email"]
        # Cache the matching users for save(); several accounts may share
        # one address, all of them get a reset mail.
        self.users_cache = get_user_model().objects.filter(email__iexact=email, is_active=True)
        if not len(self.users_cache):
            raise forms.ValidationError(self.error_messages['unknown'])
        if any(not is_password_usable(user.password)
               for user in self.users_cache):
            raise forms.ValidationError(self.error_messages['unusable'])
        return email
    def save(self, domain_override=None,
             subject_template_name='registration/password_reset_subject.txt',
             email_template_name='registration/password_reset_email.txt',
             email_html_template_name='registration/password_reset_email.html',
             use_https=False, from_email=None, request=None):
        """
        Generates a one-use only link for resetting password and sends to the
        user.

        :arg str domain_override: A string that changes the site name and
            domain if needed.
        :arg str email_template_name: A relative path to a template in the root of a
            template directory to generate the body of the mail.
        :arg str email_html_template_name: A relative path to a template in the root of
            a template directory to generate the HTML attachment of the mail.
        :arg str from_email: The email address to use as sender of the email.
        :arg request: A HttpRequest instance.
        :arg str subject_template_name: A relative path to a template in the root of
            a template directory to generate the subject of the mail.
        :arg bool use_https: Determines wether to use HTTPS while generating
            the one-use only link for resetting passwords.

        .. note:: ``save()`` assumes :py:meth:`clean_email` ran first and
           populated ``self.users_cache``.
        """
        from django.core.mail import EmailMultiAlternatives
        context = self.get_context_data(request,
                                        domain_override,
                                        use_https)
        signer = signing.TimestampSigner()
        for user in self.users_cache:
            # NOTE(review): this aliases (does not copy) the shared context
            # dict; harmless today because every key set below is rewritten
            # on each iteration.
            c = context
            # TimestampSigner.sign returns "value:timestamp:signature".
            var = signer.sign(user.password)
            var = var.split(':')
            c['email'] = user.email
            c['signature'] = var[2]
            c['timestamp'] = var[1]
            c['uid'] = urlsafe_base64_encode(force_bytes(user.id))
            c['user'] = user
            subject = loader.render_to_string(subject_template_name, c)
            # Email subject *must not* contain newlines
            subject = ''.join(subject.splitlines())
            email = loader.render_to_string(email_template_name, c)
            html = loader.render_to_string(email_html_template_name, c)
            msg = EmailMultiAlternatives(subject, email, from_email, [user.email])
            msg.attach_alternative(html, "text/html")
            msg.send()
    def get_context_data(self, request, domain_override, use_https):
        """
        Returns a dictionary with common context items.

        :arg request: A HttpRequest instance.
        :arg str domain_override: A string that changes the site name and
            domain if needed.
        :arg bool use_https: Determines wether to use HTTPS while generating
            the one-use only link for resetting passwords.
        """
        if not domain_override:
            current_site = get_current_site(request)
            site_name = current_site.name
            domain = current_site.domain
        else:
            site_name = domain = domain_override
        context = {
            'domain': domain,
            'site_name': site_name,
            'protocol': use_https and 'https' or 'http',
        }
        return context
class PasswordPoliciesRegistrationForm(forms.Form):
    """
    A form to support user registration with password policies.
    Has the following fields and methods:
    """
    #: This forms error messages.
    error_messages = {
        'duplicate_username': _("A user with that username already exists."),
        'password_mismatch': _("The two password fields didn't match."),
    }
    username = forms.RegexField(label=_("Username"), max_length=30,
                                regex=r'^[\w.@+-]+$',
                                help_text=_("Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only."),
                                error_messages={'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")}
                                )
    password1 = PasswordPoliciesField(label=_("Password"),
                                      max_length=settings.PASSWORD_MAX_LENGTH,
                                      min_length=settings.PASSWORD_MIN_LENGTH)
    password2 = forms.CharField(label=_("Password confirmation"),
                                widget=forms.PasswordInput,
                                help_text=_("Enter the same password as above, for verification."))
    def clean_username(self):
        """
        Validates that the username is not already taken.
        """
        username = self.cleaned_data["username"]
        # NOTE(review): an empty/falsy username also raises
        # 'duplicate_username' here; the message is misleading in that case.
        if username and not get_user_model().objects.filter(username__iexact=username).count():
            return username
        raise forms.ValidationError(self.error_messages['duplicate_username'])
    def clean_password2(self):
        """
        Validates that the two passwords are identical.
        """
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'])
        return password2
| |
class text_markup_exception(Exception):
    '''Raised when an unsupported markup or color name is requested.'''

    def __init__(self, value):
        # Keep the offending description around for callers to inspect.
        self.value = value

    def __str__(self):
        return repr(self.value)
class text_markup_base(object):
    '''Abstract bolding and colorizing of text.  The base class only
    validates its arguments and returns the text unmodified.  This is way
    overengineered...'''

    # supported colors (note: BLACK is defined but not listed in Colors)
    RED = 'red'
    WHITE = 'white'
    BLUE = 'blue'
    GREEN = 'green'
    YELLOW = 'yellow'
    BLACK = 'black'
    RAINBOW = 'rainbow'  # special hanabi color, hacky.
    Colors = [RED, WHITE, BLUE, GREEN, YELLOW, RAINBOW]

    # supported markup
    BOLD = 'bold'
    UNDERLINE = 'underline'
    Markups = [BOLD, UNDERLINE]

    def __init__(self):
        pass

    def markup(self, text, markup):
        '''Validate the markup name and return the text unchanged; raise
        text_markup_exception for unsupported markup.'''
        if markup not in text_markup_base.Markups:
            raise text_markup_exception('Unknown markup: %s' % markup)
        return text

    def color(self, text, color):
        '''Validate the color name and return the text unchanged; raise
        text_markup_exception for unsupported colors.'''
        if color not in text_markup_base.Colors:
            raise text_markup_exception('Unknown color: %s' % color)
        return text

    def bold(self, text):
        return self.markup(text, text_markup_base.BOLD)

    def underline(self, text):
        return self.markup(text, text_markup_base.UNDERLINE)
class irc_markup(text_markup_base):
    '''
    mIRC specific markup encodings.
    See ascii_markup for usage examples.
    '''
    # Reference: mIRC defines 16 color codes (0=white, 1=black, 4=red,
    # 8=yellow, 9=light green, 11=light blue, ...) and control bytes
    # 0x02 bold, 0x03 color, 0x0f reset, 0x1f underline, 0x16 reverse.
    # Only the hanabi colors are mapped below.
    _colormap = {
        text_markup_base.WHITE: 0,
        text_markup_base.BLUE: 11,
        text_markup_base.GREEN: 9,
        text_markup_base.RED: 4,
        text_markup_base.YELLOW: 8,
        text_markup_base.BLACK: 1,
        text_markup_base.RAINBOW: 0
    }

    def __init__(self):
        text_markup_base.__init__(self)
        # Background is always rendered as black.
        self.bg_color = irc_markup._colormap[text_markup_base.BLACK]

    def markup(self, text, markup):
        # Base class call is for validation only.
        text_markup_base.markup(self, text, markup)
        delimiters = {
            text_markup_base.BOLD: '\x02',
            text_markup_base.UNDERLINE: '\x1f',
        }
        delim = delimiters.get(markup)
        if delim is None:
            return text
        return '%s%s%s' % (delim, text, delim)

    def color(self, text, color):
        # Base class call is for validation only.
        text_markup_base.color(self, text, color)
        if color == text_markup_base.RAINBOW:
            # Cycle each character through the real colors (the modulus
            # skips the trailing RAINBOW entry itself).
            palette = text_markup_base.Colors
            span = len(palette) - 1
            return ''.join(self.color(ch, palette[i % span])
                           for i, ch in enumerate(text))
        return '\x03%02d,%02d%s\x03' % (irc_markup._colormap[color],
                                        irc_markup._colormap[text_markup_base.BLACK],
                                        text)
class xterm_markup(text_markup_base):
    '''
    ANSI-escape based markup for xterm-like terminals.
    '''
    # Standard ANSI foreground color codes.
    _colormap = {
        text_markup_base.WHITE: 37,
        text_markup_base.BLUE: 34,
        text_markup_base.GREEN: 32,
        text_markup_base.RED: 31,
        text_markup_base.YELLOW: 33,
        text_markup_base.RAINBOW: 37
    }

    def __init__(self):
        text_markup_base.__init__(self)

    def markup(self, text, markup):
        # Validate only; bolding would require knowing the current
        # foreground color, so the text is returned unchanged.
        text_markup_base.markup(self, text, markup)
        return text

    def color(self, text, color):
        text_markup_base.color(self, text, color)
        if color != text_markup_base.RAINBOW:
            return '\033[%d;1m%s\033[0m' % (xterm_markup._colormap[color],
                                            text)
        # Rainbow: color each character in turn, skipping the RAINBOW
        # entry at the end of the palette.
        palette = text_markup_base.Colors
        span = len(palette) - 1
        return ''.join(self.color(ch, palette[i % span])
                       for i, ch in enumerate(text))
class ascii_markup(text_markup_base):
    '''
    pseudo markup for ascii terminals.
    pydoctest code/sample:
    >>> from text_markup import ascii_markup as markup
    >>> m = markup()
    >>> m.color('hello', m.RED)
    'Rhello'
    >>> m.markup('hello', m.BOLD)
    'HELLO'
    >>> m.color('hello', 'purple')
    Traceback (most recent call last):
    ...
    text_markup_exception: 'Unknown color: purple'
    >>> m.markup('hello', 'underlined')
    Traceback (most recent call last):
    ...
    text_markup_exception: 'Unknown markup: underlined'
    '''
    # Single-letter color prefixes; RAINBOW gets a multi-letter tag.
    _colormap = {
        text_markup_base.WHITE: 'W',
        text_markup_base.BLUE: 'B',
        text_markup_base.GREEN: 'G',
        text_markup_base.RED: 'R',
        text_markup_base.YELLOW: 'Y',
        text_markup_base.RAINBOW: 'RNBW'
    }

    def __init__(self):
        text_markup_base.__init__(self)

    def bold(self, text):
        return self.markup(text, self.BOLD)

    def markup(self, text, markup):
        # Validate, then "mark up" by upper-casing the text.
        text_markup_base.markup(self, text, markup)
        return text.upper()

    def color(self, text, color):
        # Validate, then prefix the text with the color letter.
        text_markup_base.color(self, text, color)
        return ascii_markup._colormap[color] + text
if __name__ == "__main__":
    # Run the doctests embedded in this module (see ascii_markup).
    import doctest
    doctest.testmod()
| |
"""
Stochastic processes.
"""
# TODO:
# Generalize lattice_dims to have non-zero minimum
# Example script with real-time plotting
import random
class RandomWalk(object):
    """
    The base random walk class.

    Stores the trajectory in ``self.pos``; subclasses provide ``step()``
    and ``walk()``.  Positions are ints (1-D) or tuples of ints (N-D).

    @param dims: number of spatial dimensions
    @param pos0: initial position (number for 1-D, sequence otherwise);
        defaults to the origin
    @param lattice_dims: upper lattice bound(s), or None for an unbounded
        walk
    @param kwargs: extra options stored for subclasses
    @raise ValueError: if pos0 or lattice_dims has the wrong type/length
    """
    def __init__(self, dims, pos0, lattice_dims, kwargs):
        self.pos = []
        if dims == 1:
            if isinstance(pos0, (int, float)):
                self.pos.append(int(pos0))
            elif pos0 is None:
                self.pos.append(0)
            else:
                raise ValueError("Initial position is not int or float")
            if isinstance(lattice_dims, (int, float)):
                self.lattice_dims = lattice_dims
            elif lattice_dims is None:
                self.lattice_dims = None
            else:
                raise ValueError("Input lattice dim is not int or float")
        else:
            if isinstance(pos0, (list, tuple)):
                if len(pos0) == dims:
                    self.pos.append(tuple(map(int, pos0)))
                else:
                    raise ValueError("Initial position has wrong dimensions")
            elif pos0 is None:
                # BUG FIX: default to the origin of the correct
                # dimensionality (previously always [0, 0], which broke
                # 3-D walks and mixed list/tuple positions).
                self.pos.append(tuple([0] * dims))
            else:
                raise ValueError("Initial position is not list or tuple")
            if isinstance(lattice_dims, (list, tuple)):
                if len(lattice_dims) == dims:
                    self.lattice_dims = tuple(lattice_dims)
                else:
                    raise ValueError("Input lattice has wrong dimensions")
            elif lattice_dims is None:
                self.lattice_dims = None
            else:
                raise ValueError("Input lattice dims is not list or tuple")
        self.kwargs = kwargs
    def get_position(self):
        """Return the current (most recent) position."""
        return self.pos[-1]
    def get_trajectory(self):
        """Return the full list of visited positions."""
        return self.pos
    def unpack(self):
        """Return the trajectory as per-axis sequences (e.g. xs, ys)."""
        # list() so the result is reusable on Python 3, where zip() is lazy.
        return list(zip(*self.pos))
    def current_position(self):
        """Print the current position on a single line."""
        pos = self.pos[-1]
        if isinstance(pos, (int, float)):
            # BUG FIX: 1-D positions are scalars and cannot be joined.
            print(pos)
        else:
            print(" ".join(map(str, pos)))
class RandomWalk1D(RandomWalk):
    """
    Performs a random walk in one dimension.
    """
    def __init__(self, pos0=None, lattice_dims=None, **kwargs):
        RandomWalk.__init__(self, 1, pos0, lattice_dims, kwargs)
    def step(self):
        """
        Takes a single step.
        @rtype: number
        @return: new position (current position +/- 1, chosen uniformly)
        """
        if random.randint(1, 2) == 1:
            # Move right
            return self.pos[-1] + 1
        # Move left
        return self.pos[-1] - 1
    def walk(self, n_steps, verbose=False):
        """
        Takes multiple steps.
        @type n_steps: number
        @param n_steps: number of steps
        @type verbose: boolean
        @param verbose: print position at each step [default=False]
        """
        for n in range(n_steps):
            new_pos = self.step()
            # BUG FIX: an unbounded walk (lattice_dims=None, the default)
            # used to crash here comparing int with None; only clamp when
            # a lattice is defined.
            if self.lattice_dims is not None:
                # Keep the particle within the lattice dimensions
                while (new_pos < 0 or new_pos > self.lattice_dims):
                    new_pos = self.step()
            self.pos.append(new_pos)
            if verbose:
                self.current_position()
class RandomWalk2D(RandomWalk):
    """
    Performs a random walk in two dimensions.
    """
    def __init__(self, pos0=None, lattice_dims=None, **kwargs):
        RandomWalk.__init__(self, 2, pos0, lattice_dims, kwargs)
    def step(self):
        """
        Takes a single step.
        @rtype: vector
        @return: new position, one unit away along a random axis
        """
        x, y = self.pos[-1][0], self.pos[-1][1]
        direction = random.randint(1, 4)
        if direction == 1:
            # Move right
            new_pos = (x + 1, y)
        elif direction == 2:
            # Move left
            new_pos = (x - 1, y)
        elif direction == 3:
            # Move up
            # BUG FIX: directions 3 and 4 previously duplicated "move
            # right" ((x + 1, y)), so the walk never moved on the y-axis.
            new_pos = (x, y + 1)
        else:
            # Move down
            new_pos = (x, y - 1)
        return new_pos
    def walk(self, n_steps, verbose=False):
        """
        Takes multiple steps.
        @type n_steps: number
        @param n_steps: number of steps
        @type verbose: boolean
        @param verbose: print position at each step [default=False]
        """
        for n in range(n_steps):
            new_pos = self.step()
            # BUG FIX: skip the bounds check for unbounded walks
            # (lattice_dims=None is the documented default).
            if self.lattice_dims is not None:
                # Keep the particle within the lattice dimensions
                while (new_pos[0] < 0 or new_pos[0] > self.lattice_dims[0]
                       or new_pos[1] < 0 or new_pos[1] > self.lattice_dims[1]):
                    new_pos = self.step()
            self.pos.append(new_pos)
            if verbose:
                self.current_position()
class RandomWalk3D(RandomWalk):
    """
    Performs a random walk in three dimensions.
    """
    # Unit moves keyed by the die roll drawn in step(): +/-x, +/-y, +/-z.
    _MOVES = {
        1: (1, 0, 0),
        2: (-1, 0, 0),
        3: (0, 1, 0),
        4: (0, -1, 0),
        5: (0, 0, 1),
        6: (0, 0, -1),
    }
    def __init__(self, pos0=None, lattice_dims=None, **kwargs):
        RandomWalk.__init__(self, 3, pos0, lattice_dims, kwargs)
    def step(self):
        """
        Takes a single step.
        @rtype: vector
        @return: new position, one unit away along a random axis
        """
        x, y, z = self.pos[-1]
        dx, dy, dz = self._MOVES[random.randint(1, 6)]
        return (x + dx, y + dy, z + dz)
    def _on_lattice(self, point):
        # True when every coordinate lies within [0, lattice_dims[axis]].
        return all(0 <= point[axis] <= self.lattice_dims[axis]
                   for axis in range(3))
    def walk(self, n_steps, verbose=False):
        """
        Takes multiple steps.
        @type n_steps: number
        @param n_steps: number of steps
        @type verbose: boolean
        @param verbose: print position at each step [default=False]
        """
        for _ in range(n_steps):
            candidate = self.step()
            # Keep the particle within the lattice dimensions by
            # resampling any move that would leave the lattice.
            while not self._on_lattice(candidate):
                candidate = self.step()
            self.pos.append(candidate)
            if verbose:
                self.current_position()
if __name__ == "__main__":
    # Demo: run 100-step bounded walks in 1, 2 and 3 dimensions and print
    # the final position and the full trajectory of each.
    walk1d = RandomWalk1D(1, 10)
    walk1d.walk(100)
    print(walk1d.get_position())
    print(walk1d.get_trajectory())
    print()
    walk2d = RandomWalk2D([2, 3], [10, 10])
    walk2d.walk(100)
    print(walk2d.get_position())
    print(walk2d.get_trajectory())
    print()
    walk3d = RandomWalk3D([4, 5, 6], [10, 10, 10])
    walk3d.walk(100)
    print(walk3d.get_position())
    print(walk3d.get_trajectory())
| |
# Python test set -- built-in functions
import test.support, unittest
import sys
import pickle
import itertools
# pure Python implementations (3 args only), for comparison
def pyrange(start, stop, step):
    """Pure-Python generator equivalent of ``range(start, stop, step)``.

    Reference implementation used by the tests; only called with all
    three arguments.
    """
    if (start - stop) // step < 0:
        # replace stop with next element in the sequence of integers
        # that are congruent to start modulo step.
        stop += (start - stop) % step
        # BUG FIX: the loop belongs inside this branch (as in upstream
        # CPython); with it dedented, an empty range such as
        # pyrange(0, -5, 1) never terminates because start never equals
        # stop.
        while start != stop:
            yield start
            start += step
def pyrange_reversed(start, stop, step):
    """Pure-Python reference for ``reversed(range(start, stop, step))``."""
    # Align stop onto the arithmetic sequence, then walk it backwards.
    aligned_stop = stop + (start - stop) % step
    return pyrange(aligned_stop - step, start - step, -step)
class RangeTest(unittest.TestCase):
    """Exercise the built-in range type: construction and argument
    validation, huge values, indexing/slicing, index()/count(), repr,
    pickling, containment, and iteration (forward and reversed) checked
    against the pure-Python pyrange reference."""
    def assert_iterators_equal(self, xs, ys, test_id, limit=None):
        # check that an iterator xs matches the expected results ys,
        # up to a given limit.
        if limit is not None:
            xs = itertools.islice(xs, limit)
            ys = itertools.islice(ys, limit)
        sentinel = object()
        pairs = itertools.zip_longest(xs, ys, fillvalue=sentinel)
        for i, (x, y) in enumerate(pairs):
            if x == y:
                continue
            elif x == sentinel:
                self.fail('{}: iterator ended unexpectedly '
                          'at position {}; expected {}'.format(test_id, i, y))
            elif y == sentinel:
                self.fail('{}: unexpected excess element {} at '
                          'position {}'.format(test_id, x, i))
            else:
                self.fail('{}: wrong element at position {};'
                          'expected {}, got {}'.format(test_id, i, y, x))
    def test_range(self):
        # Basic values, then argument validation, then sys.maxsize edges.
        self.assertEqual(list(range(3)), [0, 1, 2])
        self.assertEqual(list(range(1, 5)), [1, 2, 3, 4])
        self.assertEqual(list(range(0)), [])
        self.assertEqual(list(range(-3)), [])
        self.assertEqual(list(range(1, 10, 3)), [1, 4, 7])
        self.assertEqual(list(range(5, -5, -3)), [5, 2, -1, -4])
        a = 10
        b = 100
        c = 50
        self.assertEqual(list(range(a, a+2)), [a, a+1])
        self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1])
        self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2])
        seq = list(range(a, b, c))
        self.assertIn(a, seq)
        self.assertNotIn(b, seq)
        self.assertEqual(len(seq), 2)
        seq = list(range(b, a, -c))
        self.assertIn(b, seq)
        self.assertNotIn(a, seq)
        self.assertEqual(len(seq), 2)
        seq = list(range(-a, -b, -c))
        self.assertIn(-a, seq)
        self.assertNotIn(-b, seq)
        self.assertEqual(len(seq), 2)
        self.assertRaises(TypeError, range)
        self.assertRaises(TypeError, range, 1, 2, 3, 4)
        self.assertRaises(ValueError, range, 1, 2, 0)
        self.assertRaises(TypeError, range, 0.0, 2, 1)
        self.assertRaises(TypeError, range, 1, 2.0, 1)
        self.assertRaises(TypeError, range, 1, 2, 1.0)
        self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
        self.assertRaises(TypeError, range, 0, "spam")
        self.assertRaises(TypeError, range, 0, 42, "spam")
        self.assertEqual(len(range(0, sys.maxsize, sys.maxsize-1)), 2)
        r = range(-sys.maxsize, sys.maxsize, 2)
        self.assertEqual(len(r), sys.maxsize)
    def test_large_operands(self):
        # Operands too big for the C-long fast path.
        x = range(10**20, 10**20+10, 3)
        self.assertEqual(len(x), 4)
        self.assertEqual(len(list(x)), 4)
        x = range(10**20+10, 10**20, 3)
        self.assertEqual(len(x), 0)
        self.assertEqual(len(list(x)), 0)
        x = range(10**20, 10**20+10, -3)
        self.assertEqual(len(x), 0)
        self.assertEqual(len(list(x)), 0)
        x = range(10**20+10, 10**20, -3)
        self.assertEqual(len(x), 4)
        self.assertEqual(len(list(x)), 4)
        # Now test range() with longs
        self.assertEqual(list(range(-2**100)), [])
        self.assertEqual(list(range(0, -2**100)), [])
        self.assertEqual(list(range(0, 2**100, -1)), [])
        self.assertEqual(list(range(0, 2**100, -1)), [])
        a = int(10 * sys.maxsize)
        b = int(100 * sys.maxsize)
        c = int(50 * sys.maxsize)
        self.assertEqual(list(range(a, a+2)), [a, a+1])
        self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1])
        self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2])
        seq = list(range(a, b, c))
        self.assertIn(a, seq)
        self.assertNotIn(b, seq)
        self.assertEqual(len(seq), 2)
        self.assertEqual(seq[0], a)
        self.assertEqual(seq[-1], a+c)
        seq = list(range(b, a, -c))
        self.assertIn(b, seq)
        self.assertNotIn(a, seq)
        self.assertEqual(len(seq), 2)
        self.assertEqual(seq[0], b)
        self.assertEqual(seq[-1], b-c)
        seq = list(range(-a, -b, -c))
        self.assertIn(-a, seq)
        self.assertNotIn(-b, seq)
        self.assertEqual(len(seq), 2)
        self.assertEqual(seq[0], -a)
        self.assertEqual(seq[-1], -a-c)
    def test_large_range(self):
        # Check long ranges (len > sys.maxsize)
        # len() is expected to fail due to limitations of the __len__ protocol
        def _range_len(x):
            # Compute the length from the endpoints when len() overflows.
            try:
                length = len(x)
            except OverflowError:
                step = x[1] - x[0]
                length = 1 + ((x[-1] - x[0]) // step)
            return length
        a = -sys.maxsize
        b = sys.maxsize
        expected_len = b - a
        x = range(a, b)
        self.assertIn(a, x)
        self.assertNotIn(b, x)
        self.assertRaises(OverflowError, len, x)
        self.assertEqual(_range_len(x), expected_len)
        self.assertEqual(x[0], a)
        idx = sys.maxsize+1
        self.assertEqual(x[idx], a+idx)
        self.assertEqual(x[idx:idx+1][0], a+idx)
        with self.assertRaises(IndexError):
            x[-expected_len-1]
        with self.assertRaises(IndexError):
            x[expected_len]
        a = 0
        b = 2 * sys.maxsize
        expected_len = b - a
        x = range(a, b)
        self.assertIn(a, x)
        self.assertNotIn(b, x)
        self.assertRaises(OverflowError, len, x)
        self.assertEqual(_range_len(x), expected_len)
        self.assertEqual(x[0], a)
        idx = sys.maxsize+1
        self.assertEqual(x[idx], a+idx)
        self.assertEqual(x[idx:idx+1][0], a+idx)
        with self.assertRaises(IndexError):
            x[-expected_len-1]
        with self.assertRaises(IndexError):
            x[expected_len]
        a = 0
        b = sys.maxsize**10
        c = 2*sys.maxsize
        expected_len = 1 + (b - a) // c
        x = range(a, b, c)
        self.assertIn(a, x)
        self.assertNotIn(b, x)
        self.assertRaises(OverflowError, len, x)
        self.assertEqual(_range_len(x), expected_len)
        self.assertEqual(x[0], a)
        idx = sys.maxsize+1
        self.assertEqual(x[idx], a+(idx*c))
        self.assertEqual(x[idx:idx+1][0], a+(idx*c))
        with self.assertRaises(IndexError):
            x[-expected_len-1]
        with self.assertRaises(IndexError):
            x[expected_len]
        a = sys.maxsize**10
        b = 0
        c = -2*sys.maxsize
        expected_len = 1 + (b - a) // c
        x = range(a, b, c)
        self.assertIn(a, x)
        self.assertNotIn(b, x)
        self.assertRaises(OverflowError, len, x)
        self.assertEqual(_range_len(x), expected_len)
        self.assertEqual(x[0], a)
        idx = sys.maxsize+1
        self.assertEqual(x[idx], a+(idx*c))
        self.assertEqual(x[idx:idx+1][0], a+(idx*c))
        with self.assertRaises(IndexError):
            x[-expected_len-1]
        with self.assertRaises(IndexError):
            x[expected_len]
    def test_invalid_invocation(self):
        # Wrong arity, zero step, and non-integer arguments must raise.
        self.assertRaises(TypeError, range)
        self.assertRaises(TypeError, range, 1, 2, 3, 4)
        self.assertRaises(ValueError, range, 1, 2, 0)
        a = int(10 * sys.maxsize)
        self.assertRaises(ValueError, range, a, a + 1, int(0))
        self.assertRaises(TypeError, range, 1., 1., 1.)
        self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
        self.assertRaises(TypeError, range, 0, "spam")
        self.assertRaises(TypeError, range, 0, 42, "spam")
        # Exercise various combinations of bad arguments, to check
        # refcounting logic
        self.assertRaises(TypeError, range, 0.0)
        self.assertRaises(TypeError, range, 0, 0.0)
        self.assertRaises(TypeError, range, 0.0, 0)
        self.assertRaises(TypeError, range, 0.0, 0.0)
        self.assertRaises(TypeError, range, 0, 0, 1.0)
        self.assertRaises(TypeError, range, 0, 0.0, 1)
        self.assertRaises(TypeError, range, 0, 0.0, 1.0)
        self.assertRaises(TypeError, range, 0.0, 0, 1)
        self.assertRaises(TypeError, range, 0.0, 0, 1.0)
        self.assertRaises(TypeError, range, 0.0, 0.0, 1)
        self.assertRaises(TypeError, range, 0.0, 0.0, 1.0)
    def test_index(self):
        # range.index(), including exceptions raised from __eq__ and
        # values found only via equality.
        u = range(2)
        self.assertEqual(u.index(0), 0)
        self.assertEqual(u.index(1), 1)
        self.assertRaises(ValueError, u.index, 2)
        u = range(-2, 3)
        self.assertEqual(u.count(0), 1)
        self.assertEqual(u.index(0), 2)
        self.assertRaises(TypeError, u.index)
        class BadExc(Exception):
            pass
        class BadCmp:
            def __eq__(self, other):
                if other == 2:
                    raise BadExc()
                return False
        a = range(4)
        self.assertRaises(BadExc, a.index, BadCmp())
        a = range(-2, 3)
        self.assertEqual(a.index(0), 2)
        self.assertEqual(range(1, 10, 3).index(4), 1)
        self.assertEqual(range(1, -10, -3).index(-5), 2)
        self.assertEqual(range(10**20).index(1), 1)
        self.assertEqual(range(10**20).index(10**20 - 1), 10**20 - 1)
        self.assertRaises(ValueError, range(1, 2**100, 2).index, 2**87)
        self.assertEqual(range(1, 2**100, 2).index(2**87+1), 2**86)
        class AlwaysEqual(object):
            def __eq__(self, other):
                return True
        always_equal = AlwaysEqual()
        self.assertEqual(range(10).index(always_equal), 0)
    def test_user_index_method(self):
        bignum = 2*sys.maxsize
        smallnum = 42
        # User-defined class with an __index__ method
        class I:
            def __init__(self, n):
                self.n = int(n)
            def __index__(self):
                return self.n
        self.assertEqual(list(range(I(bignum), I(bignum + 1))), [bignum])
        self.assertEqual(list(range(I(smallnum), I(smallnum + 1))), [smallnum])
        # User-defined class with a failing __index__ method
        class IX:
            def __index__(self):
                raise RuntimeError
        self.assertRaises(RuntimeError, range, IX())
        # User-defined class with an invalid __index__ method
        class IN:
            def __index__(self):
                return "not a number"
        self.assertRaises(TypeError, range, IN())
    def test_count(self):
        # range.count(), including huge ranges and __eq__-based matches.
        self.assertEqual(range(3).count(-1), 0)
        self.assertEqual(range(3).count(0), 1)
        self.assertEqual(range(3).count(1), 1)
        self.assertEqual(range(3).count(2), 1)
        self.assertEqual(range(3).count(3), 0)
        self.assertIs(type(range(3).count(-1)), int)
        self.assertIs(type(range(3).count(1)), int)
        self.assertEqual(range(10**20).count(1), 1)
        self.assertEqual(range(10**20).count(10**20), 0)
        self.assertEqual(range(3).index(1), 1)
        self.assertEqual(range(1, 2**100, 2).count(2**87), 0)
        self.assertEqual(range(1, 2**100, 2).count(2**87+1), 1)
        class AlwaysEqual(object):
            def __eq__(self, other):
                return True
        always_equal = AlwaysEqual()
        self.assertEqual(range(10).count(always_equal), 10)
        self.assertEqual(len(range(sys.maxsize, sys.maxsize+10)), 10)
    def test_repr(self):
        # The step is omitted from the repr only when it is 1.
        self.assertEqual(repr(range(1)), 'range(0, 1)')
        self.assertEqual(repr(range(1, 2)), 'range(1, 2)')
        self.assertEqual(repr(range(1, 2, 3)), 'range(1, 2, 3)')
    def test_pickling(self):
        # Round-trip representative ranges through every pickle protocol.
        testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1),
                     (13, 21, 3), (-2, 2, 2)]
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for t in testcases:
                r = range(*t)
                self.assertEqual(list(pickle.loads(pickle.dumps(r, proto))),
                                 list(r))
    def test_odd_bug(self):
        # This used to raise a "SystemError: NULL result without error"
        # because the range validation step was eating the exception
        # before NULL was returned.
        with self.assertRaises(TypeError):
            range([], 1, -1)
    def test_types(self):
        # Non-integer objects *equal* to any of the range's items are supposed
        # to be contained in the range.
        self.assertIn(1.0, range(3))
        self.assertIn(True, range(3))
        self.assertIn(1+0j, range(3))
        class C1:
            def __eq__(self, other): return True
        self.assertIn(C1(), range(3))
        # Objects are never coerced into other types for comparison.
        class C2:
            def __int__(self): return 1
            def __index__(self): return 1
        self.assertNotIn(C2(), range(3))
        # ..except if explicitly told so.
        self.assertIn(int(C2()), range(3))
        # Check that the range.__contains__ optimization is only
        # used for ints, not for instances of subclasses of int.
        class C3(int):
            def __eq__(self, other): return True
        self.assertIn(C3(11), range(10))
        self.assertIn(C3(11), list(range(10)))
    def test_strided_limits(self):
        # Containment near the endpoints of strided ranges.
        r = range(0, 101, 2)
        self.assertIn(0, r)
        self.assertNotIn(1, r)
        self.assertIn(2, r)
        self.assertNotIn(99, r)
        self.assertIn(100, r)
        self.assertNotIn(101, r)
        r = range(0, -20, -1)
        self.assertIn(0, r)
        self.assertIn(-1, r)
        self.assertIn(-19, r)
        self.assertNotIn(-20, r)
        r = range(0, -20, -2)
        self.assertIn(-18, r)
        self.assertNotIn(-19, r)
        self.assertNotIn(-20, r)
    def test_empty(self):
        # Nothing is contained in an empty range.
        r = range(0)
        self.assertNotIn(0, r)
        self.assertNotIn(1, r)
        r = range(0, -10)
        self.assertNotIn(0, r)
        self.assertNotIn(-1, r)
        self.assertNotIn(1, r)
    def test_range_iterators(self):
        # exercise 'fast' iterators, that use a rangeiterobject internally.
        # see issue 7298
        limits = [base + jiggle
                  for M in (2**32, 2**64)
                  for base in (-M, -M//2, 0, M//2, M)
                  for jiggle in (-2, -1, 0, 1, 2)]
        test_ranges = [(start, end, step)
                       for start in limits
                       for end in limits
                       for step in (-2**63, -2**31, -2, -1, 1, 2)]
        for start, end, step in test_ranges:
            iter1 = range(start, end, step)
            iter2 = pyrange(start, end, step)
            test_id = "range({}, {}, {})".format(start, end, step)
            # check first 100 entries
            self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
            iter1 = reversed(range(start, end, step))
            iter2 = pyrange_reversed(start, end, step)
            test_id = "reversed(range({}, {}, {}))".format(start, end, step)
            self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
    def test_slice(self):
        # Slicing a range must agree with slicing the equivalent list.
        def check(start, stop, step=None):
            i = slice(start, stop, step)
            self.assertEqual(list(r[i]), list(r)[i])
            self.assertEqual(len(r[i]), len(list(r)[i]))
        for r in [range(10),
                  range(0),
                  range(1, 9, 3),
                  range(8, 0, -3),
                  range(sys.maxsize+1, sys.maxsize+10),
                  ]:
            check(0, 2)
            check(0, 20)
            check(1, 2)
            check(20, 30)
            check(-30, -20)
            check(-1, 100, 2)
            check(0, -1)
            check(-1, -3, -1)
    def test_contains(self):
        # Membership for forward, reversed and strided ranges, including
        # float equality and non-numeric probes.
        r = range(10)
        self.assertIn(0, r)
        self.assertIn(1, r)
        self.assertIn(5.0, r)
        self.assertNotIn(5.1, r)
        self.assertNotIn(-1, r)
        self.assertNotIn(10, r)
        self.assertNotIn("", r)
        r = range(9, -1, -1)
        self.assertIn(0, r)
        self.assertIn(1, r)
        self.assertIn(5.0, r)
        self.assertNotIn(5.1, r)
        self.assertNotIn(-1, r)
        self.assertNotIn(10, r)
        self.assertNotIn("", r)
        r = range(0, 10, 2)
        self.assertIn(0, r)
        self.assertNotIn(1, r)
        self.assertNotIn(5.0, r)
        self.assertNotIn(5.1, r)
        self.assertNotIn(-1, r)
        self.assertNotIn(10, r)
        self.assertNotIn("", r)
        r = range(9, -1, -2)
        self.assertNotIn(0, r)
        self.assertIn(1, r)
        self.assertIn(5.0, r)
        self.assertNotIn(5.1, r)
        self.assertNotIn(-1, r)
        self.assertNotIn(10, r)
        self.assertNotIn("", r)
    def test_reverse_iteration(self):
        # reversed(range(...)) must agree with the reversed list.
        for r in [range(10),
                  range(0),
                  range(1, 9, 3),
                  range(8, 0, -3),
                  range(sys.maxsize+1, sys.maxsize+10),
                  ]:
            self.assertEqual(list(reversed(r)), list(r)[::-1])
def test_main():
    # Entry point used by CPython's regrtest machinery.
    test.support.run_unittest(RangeTest)
if __name__ == "__main__":
    test_main()
| |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
import requests
import six
import testtools
import webob
from keystoneclient.middleware import s3_token
from keystoneclient.tests.unit import utils
# Canned keystone "s3tokens" response: the token id and tenant id the
# middleware is expected to propagate into the WSGI request.
GOOD_RESPONSE = {'access': {'token': {'id': 'TOKEN_ID',
                                      'tenant': {'id': 'TENANT_ID'}}}}
class FakeApp(object):
    """This represents a WSGI app protected by the auth_token middleware."""
    def __call__(self, env, start_response):
        # Echo the WSGI environment back so tests can inspect what the
        # middleware forwarded downstream.
        response = webob.Response()
        response.environ = env
        return response(env, start_response)
class S3TokenMiddlewareTestBase(utils.TestCase):
    """Shared fixtures for the s3_token middleware tests."""
    TEST_PROTOCOL = 'https'
    TEST_HOST = 'fakehost'
    TEST_PORT = 35357
    TEST_URL = '%s://%s:%d/v2.0/s3tokens' % (TEST_PROTOCOL, TEST_HOST,
                                             TEST_PORT)
    def setUp(self):
        super(S3TokenMiddlewareTestBase, self).setUp()
        # Minimal middleware configuration pointing at the fake keystone.
        self.conf = {
            'auth_host': self.TEST_HOST,
            'auth_port': self.TEST_PORT,
            'auth_protocol': self.TEST_PROTOCOL,
        }
    def start_fake_response(self, status, headers):
        # WSGI start_response stand-in: record the status code and headers
        # for later assertions.
        self.response_status = int(status.split(' ', 1)[0])
        self.response_headers = dict(headers)
class S3TokenMiddlewareTestGood(S3TokenMiddlewareTestBase):
    """Happy-path tests: keystone accepts the presented S3 credentials."""

    def setUp(self):
        super(S3TokenMiddlewareTestGood, self).setUp()
        self.middleware = s3_token.S3Token(FakeApp(), self.conf)
        # self.requests is presumably a requests-mock fixture installed by
        # the base TestCase -- TODO confirm against keystoneclient test utils.
        self.requests.post(self.TEST_URL, status_code=201, json=GOOD_RESPONSE)
    # Ignore the request and pass to the next middleware in the
    # pipeline if no path has been specified.
    def test_no_path_request(self):
        req = webob.Request.blank('/')
        self.middleware(req.environ, self.start_fake_response)
        self.assertEqual(self.response_status, 200)
    # Ignore the request and pass to the next middleware in the
    # pipeline if no Authorization header has been specified
    def test_without_authorization(self):
        req = webob.Request.blank('/v1/AUTH_cfa/c/o')
        self.middleware(req.environ, self.start_fake_response)
        self.assertEqual(self.response_status, 200)
    def test_without_auth_storage_token(self):
        """An Authorization header without X-Storage-Token passes through."""
        req = webob.Request.blank('/v1/AUTH_cfa/c/o')
        req.headers['Authorization'] = 'badboy'
        self.middleware(req.environ, self.start_fake_response)
        self.assertEqual(self.response_status, 200)
    def test_authorized(self):
        """Valid credentials: path is rewritten to the keystone tenant and an
        X-Auth-Token header is injected."""
        req = webob.Request.blank('/v1/AUTH_cfa/c/o')
        req.headers['Authorization'] = 'access:signature'
        req.headers['X-Storage-Token'] = 'token'
        req.get_response(self.middleware)
        self.assertTrue(req.path.startswith('/v1/AUTH_TENANT_ID'))
        self.assertEqual(req.headers['X-Auth-Token'], 'TOKEN_ID')
    def test_authorized_http(self):
        """Same as test_authorized, but talking to keystone over plain http."""
        TEST_URL = 'http://%s:%d/v2.0/s3tokens' % (self.TEST_HOST,
                                                   self.TEST_PORT)
        self.requests.post(TEST_URL, status_code=201, json=GOOD_RESPONSE)
        self.middleware = (
            s3_token.filter_factory({'auth_protocol': 'http',
                                     'auth_host': self.TEST_HOST,
                                     'auth_port': self.TEST_PORT})(FakeApp()))
        req = webob.Request.blank('/v1/AUTH_cfa/c/o')
        req.headers['Authorization'] = 'access:signature'
        req.headers['X-Storage-Token'] = 'token'
        req.get_response(self.middleware)
        self.assertTrue(req.path.startswith('/v1/AUTH_TENANT_ID'))
        self.assertEqual(req.headers['X-Auth-Token'], 'TOKEN_ID')
    def test_authorization_nova_toconnect(self):
        """A tenant id embedded in the Authorization header overrides the
        one in the URL."""
        req = webob.Request.blank('/v1/AUTH_swiftint/c/o')
        req.headers['Authorization'] = 'access:FORCED_TENANT_ID:signature'
        req.headers['X-Storage-Token'] = 'token'
        req.get_response(self.middleware)
        path = req.environ['PATH_INFO']
        self.assertTrue(path.startswith('/v1/AUTH_FORCED_TENANT_ID'))
    @mock.patch.object(requests, 'post')
    def test_insecure(self, MOCK_REQUEST):
        """insecure=True must be forwarded as verify=False to requests.post."""
        self.middleware = (
            s3_token.filter_factory({'insecure': True})(FakeApp()))
        text_return_value = jsonutils.dumps(GOOD_RESPONSE)
        if six.PY3:
            # requests' Response.text is bytes-backed on PY3 in this stub.
            text_return_value = text_return_value.encode()
        MOCK_REQUEST.return_value = utils.TestResponse({
            'status_code': 201,
            'text': text_return_value})
        req = webob.Request.blank('/v1/AUTH_cfa/c/o')
        req.headers['Authorization'] = 'access:signature'
        req.headers['X-Storage-Token'] = 'token'
        req.get_response(self.middleware)
        self.assertTrue(MOCK_REQUEST.called)
        mock_args, mock_kwargs = MOCK_REQUEST.call_args
        self.assertIs(mock_kwargs['verify'], False)
class S3TokenMiddlewareTestBad(S3TokenMiddlewareTestBase):
    """Failure-path tests: keystone rejects or cannot validate credentials."""

    def setUp(self):
        super(S3TokenMiddlewareTestBad, self).setUp()
        self.middleware = s3_token.S3Token(FakeApp(), self.conf)
    def test_unauthorized_token(self):
        """A 403 from keystone maps to the S3 AccessDenied error response."""
        ret = {"error":
               {"message": "EC2 access key not found.",
                "code": 401,
                "title": "Unauthorized"}}
        self.requests.post(self.TEST_URL, status_code=403, json=ret)
        req = webob.Request.blank('/v1/AUTH_cfa/c/o')
        req.headers['Authorization'] = 'access:signature'
        req.headers['X-Storage-Token'] = 'token'
        resp = req.get_response(self.middleware)
        s3_denied_req = self.middleware.deny_request('AccessDenied')
        self.assertEqual(resp.body, s3_denied_req.body)
        self.assertEqual(resp.status_int, s3_denied_req.status_int)
    def test_bogus_authorization(self):
        """A malformed Authorization header yields the InvalidURI error."""
        req = webob.Request.blank('/v1/AUTH_cfa/c/o')
        req.headers['Authorization'] = 'badboy'
        req.headers['X-Storage-Token'] = 'token'
        resp = req.get_response(self.middleware)
        self.assertEqual(resp.status_int, 400)
        s3_invalid_req = self.middleware.deny_request('InvalidURI')
        self.assertEqual(resp.body, s3_invalid_req.body)
        self.assertEqual(resp.status_int, s3_invalid_req.status_int)
    def test_fail_to_connect_to_keystone(self):
        """A ServiceError while talking to keystone surfaces as InvalidURI."""
        with mock.patch.object(self.middleware, '_json_request') as o:
            s3_invalid_req = self.middleware.deny_request('InvalidURI')
            o.side_effect = s3_token.ServiceError(s3_invalid_req)
            req = webob.Request.blank('/v1/AUTH_cfa/c/o')
            req.headers['Authorization'] = 'access:signature'
            req.headers['X-Storage-Token'] = 'token'
            resp = req.get_response(self.middleware)
            self.assertEqual(resp.body, s3_invalid_req.body)
            self.assertEqual(resp.status_int, s3_invalid_req.status_int)
    def test_bad_reply(self):
        """A non-JSON keystone reply is treated as InvalidURI."""
        self.requests.post(self.TEST_URL, status_code=201, text="<badreply>")
        req = webob.Request.blank('/v1/AUTH_cfa/c/o')
        req.headers['Authorization'] = 'access:signature'
        req.headers['X-Storage-Token'] = 'token'
        resp = req.get_response(self.middleware)
        s3_invalid_req = self.middleware.deny_request('InvalidURI')
        self.assertEqual(resp.body, s3_invalid_req.body)
        self.assertEqual(resp.status_int, s3_invalid_req.status_int)
class S3TokenMiddlewareTestUtil(testtools.TestCase):
    """Unit tests for the s3_token.split_path helper."""

    def test_split_path_failed(self):
        """Malformed paths and bad min/max segment counts raise ValueError."""
        self.assertRaises(ValueError, s3_token.split_path, '')
        self.assertRaises(ValueError, s3_token.split_path, '/')
        self.assertRaises(ValueError, s3_token.split_path, '//')
        self.assertRaises(ValueError, s3_token.split_path, '//a')
        self.assertRaises(ValueError, s3_token.split_path, '/a/c')
        self.assertRaises(ValueError, s3_token.split_path, '//c')
        self.assertRaises(ValueError, s3_token.split_path, '/a/c/')
        self.assertRaises(ValueError, s3_token.split_path, '/a//')
        self.assertRaises(ValueError, s3_token.split_path, '/a', 2)
        self.assertRaises(ValueError, s3_token.split_path, '/a', 2, 3)
        self.assertRaises(ValueError, s3_token.split_path, '/a', 2, 3, True)
        self.assertRaises(ValueError, s3_token.split_path, '/a/c/o/r', 3, 3)
        self.assertRaises(ValueError, s3_token.split_path, '/a', 5, 4)

    def test_split_path_success(self):
        """Well-formed paths split into the expected segment lists."""
        self.assertEqual(s3_token.split_path('/a'), ['a'])
        self.assertEqual(s3_token.split_path('/a/'), ['a'])
        self.assertEqual(s3_token.split_path('/a/c', 2), ['a', 'c'])
        self.assertEqual(s3_token.split_path('/a/c/o', 3), ['a', 'c', 'o'])
        self.assertEqual(s3_token.split_path('/a/c/o/r', 3, 3, True),
                         ['a', 'c', 'o/r'])
        self.assertEqual(s3_token.split_path('/a/c', 2, 3, True),
                         ['a', 'c', None])
        self.assertEqual(s3_token.split_path('/a/c/', 2), ['a', 'c'])
        self.assertEqual(s3_token.split_path('/a/c/', 2, 3), ['a', 'c', ''])

    def test_split_path_invalid_path(self):
        """Unsafe characters in the path are percent-quoted in the error.

        Bug fix: the original try/except silently passed when no ValueError
        was raised at all; the else: self.fail(...) clauses close that hole.
        """
        try:
            s3_token.split_path('o\nn e', 2)
        except ValueError as err:
            self.assertEqual(str(err), 'Invalid path: o%0An%20e')
        else:
            self.fail('ValueError was not raised')
        try:
            s3_token.split_path('o\nn e', 2, 3, True)
        except ValueError as err:
            self.assertEqual(str(err), 'Invalid path: o%0An%20e')
        else:
            self.fail('ValueError was not raised')
| |
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import Queue
import os
import errno
import sys
import random
import select
import traceback
from collections import defaultdict, deque
from threading import Lock
import socks
import socket
import json
import util
from bitcoin import *
from interface import Connection, Interface
from blockchain import Blockchain
from version import ELECTRUM_VERSION, PROTOCOL_VERSION
# Confirmation targets (in blocks) queried via blockchain.estimatefee.
FEE_TARGETS = [25, 10, 5, 2]

# Default port per protocol letter: t=tcp, s=ssl, h=http, g=https.
DEFAULT_PORTS = {'t':'50001', 's':'50002', 'h':'8081', 'g':'8082'}

# Hard-coded bootstrap list of public electrum servers.
DEFAULT_SERVERS = {
    'erbium1.sytes.net':{'t':'50001', 's':'50002'},
    'ecdsa.net':{'t':'50001', 's':'110'},
    'ELECTRUM.top-master.com':{'t':'50001', 's':'50002'},
    'VPS.hsmiths.com':{'t':'50001', 's':'50002'},
    'ELECTRUM.jdubya.info':{'t':'50001', 's':'50002'},
    'electrum.no-ip.org':{'t':'50001', 's':'50002', 'g':'443'},
    'us.electrum.be':DEFAULT_PORTS,
    'bitcoins.sk':{'t':'50001', 's':'50002'},
    'us1.einfachmalnettsein.de':{'t':'50001', 's':'50002'},
    'electrum.dragonzone.net':DEFAULT_PORTS,
    'Electrum.hsmiths.com':{'t':'8080', 's':'995'},
    'electrum3.hachre.de':{'t':'50001', 's':'50002'},
    'elec.luggs.co':{'t':'80', 's':'443'},
    'btc.smsys.me':{'t':'110', 's':'995'},
    'electrum.online':{'t':'50001', 's':'50002'},
}

# Seconds before re-trying servers that failed (see Network.maintain_sockets).
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
    """Parse a server.peers.subscribe result into a dict.

    Each item looks like (ip, host, [features...]); features encode ports
    ("t50001"), protocol version ("v0.10") and pruning level ("p0").
    Returns {host: {protocol: port, ..., 'pruning': level}} keeping only
    servers whose protocol version is recent enough.
    """
    from version import PROTOCOL_VERSION
    servers = {}
    for item in result:
        host = item[1]
        out = {}
        version = None
        pruning_level = '-'
        if len(item) > 2:
            for v in item[2]:
                if re.match("[stgh]\d*", v):
                    protocol, port = v[0], v[1:]
                    if port == '': port = DEFAULT_PORTS[protocol]
                    out[protocol] = port
                elif re.match("v(.?)+", v):
                    version = v[1:]
                elif re.match("p\d*", v):
                    pruning_level = v[1:]
                    if pruning_level == '': pruning_level = '0'
        try:
            # Bug fix: direct >= comparison replaces the Python2-only cmp()
            # builtin (removed in Python 3); normalize_version() output is
            # directly comparable, so the result is identical.
            is_recent = util.normalize_version(version) >= util.normalize_version(PROTOCOL_VERSION)
        except Exception:
            # Missing/unparsable version string: treat the server as stale.
            is_recent = False
        if out and is_recent:
            out['pruning'] = pruning_level
            servers[host] = out
    return servers
def filter_protocol(hostmap = DEFAULT_SERVERS, protocol = 's'):
    '''Filters the hostmap for those implementing protocol.
    The result is a list in serialized form.'''
    return [serialize_server(host, portmap[protocol], protocol)
            for host, portmap in hostmap.items()
            if portmap.get(protocol)]
def pick_random_server(hostmap = DEFAULT_SERVERS, protocol = 's', exclude_set = None):
    """Return a random serialized server for `protocol`, skipping any in
    exclude_set, or None when no server qualifies.

    Fix: the default for exclude_set was a mutable `set()` literal -- a
    shared-instance anti-pattern; None is used as the sentinel instead
    (behavior for all callers is unchanged).
    """
    if exclude_set is None:
        exclude_set = set()
    eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
    return random.choice(eligible) if eligible else None
from simple_config import SimpleConfig

# Supported proxy modes; list position + 1 maps to the socks module's
# proxy-type constants (see Network.set_proxy).
proxy_modes = ['socks4', 'socks5', 'http']
def serialize_proxy(p):
    """Flatten a proxy dict to the 'mode:host:port' config string.
    Returns None when p is not a plain dict."""
    if type(p) is not dict:
        return None
    parts = [p.get('mode'), p.get('host'), p.get('port')]
    return ':'.join(parts)
def deserialize_proxy(s):
    """Parse a 'mode:host:port' config string into a proxy dict.

    Any trailing component may be omitted; mode defaults to socks5, host to
    localhost, and the port to 8080 for http or 1080 otherwise.  Returns
    None for non-strings and for the literal string 'none'.
    """
    if type(s) not in [str, unicode]:
        return None
    if s.lower() == 'none':
        return None
    proxy = { "mode":"socks5", "host":"localhost" }
    tokens = s.split(':')
    # Leading token is consumed as the mode only if it names a known mode.
    if proxy_modes.count(tokens[0]) == 1:
        proxy["mode"] = tokens.pop(0)
    if tokens:
        proxy["host"] = tokens.pop(0)
    if tokens:
        proxy["port"] = tokens.pop(0)
    else:
        proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
    return proxy
def deserialize_server(server_str):
    """Split a 'host:port:protocol' string into its three parts.

    Raises if the string does not have exactly three colon-separated
    fields, if the protocol is not 't'/'s', or if the port is not numeric.
    """
    host, port, protocol = str(server_str).split(':')
    assert protocol in 'st'
    int(port)  # raises ValueError when port is not an integer string
    return host, port, protocol
def serialize_server(host, port, protocol):
    """Inverse of deserialize_server(): build 'host:port:protocol'."""
    return str(':'.join((host, port, protocol)))
class Network(util.DaemonThread):
    """The Network class manages a set of connections to remote electrum
    servers, each connected socket is handled by an Interface() object.
    Connections are initiated by a Connection() thread which stops once
    the connection succeeds or fails.
    Our external API:
    - Member functions get_header(), get_interfaces(), get_local_height(),
      get_parameters(), get_server_height(), get_status_value(),
      is_connected(), set_parameters(), stop()
    """
    def __init__(self, config=None):
        """Set up all state and immediately start connecting.
        `config` may be a plain dict (wrapped in SimpleConfig) or a
        SimpleConfig-like object."""
        if config is None:
            config = {} # Do not use mutables as default values!
        util.DaemonThread.__init__(self)
        self.config = SimpleConfig(config) if type(config) == type({}) else config
        # 'oneserver' mode connects only to the default server.
        self.num_server = 8 if not self.config.get('oneserver') else 0
        self.blockchain = Blockchain(self.config, self)
        # A deque of interface header requests, processed left-to-right
        self.bc_requests = deque()
        # Server for addresses and transactions
        self.default_server = self.config.get('server')
        # Sanitize default server
        try:
            deserialize_server(self.default_server)
        except:
            self.default_server = None
        if not self.default_server:
            self.default_server = pick_random_server()
        self.lock = Lock()
        self.pending_sends = []
        # Monotonically increasing JSON-RPC message id (see queue_request).
        self.message_id = 0
        self.debug = False
        self.irc_servers = {} # returned by interface (list from irc)
        self.recent_servers = self.read_recent_servers()
        self.banner = ''
        self.donation_address = ''
        # Per-target fee estimates in satoshis/kB, filled by process_response.
        self.fee_estimates = {}
        self.relay_fee = None
        # Per-server chain state reported via blockchain.headers.subscribe.
        self.heights = {}
        self.merkle_roots = {}
        self.utxo_roots = {}
        # callbacks passed with subscriptions
        self.subscriptions = defaultdict(list)
        self.sub_cache = {}
        # callbacks set by the GUI
        self.callbacks = defaultdict(list)
        # Directory for server SSL certificates (used by Connection).
        dir_path = os.path.join( self.config.path, 'certs')
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)
        # subscriptions and requests
        self.subscribed_addresses = set()
        # Requests from client we've not seen a response to
        self.unanswered_requests = {}
        # retry times
        self.server_retry_time = time.time()
        self.nodes_retry_time = time.time()
        # kick off the network. interface is the main server we are currently
        # communicating with. interfaces is the set of servers we are connecting
        # to or have an ongoing connection with
        self.interface = None
        self.interfaces = {}
        self.auto_connect = self.config.get('auto_connect', True)
        self.connecting = set()
        self.socket_queue = Queue.Queue()
        self.start_network(deserialize_server(self.default_server)[2],
                           deserialize_proxy(self.config.get('proxy')))
    def register_callback(self, callback, events):
        """Subscribe `callback` to each event name in `events`."""
        with self.lock:
            for event in events:
                self.callbacks[event].append(callback)
    def unregister_callback(self, callback):
        """Remove `callback` from every event's callback list."""
        with self.lock:
            for callbacks in self.callbacks.values():
                if callback in callbacks:
                    callbacks.remove(callback)
    def trigger_callback(self, event, *args):
        """Invoke all callbacks registered for `event`."""
        with self.lock:
            callbacks = self.callbacks[event][:]
        # Copy taken under the lock; calls happen outside it.
        [callback(event, *args) for callback in callbacks]
    def read_recent_servers(self):
        """Load the recently-used server list from disk; [] on any error."""
        if not self.config.path:
            return []
        path = os.path.join(self.config.path, "recent_servers")
        try:
            with open(path, "r") as f:
                data = f.read()
                return json.loads(data)
        except:
            return []
    def save_recent_servers(self):
        """Best-effort persist of the recent-server list; errors ignored."""
        if not self.config.path:
            return
        path = os.path.join(self.config.path, "recent_servers")
        s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
        try:
            with open(path, "w") as f:
                f.write(s)
        except:
            pass
    def get_server_height(self):
        """Chain height reported by the default server (0 if unknown)."""
        return self.heights.get(self.default_server, 0)
    def server_is_lagging(self):
        """True when the default server's reported height trails our local
        chain by more than one block, or reported no height at all."""
        sh = self.get_server_height()
        if not sh:
            self.print_error('no height for main interface')
            return True
        lh = self.get_local_height()
        result = (lh - sh) > 1
        if result:
            self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
        return result
    def set_status(self, status):
        """Record the connection status string and notify listeners."""
        self.connection_status = status
        self.notify('status')
    def is_connected(self):
        return self.interface is not None
    def is_connecting(self):
        return self.connection_status == 'connecting'
    def is_up_to_date(self):
        # Up to date == no outstanding client requests.
        return self.unanswered_requests == {}
    def queue_request(self, method, params, interface=None):
        # If you want to queue a request on any interface it must go
        # through this function so message ids are properly tracked
        if interface is None:
            interface = self.interface
        message_id = self.message_id
        self.message_id += 1
        if self.debug:
            self.print_error(interface.host, "-->", method, params, message_id)
        interface.queue_request(method, params, message_id)
        return message_id
    def send_subscriptions(self):
        """Replay all subscriptions and pending requests on the current
        interface; called after switching to a new main server."""
        self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses))
        self.sub_cache.clear()
        # Resend unanswered requests
        requests = self.unanswered_requests.values()
        self.unanswered_requests = {}
        for request in requests:
            message_id = self.queue_request(request[0], request[1])
            self.unanswered_requests[message_id] = request
        for addr in self.subscribed_addresses:
            self.queue_request('blockchain.address.subscribe', [addr])
        self.queue_request('server.banner', [])
        self.queue_request('server.donation_address', [])
        self.queue_request('server.peers.subscribe', [])
        for i in FEE_TARGETS:
            self.queue_request('blockchain.estimatefee', [i])
        self.queue_request('blockchain.relayfee', [])
    def get_status_value(self, key):
        """Map a status key to its current value.
        NOTE(review): an unrecognized key leaves `value` unbound and raises
        UnboundLocalError -- presumably callers only use the keys below."""
        if key == 'status':
            value = self.connection_status
        elif key == 'banner':
            value = self.banner
        elif key == 'fee':
            value = self.fee_estimates
        elif key == 'updated':
            value = (self.get_local_height(), self.get_server_height())
        elif key == 'servers':
            value = self.get_servers()
        elif key == 'interfaces':
            value = self.get_interfaces()
        return value
    def dynfee(self, i):
        """Dynamic fee for slider position i; positions 0-3 map to the
        FEE_TARGETS estimates, position 4 is 1.5x the 2-block estimate,
        all capped at 10*RECOMMENDED_FEE."""
        from bitcoin import RECOMMENDED_FEE
        if i < 4:
            j = FEE_TARGETS[i]
            fee = self.fee_estimates.get(j)
        else:
            assert i == 4
            fee = self.fee_estimates.get(2)
            if fee is not None:
                fee += fee/2
        if fee is not None:
            fee = min(10*RECOMMENDED_FEE, fee)
        return fee
    def reverse_dynfee(self, fee_per_kb):
        """Return the confirmation target whose estimate is closest to
        fee_per_kb; -1 means below half of the cheapest (25-block) rate."""
        import operator
        dist = map(lambda x: (x[0], abs(x[1] - fee_per_kb)), self.fee_estimates.items())
        min_target, min_value = min(dist, key=operator.itemgetter(1))
        if fee_per_kb < self.fee_estimates.get(25)/2:
            min_target = -1
        return min_target
    def notify(self, key):
        """Fire callbacks for `key`; status/updated carry no payload."""
        if key in ['status', 'updated']:
            self.trigger_callback(key)
        else:
            self.trigger_callback(key, self.get_status_value(key))
    def get_parameters(self):
        """Return (host, port, protocol, proxy, auto_connect)."""
        host, port, protocol = deserialize_server(self.default_server)
        return host, port, protocol, self.proxy, self.auto_connect
    def get_donation_address(self):
        # Returns None implicitly while disconnected.
        if self.is_connected():
            return self.donation_address
    def get_interfaces(self):
        '''The interfaces that are in connected state'''
        return self.interfaces.keys()
    def get_servers(self):
        """Known servers: the IRC-discovered list if any, else the
        hard-coded defaults, augmented with recently used servers.
        NOTE(review): when irc_servers is empty this aliases (and then
        mutates) the module-level DEFAULT_SERVERS dict -- confirm intended."""
        if self.irc_servers:
            out = self.irc_servers
        else:
            out = DEFAULT_SERVERS
        for s in self.recent_servers:
            try:
                host, port, protocol = deserialize_server(s)
            except:
                continue
            if host not in out:
                out[host] = { protocol:port }
        return out
    def start_interface(self, server):
        """Begin connecting to `server` unless already connected/connecting."""
        if (not server in self.interfaces and not server in self.connecting):
            if server == self.default_server:
                self.print_error("connecting to %s as new interface" % server)
                self.set_status('connecting')
            self.connecting.add(server)
            # Connection reports back asynchronously via self.socket_queue;
            # the constructed object itself is intentionally unused here.
            c = Connection(server, self.socket_queue, self.config.path)
    def start_random_interface(self):
        """Connect to one more random server not already known/failed."""
        exclude_set = self.disconnected_servers.union(set(self.interfaces))
        server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
        if server:
            self.start_interface(server)
    def start_interfaces(self):
        """Connect to the default server plus num_server-1 random ones."""
        self.start_interface(self.default_server)
        for i in range(self.num_server - 1):
            self.start_random_interface()
    def set_proxy(self, proxy):
        """Install or remove a SOCKS/HTTP proxy by monkey-patching the
        global socket module (affects the whole process)."""
        self.proxy = proxy
        if proxy:
            self.print_error('setting proxy', proxy)
            # proxy_modes index + 1 matches the socks module's constants.
            proxy_mode = proxy_modes.index(proxy["mode"]) + 1
            socks.setdefaultproxy(proxy_mode, proxy["host"], int(proxy["port"]))
            socket.socket = socks.socksocket
            # prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
            socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
        else:
            # Restore the original socket implementation.
            socket.socket = socket._socketobject
            socket.getaddrinfo = socket._socket.getaddrinfo
    def start_network(self, protocol, proxy):
        """(Re)start networking; requires a fully stopped state."""
        assert not self.interface and not self.interfaces
        assert not self.connecting and self.socket_queue.empty()
        self.print_error('starting network')
        self.disconnected_servers = set([])
        self.protocol = protocol
        self.set_proxy(proxy)
        self.start_interfaces()
    def stop_network(self):
        """Close all interfaces and discard pending connection attempts."""
        self.print_error("stopping network")
        for interface in self.interfaces.values():
            self.close_interface(interface)
        assert self.interface is None
        assert not self.interfaces
        self.connecting = set()
        # Get a new queue - no old pending connections thanks!
        self.socket_queue = Queue.Queue()
    def set_parameters(self, host, port, protocol, proxy, auto_connect):
        """Apply new user-chosen network parameters, restarting or
        switching interfaces as needed."""
        proxy_str = serialize_proxy(proxy)
        server = serialize_server(host, port, protocol)
        self.config.set_key('auto_connect', auto_connect, False)
        self.config.set_key("proxy", proxy_str, False)
        self.config.set_key("server", server, True)
        # abort if changes were not allowed by config
        if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
            return
        self.auto_connect = auto_connect
        if self.proxy != proxy or self.protocol != protocol:
            # Restart the network defaulting to the given server
            self.stop_network()
            self.default_server = server
            self.start_network(protocol, proxy)
        elif self.default_server != server:
            self.switch_to_interface(server)
        else:
            self.switch_lagging_interface()
    def switch_to_random_interface(self):
        '''Switch to a random connected server other than the current one'''
        servers = self.get_interfaces()    # Those in connected state
        if self.default_server in servers:
            servers.remove(self.default_server)
        if servers:
            self.switch_to_interface(random.choice(servers))
    def switch_lagging_interface(self, suggestion = None):
        '''If auto_connect and lagging, switch interface'''
        if self.server_is_lagging() and self.auto_connect:
            if suggestion and self.protocol == deserialize_server(suggestion)[2]:
                self.switch_to_interface(suggestion)
            else:
                self.switch_to_random_interface()
    def switch_to_interface(self, server):
        '''Switch to server as our interface.  If no connection exists nor
        being opened, start a thread to connect.  The actual switch will
        happen on receipt of the connection notification.  Do nothing
        if server already is our interface.'''
        self.default_server = server
        if server not in self.interfaces:
            self.interface = None
            self.start_interface(server)
            return
        i = self.interfaces[server]
        if self.interface != i:
            self.print_error("switching to", server)
            # stop any current interface in order to terminate subscriptions
            self.close_interface(self.interface)
            self.interface = i
            self.send_subscriptions()
            self.set_status('connected')
            self.notify('updated')
    def close_interface(self, interface):
        """Close `interface` and drop it from the registry; clears
        self.interface if it was the main server."""
        if interface:
            self.interfaces.pop(interface.server)
            if interface.server == self.default_server:
                self.interface = None
            interface.close()
    def add_recent_server(self, server):
        # list is ordered
        if server in self.recent_servers:
            self.recent_servers.remove(server)
        self.recent_servers.insert(0, server)
        # Keep at most the 20 most recent entries.
        self.recent_servers = self.recent_servers[0:20]
        self.save_recent_servers()
    def process_response(self, interface, response, callbacks):
        """Consume server-state responses we handle ourselves, then pass
        the (canonical-form) response to the given client callbacks."""
        if self.debug:
            self.print_error("<--", response)
        error = response.get('error')
        result = response.get('result')
        method = response.get('method')
        params = response.get('params')
        # We handle some responses; return the rest to the client.
        if method == 'server.version':
            interface.server_version = result
        elif method == 'blockchain.headers.subscribe':
            if error is None:
                self.on_header(interface, result)
        elif method == 'server.peers.subscribe':
            if error is None:
                self.irc_servers = parse_servers(result)
                self.notify('servers')
        elif method == 'server.banner':
            if error is None:
                self.banner = result
                self.notify('banner')
        elif method == 'server.donation_address':
            if error is None:
                self.donation_address = result
        elif method == 'blockchain.estimatefee':
            if error is None:
                i = params[0]
                # Servers report BTC/kB; store satoshis/kB.
                self.fee_estimates[i] = int(result * COIN)
                self.notify('fee')
        elif method == 'blockchain.relayfee':
            if error is None:
                self.relay_fee = int(result * COIN)
                self.print_error("relayfee", self.relay_fee)
        elif method == 'blockchain.block.get_chunk':
            self.on_get_chunk(interface, response)
        elif method == 'blockchain.block.get_header':
            self.on_get_header(interface, response)
        for callback in callbacks:
            callback(response)
    def get_index(self, method, params):
        """ hashable index for subscriptions and cache"""
        return str(method) + (':' + str(params[0]) if params else '')
    def process_responses(self, interface):
        """Drain and dispatch all responses queued on `interface`,
        rewriting notifications into canonical request/response shape."""
        responses = interface.get_responses()
        for request, response in responses:
            if request:
                method, params, message_id = request
                k = self.get_index(method, params)
                # client requests go through self.send() with a
                # callback, are only sent to the current interface,
                # and are placed in the unanswered_requests dictionary
                client_req = self.unanswered_requests.pop(message_id, None)
                if client_req:
                    assert interface == self.interface
                    callbacks = [client_req[2]]
                else:
                    callbacks = []
                # Copy the request method and params to the response
                response['method'] = method
                response['params'] = params
                # Only once we've received a response to an addr subscription
                # add it to the list; avoids double-sends on reconnection
                if method == 'blockchain.address.subscribe':
                    self.subscribed_addresses.add(params[0])
            else:
                if not response:  # Closed remotely / misbehaving
                    self.connection_down(interface.server)
                    break
                # Rewrite response shape to match subscription request response
                method = response.get('method')
                params = response.get('params')
                k = self.get_index(method, params)
                if method == 'blockchain.headers.subscribe':
                    response['result'] = params[0]
                    response['params'] = []
                elif method == 'blockchain.address.subscribe':
                    response['params'] = [params[0]]  # addr
                    response['result'] = params[1]
                callbacks = self.subscriptions.get(k, [])
            # update cache if it's a subscription
            if method.endswith('.subscribe'):
                self.sub_cache[k] = response
            # Response is now in canonical form
            self.process_response(interface, response, callbacks)
    def send(self, messages, callback):
        '''Messages is a list of (method, params) tuples'''
        with self.lock:
            self.pending_sends.append((messages, callback))
    def process_pending_sends(self):
        # Requests needs connectivity.  If we don't have an interface,
        # we cannot process them.
        if not self.interface:
            return
        with self.lock:
            sends = self.pending_sends
            self.pending_sends = []
        for messages, callback in sends:
            for method, params in messages:
                r = None
                if method.endswith('.subscribe'):
                    k = self.get_index(method, params)
                    # add callback to list
                    l = self.subscriptions.get(k, [])
                    if callback not in l:
                        l.append(callback)
                    self.subscriptions[k] = l
                    # check cached response for subscriptions
                    r = self.sub_cache.get(k)
                if r is not None:
                    util.print_error("cache hit", k)
                    callback(r)
                else:
                    message_id = self.queue_request(method, params)
                    self.unanswered_requests[message_id] = method, params, callback
    def unsubscribe(self, callback):
        '''Unsubscribe a callback to free object references to enable GC.'''
        # Note: we can't unsubscribe from the server, so if we receive
        # subsequent notifications process_response() will emit a harmless
        # "received unexpected notification" warning
        with self.lock:
            for v in self.subscriptions.values():
                if callback in v:
                    v.remove(callback)
    def connection_down(self, server):
        '''A connection to server either went down, or was never made.
        We distinguish by whether it is in self.interfaces.'''
        self.disconnected_servers.add(server)
        if server == self.default_server:
            self.set_status('disconnected')
        if server in self.interfaces:
            self.close_interface(self.interfaces[server])
            self.heights.pop(server, None)
            self.notify('interfaces')
    def new_interface(self, server, socket):
        """A connection attempt succeeded: register the Interface and
        subscribe to its block headers."""
        self.add_recent_server(server)
        self.interfaces[server] = interface = Interface(server, socket)
        self.queue_request('blockchain.headers.subscribe', [], interface)
        if server == self.default_server:
            self.switch_to_interface(server)
        self.notify('interfaces')
    def maintain_sockets(self):
        '''Socket maintenance.'''
        # Responses to connection attempts?
        while not self.socket_queue.empty():
            server, socket = self.socket_queue.get()
            self.connecting.remove(server)
            if socket:
                self.new_interface(server, socket)
            else:
                self.connection_down(server)
        # Send pings and shut down stale interfaces
        for interface in self.interfaces.values():
            if interface.has_timed_out():
                self.connection_down(interface.server)
            elif interface.ping_required():
                params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
                self.queue_request('server.version', params, interface)
        now = time.time()
        # nodes
        if len(self.interfaces) + len(self.connecting) < self.num_server:
            self.start_random_interface()
            if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
                self.print_error('network: retrying connections')
                self.disconnected_servers = set([])
                self.nodes_retry_time = now
        # main interface
        if not self.is_connected():
            if self.auto_connect:
                if not self.is_connecting():
                    self.switch_to_random_interface()
            else:
                if self.default_server in self.disconnected_servers:
                    if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
                        self.disconnected_servers.remove(self.default_server)
                        self.server_retry_time = now
                else:
                    self.switch_to_interface(self.default_server)
    def request_chunk(self, interface, data, idx):
        """Ask `interface` for header chunk `idx`, recording request state
        in `data` (the current bc_requests entry)."""
        interface.print_error("requesting chunk %d" % idx)
        self.queue_request('blockchain.block.get_chunk', [idx], interface)
        data['chunk_idx'] = idx
        data['req_time'] = time.time()
    def on_get_chunk(self, interface, response):
        '''Handle receiving a chunk of block headers'''
        if self.bc_requests:
            req_if, data = self.bc_requests[0]
            req_idx = data.get('chunk_idx')
            # Ignore unsolicited chunks
            if req_if == interface and req_idx == response['params'][0]:
                idx = self.blockchain.connect_chunk(req_idx, response['result'])
                # If not finished, get the next chunk
                if idx < 0 or self.get_local_height() >= data['if_height']:
                    self.bc_requests.popleft()
                    self.notify('updated')
                else:
                    self.request_chunk(interface, data, idx)
    def request_header(self, interface, data, height):
        """Ask `interface` for a single header, recording request state."""
        interface.print_error("requesting header %d" % height)
        self.queue_request('blockchain.block.get_header', [height], interface)
        data['header_height'] = height
        data['req_time'] = time.time()
        if not 'chain' in data:
            data['chain'] = []
    def on_get_header(self, interface, response):
        '''Handle receiving a single block header'''
        if self.bc_requests:
            req_if, data = self.bc_requests[0]
            req_height = data.get('header_height', -1)
            # Ignore unsolicited headers
            if req_if == interface and req_height == response['params'][0]:
                next_height = self.blockchain.connect_header(data['chain'], response['result'])
                # If not finished, get the next header
                if next_height in [True, False]:
                    self.bc_requests.popleft()
                    if next_height:
                        self.switch_lagging_interface(interface.server)
                        self.notify('updated')
                    else:
                        interface.print_error("header didn't connect, dismissing interface")
                        interface.stop()
                else:
                    self.request_header(interface, data, next_height)
    def bc_request_headers(self, interface, data):
        '''Send a request for the next header, or a chunk of them,
        if necessary.
        '''
        local_height, if_height = self.get_local_height(), data['if_height']
        if if_height <= local_height:
            return False
        elif if_height > local_height + 50:
            # Py2 integer division: chunk index of the next needed header.
            self.request_chunk(interface, data, (local_height + 1) / 2016)
        else:
            self.request_header(interface, data, if_height)
        return True
    def handle_bc_requests(self):
        '''Work through each interface that has notified us of a new header.
        Send it requests if it is ahead of our blockchain object.
        '''
        while self.bc_requests:
            interface, data = self.bc_requests.popleft()
            # If the connection was lost move on
            if not interface in self.interfaces.values():
                continue
            req_time = data.get('req_time')
            if not req_time:
                # No requests sent yet.  This interface has a new height.
                # Request headers if it is ahead of our blockchain
                if not self.bc_request_headers(interface, data):
                    continue
            elif time.time() - req_time > 10:
                interface.print_error("blockchain request timed out")
                self.connection_down(interface.server)
                continue
            # Put updated request state back at head of deque
            self.bc_requests.appendleft((interface, data))
            break
    def wait_on_sockets(self):
        # Python docs say Windows doesn't like empty selects.
        # Sleep to prevent busy looping
        if not self.interfaces:
            time.sleep(0.1)
            return
        rin = [i for i in self.interfaces.values()]
        win = [i for i in self.interfaces.values() if i.unsent_requests]
        try:
            rout, wout, xout = select.select(rin, win, [], 0.1)
        except socket.error as (code, msg):
            # Note: tuple-unpacking except clause is Python2-only syntax.
            if code == errno.EINTR:
                return
            raise
        assert not xout
        for interface in wout:
            interface.send_requests()
        for interface in rout:
            self.process_responses(interface)
    def run(self):
        """Main daemon loop: initialize the blockchain, then service
        sockets, header requests, jobs and pending sends until stopped."""
        import threading
        t = threading.Thread(target = self.blockchain.init)
        t.daemon = True
        t.start()
        # Wait for blockchain init without blocking shutdown.
        while t.isAlive() and self.is_running():
            t.join(1)
        while self.is_running():
            self.maintain_sockets()
            self.wait_on_sockets()
            self.handle_bc_requests()
            self.run_jobs()    # Synchronizer and Verifier
            self.process_pending_sends()
        self.stop_network()
        self.on_stop()
    def on_header(self, i, header):
        """An interface reported a new chain tip via headers.subscribe."""
        height = header.get('block_height')
        if not height:
            return
        self.heights[i.server] = height
        self.merkle_roots[i.server] = header.get('merkle_root')
        self.utxo_roots[i.server] = header.get('utxo_root')
        # Queue this interface's height for asynchronous catch-up
        self.bc_requests.append((i, {'if_height': height}))
        if i == self.interface:
            self.switch_lagging_interface()
            self.notify('updated')
    def get_header(self, tx_height):
        return self.blockchain.read_header(tx_height)
    def get_local_height(self):
        return self.blockchain.height()
    def synchronous_get(self, request, timeout=30):
        """Send one request and block until its result arrives (or raise
        on timeout / server-side error)."""
        queue = Queue.Queue()
        self.send([request], queue.put)
        try:
            r = queue.get(True, timeout)
        except Queue.Empty:
            raise BaseException('Server did not answer')
        if r.get('error'):
            raise BaseException(r.get('error'))
        return r.get('result')
    def broadcast(self, tx, timeout=30):
        """Broadcast a transaction; returns (ok, txid-or-error-message)."""
        tx_hash = tx.hash()
        try:
            out = self.synchronous_get(('blockchain.transaction.broadcast', [str(tx)]), timeout)
        except BaseException as e:
            return False, "error: " + str(e)
        if out != tx_hash:
            # Server echoed something other than our txid: treat as failure.
            return False, "error: " + out
        return True, out
| |
# Authors: Denis A. Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Yuval Harpaz <yuvharpaz@gmail.com>
#
# simplified BSD-3 license
import os.path as op
from itertools import count
import numpy as np
from ...utils import logger, verbose, sum_squared
from ..constants import FIFF
from ..base import _BaseRaw
from .constants import BTI
from .read import (read_int32, read_int16, read_str, read_float, read_double,
read_transform, read_char, read_int64, read_uint16,
read_uint32, read_double_matrix, read_float_matrix,
read_int16_matrix)
from .transforms import (bti_identity_trans, bti_to_vv_trans,
bti_to_vv_coil_trans, inverse_trans, merge_trans)
from ..meas_info import Info
from ...externals import six
# Field names and matching defaults used to build each FIFF channel info
# dict (zipped together in RawBTi.__init__).
FIFF_INFO_CHS_FIELDS = ('loc', 'ch_name', 'unit_mul', 'coil_trans',
                        'coord_frame', 'coil_type', 'range', 'unit', 'cal',
                        'eeg_loc', 'scanno', 'kind', 'logno')
FIFF_INFO_CHS_DEFAULTS = (np.array([0, 0, 0, 1] * 3, dtype='f4'),
                          None, 0, None, 0, 0, 1.0,
                          107, 1.0, None, None, 402, None)
# Field names / defaults for digitization point dicts (_setup_head_shape).
FIFF_INFO_DIG_FIELDS = ('kind', 'ident', 'r', 'coord_frame')
FIFF_INFO_DIG_DEFAULTS = (None, None, None, FIFF.FIFFV_COORD_HEAD)
# Reference magnetometer / gradiometer names for the MAGNES WH2500 system.
BTI_WH2500_REF_MAG = ['MxA', 'MyA', 'MzA', 'MxaA', 'MyaA', 'MzaA']
BTI_WH2500_REF_GRAD = ['GxxA', 'GyyA', 'GyxA', 'GzaA', 'GzyA']
# Map BTi data_format codes 1-4 to big-endian numpy dtypes.
dtypes = zip(list(range(1, 5)), ('>i2', '>i4', '>f4', '>f8'))
DTYPES = dict((i, np.dtype(t)) for i, t in dtypes)
# Keys that must all be present in a complete measurement info dict
# (asserted at the end of RawBTi.__init__).
RAW_INFO_FIELDS = ['dev_head_t', 'nchan', 'bads', 'projs', 'dev_ctf_t',
                   'meas_date', 'meas_id', 'dig', 'sfreq', 'highpass',
                   'comps', 'chs', 'ch_names', 'file_id',
                   'lowpass', 'acq_pars', 'acq_stim', 'filename',
                   'ctf_head_t']
def _rename_channels(names, ecg_ch='E31', eog_ch=('E63', 'E64')):
"""Renames appropriately ordered list of channel names
Parameters
----------
names : list of str
Lists of 4-D channel names in ascending order
Returns
-------
new : list
List of names, channel names in Neuromag style
"""
new = list()
ref_mag, ref_grad, eog, eeg, ext = [count(1) for _ in range(5)]
for i, name in enumerate(names, 1):
if name.startswith('A'):
name = 'MEG %3.3d' % i
elif name == 'RESPONSE':
name = 'STI 013'
elif name == 'TRIGGER':
name = 'STI 014'
elif any([name == k for k in eog_ch]):
name = 'EOG %3.3d' % six.advance_iterator(eog)
elif name == ecg_ch:
name = 'ECG 001'
elif name.startswith('E'):
name = 'EEG %3.3d' % six.advance_iterator(eeg)
elif name == 'UACurrent':
name = 'UTL 001'
elif name.startswith('M'):
name = 'RFM %3.3d' % six.advance_iterator(ref_mag)
elif name.startswith('G'):
name = 'RFG %3.3d' % six.advance_iterator(ref_grad)
elif name.startswith('X'):
name = 'EXT %3.3d' % six.advance_iterator(ext)
new += [name]
return new
def _read_head_shape(fname):
    """Read index (fiducial/HPI) points and digitization points from a
    BTi head shape file."""
    with open(fname, 'rb') as fid:
        # Number of digitization points lives at a fixed file offset.
        fid.seek(BTI.FILE_HS_N_DIGPOINTS)
        n_dig = read_int32(fid)
        index_points = read_double_matrix(fid, BTI.DATA_N_IDX_POINTS, 3)
        digitization = read_double_matrix(fid, n_dig, 3)
    return index_points, digitization
def _convert_head_shape(idx_points, dig_points):
    """Convert BTi head-shape points into Neuromag-oriented coordinates.

    Derives an in-plane rotation + translation from the index (fiducial)
    points and applies it to the digitization points.  Returns the
    transformed index points, transformed dig points, and the 4x4
    transform used.
    """
    fp = idx_points.astype('>f8')
    # Projection of the third point onto the axis spanned by the first two.
    dp = np.sum(fp[2] * (fp[0] - fp[1]))
    tmp1, tmp2 = sum_squared(fp[2]), sum_squared(fp[0] - fp[1])
    dcos = -dp / np.sqrt(tmp1 * tmp2)
    # sin derived from cos; assumes |dcos| <= 1 — TODO confirm inputs
    # can never make the sqrt argument negative.
    dsin = np.sqrt(1. - dcos * dcos)
    dt = dp / np.sqrt(tmp2)
    idx_points_nm = np.ones((len(fp), 3), dtype='>f8')
    for idx, f in enumerate(fp):
        # 2-D rotation in the x/y plane plus translation along x; z kept.
        idx_points_nm[idx, 0] = dcos * f[0] - dsin * f[1] + dt
        idx_points_nm[idx, 1] = dsin * f[0] + dcos * f[1]
        idx_points_nm[idx, 2] = f[2]
    # adjust order of fiducials to Neuromag
    idx_points_nm[[1, 2]] = idx_points_nm[[2, 1]]
    # Build the equivalent 4x4 transform and apply it to the dig points.
    t = bti_identity_trans('>f8')
    t[0, 0] = dcos
    t[0, 1] = -dsin
    t[1, 0] = dsin
    t[1, 1] = dcos
    t[0, 3] = dt
    dig_points_nm = np.dot(t[BTI.T_ROT_IX], dig_points.T).T
    dig_points_nm += t[BTI.T_TRANS_IX].T
    return idx_points_nm, dig_points_nm, t
def _setup_head_shape(fname, use_hpi=True):
    """Read index points and dig points from BTi head shape file

    Parameters
    ----------
    fname : str
        The absolute path to the head shape file
    use_hpi : bool
        Whether to treat additional hpi coils as digitization points or not.
        If False, hpi coils will be discarded.

    Returns
    -------
    dig : list of dicts
        The list of dig point info structures needed for the fiff info
        structure.
    t : ndarray
        The transform returned by _convert_head_shape.
    """
    idx_points, dig_points = _read_head_shape(fname)
    idx_points, dig_points, t = _convert_head_shape(idx_points, dig_points)
    all_points = np.r_[idx_points, dig_points].astype('>f4')
    # Idents: 1-3 for the cardinal points, then 1.. for the HPI coils.
    idx_idents = list(range(1, 4)) + list(range(1, (len(idx_points) + 1) - 3))
    dig = []
    for idx in range(all_points.shape[0]):
        point_info = dict(zip(FIFF_INFO_DIG_FIELDS, FIFF_INFO_DIG_DEFAULTS))
        point_info['r'] = all_points[idx]
        if idx < 3:
            point_info['kind'] = FIFF.FIFFV_POINT_CARDINAL
            point_info['ident'] = idx_idents[idx]
        if 2 < idx < len(idx_points) and use_hpi:
            point_info['kind'] = FIFF.FIFFV_POINT_HPI
            point_info['ident'] = idx_idents[idx]
        elif idx > 4:
            # NOTE(review): `idx > 4` looks like it assumes exactly five
            # index points; indices >= len(idx_points) would be the generic
            # condition for EXTRA points — confirm against the file format.
            point_info['kind'] = FIFF.FIFFV_POINT_EXTRA
            point_info['ident'] = (idx + 1) - len(idx_idents)
        if 2 < idx < len(idx_points) and not use_hpi:
            # Discard HPI coils when they are not wanted as dig points.
            pass
        else:
            dig += [point_info]
    return dig, t
def _convert_coil_trans(coil_trans, bti_trans, bti_to_nm):
    """Convert one coil transform to Neuromag conventions.

    Returns the converted 4x4 transform and the flattened 12-element
    location vector derived from it.
    """
    converted = bti_to_vv_coil_trans(coil_trans, bti_trans, bti_to_nm)
    loc = np.roll(converted.copy().T, 1, 0)[:, :3].flatten()
    return converted, loc
def _convert_dev_head_t(bti_trans, bti_to_nm, m_h_nm_h):
    """Compose the Neuromag device-to-head transform from its parts."""
    sensor_inv = inverse_trans(bti_identity_trans(), bti_to_nm)
    sensor_to_head = merge_trans(bti_trans, sensor_inv)
    dev_head_t = merge_trans(m_h_nm_h, sensor_to_head)
    # Zero the projective entries so the bottom row stays [0, 0, 0, 1].
    dev_head_t[3, :3] = 0.
    return dev_head_t
def _correct_offset(fid):
    """Advance the file pointer to the next BTI.FILE_CURPOS-byte boundary."""
    position = fid.tell()
    remainder = position % BTI.FILE_CURPOS
    if remainder:
        fid.seek(BTI.FILE_CURPOS - remainder, 1)
def _read_config(fname):
    """Read BTi system config file

    Parameters
    ----------
    fname : str
        The absolute path to the config file

    Returns
    -------
    cfg : dict
        The config blocks found: file header ('hdr'), sensor transforms
        ('transforms'), user blocks keyed by kind ('user_blocks') and the
        per-channel records ('chs').
    """
    with open(fname, 'rb') as fid:
        cfg = dict()
        # Fixed-layout file header.
        cfg['hdr'] = {'version': read_int16(fid),
                      'site_name': read_str(fid, 32),
                      'dap_hostname': read_str(fid, 16),
                      'sys_type': read_int16(fid),
                      'sys_options': read_int32(fid),
                      'supply_freq': read_int16(fid),
                      'total_chans': read_int16(fid),
                      'system_fixed_gain': read_float(fid),
                      'volts_per_bit': read_float(fid),
                      'total_sensors': read_int16(fid),
                      'total_user_blocks': read_int16(fid),
                      'next_der_chan_no': read_int16(fid)}
        fid.seek(2, 1)  # skip alignment padding
        cfg['checksum'] = read_uint32(fid)
        cfg['reserved'] = read_char(fid, 32)
        cfg['transforms'] = [read_transform(fid) for t in
                             range(cfg['hdr']['total_sensors'])]
        cfg['user_blocks'] = dict()
        for block in range(cfg['hdr']['total_user_blocks']):
            ub = dict()
            ub['hdr'] = {'nbytes': read_int32(fid),
                         'kind': read_str(fid, 20),
                         'checksum': read_int32(fid),
                         'username': read_str(fid, 32),
                         'timestamp': read_int32(fid),
                         'user_space_size': read_int32(fid),
                         'reserved': read_char(fid, 32)}
            _correct_offset(fid)
            kind = ub['hdr'].pop('kind')
            if not kind:  # make sure reading goes right. Should never be empty
                raise RuntimeError('Could not read user block. Probably you '
                                   'acquired data using a BTi version '
                                   'currently not supported. Please contact '
                                   'the mne-python developers.')
            dta, cfg['user_blocks'][kind] = dict(), ub
            if kind in [v for k, v in BTI.items() if k[:5] == 'UB_B_']:
                if kind == BTI.UB_B_MAG_INFO:
                    dta['version'] = read_int32(fid)
                    fid.seek(20, 1)
                    dta['headers'] = list()
                    for hdr in range(6):
                        d = {'name': read_str(fid, 16),
                             'transform': read_transform(fid),
                             'units_per_bit': read_float(fid)}
                        dta['headers'] += [d]
                        fid.seek(20, 1)
                elif kind == BTI.UB_B_COH_POINTS:
                    dta['n_points'] = read_int32(fid)
                    dta['status'] = read_int32(fid)
                    dta['points'] = []
                    for pnt in range(16):
                        d = {'pos': read_double_matrix(fid, 1, 3),
                             'direction': read_double_matrix(fid, 1, 3),
                             'error': read_double(fid)}
                        dta['points'] += [d]
                elif kind == BTI.UB_B_CCP_XFM_BLOCK:
                    dta['method'] = read_int32(fid)
                    # handle difference btw/ linux (0) and solaris (4)
                    size = 0 if ub['hdr']['user_space_size'] == 132 else 4
                    fid.seek(size, 1)
                    dta['transform'] = read_transform(fid)
                elif kind == BTI.UB_B_EEG_LOCS:
                    dta['electrodes'] = []
                    while True:
                        d = {'label': read_str(fid, 16),
                             'location': read_double_matrix(fid, 1, 3)}
                        if not d['label']:
                            # An empty label terminates the electrode list.
                            break
                        dta['electrodes'] += [d]
                elif kind in [BTI.UB_B_WHC_CHAN_MAP_VER,
                              BTI.UB_B_WHS_SUBSYS_VER]:
                    dta['version'] = read_int16(fid)
                    dta['struct_size'] = read_int16(fid)
                    dta['entries'] = read_int16(fid)
                    fid.seek(8, 1)
                elif kind == BTI.UB_B_WHC_CHAN_MAP:
                    # Entry count comes from the matching *_VER block,
                    # which must have been read earlier.
                    num_channels = None
                    for name, data in cfg['user_blocks'].items():
                        if name == BTI.UB_B_WHC_CHAN_MAP_VER:
                            num_channels = data['entries']
                            break
                    if num_channels is None:
                        raise ValueError('Cannot find block %s to determine '
                                         'number of channels'
                                         % BTI.UB_B_WHC_CHAN_MAP_VER)
                    dta['channels'] = list()
                    for i in range(num_channels):
                        d = {'subsys_type': read_int16(fid),
                             'subsys_num': read_int16(fid),
                             'card_num': read_int16(fid),
                             'chan_num': read_int16(fid),
                             'recdspnum': read_int16(fid)}
                        dta['channels'] += [d]
                        fid.seek(8, 1)
                elif kind == BTI.UB_B_WHS_SUBSYS:
                    num_subsys = None
                    for name, data in cfg['user_blocks'].items():
                        if name == BTI.UB_B_WHS_SUBSYS_VER:
                            num_subsys = data['entries']
                            break
                    if num_subsys is None:
                        raise ValueError('Cannot find block %s to determine'
                                         ' number of subsystems'
                                         % BTI.UB_B_WHS_SUBSYS_VER)
                    dta['subsys'] = list()
                    for sub_key in range(num_subsys):
                        d = {'subsys_type': read_int16(fid),
                             'subsys_num': read_int16(fid),
                             'cards_per_sys': read_int16(fid),
                             'channels_per_card': read_int16(fid),
                             'card_version': read_int16(fid)}
                        fid.seek(2, 1)
                        d.update({'offsetdacgain': read_float(fid),
                                  'squid_type': read_int32(fid),
                                  'timesliceoffset': read_int16(fid),
                                  'padding': read_int16(fid),
                                  'volts_per_bit': read_float(fid)})
                        dta['subsys'] += [d]
                elif kind == BTI.UB_B_CH_LABELS:
                    dta['version'] = read_int32(fid)
                    dta['entries'] = read_int32(fid)
                    fid.seek(16, 1)
                    dta['labels'] = list()
                    for label in range(dta['entries']):
                        dta['labels'] += [read_str(fid, 16)]
                elif kind == BTI.UB_B_CALIBRATION:
                    dta['sensor_no'] = read_int16(fid)
                    fid.seek(2, 1)
                    dta['timestamp'] = read_int32(fid)
                    dta['logdir'] = read_str(fid, 256)
                elif kind == BTI.UB_B_SYS_CONFIG_TIME:
                    # handle difference btw/ linux (256) and solaris (512)
                    size = 256 if ub['hdr']['user_space_size'] == 260 else 512
                    dta['sysconfig_name'] = read_str(fid, size)
                    dta['timestamp'] = read_int32(fid)
                elif kind == BTI.UB_B_DELTA_ENABLED:
                    dta['delta_enabled'] = read_int16(fid)
                elif kind in [BTI.UB_B_E_TABLE_USED, BTI.UB_B_E_TABLE]:
                    dta['hdr'] = {'version': read_int32(fid),
                                  'entry_size': read_int32(fid),
                                  'n_entries': read_int32(fid),
                                  'filtername': read_str(fid, 16),
                                  'n_e_values': read_int32(fid),
                                  'reserved': read_str(fid, 28)}
                    if dta['hdr']['version'] == 2:
                        size = 16
                        dta['ch_names'] = [read_str(fid, size) for ch in
                                           range(dta['hdr']['n_entries'])]
                        dta['e_ch_names'] = [read_str(fid, size) for ch in
                                             range(dta['hdr']['n_e_values'])]
                        rows = dta['hdr']['n_entries']
                        cols = dta['hdr']['n_e_values']
                        dta['etable'] = read_float_matrix(fid, rows, cols)
                    else:  # handle MAGNES2500 naming scheme
                        dta['ch_names'] = ['WH2500'] * dta['hdr']['n_e_values']
                        dta['hdr']['n_e_values'] = 6
                        dta['e_ch_names'] = BTI_WH2500_REF_MAG
                        rows = dta['hdr']['n_entries']
                        cols = dta['hdr']['n_e_values']
                        dta['etable'] = read_float_matrix(fid, rows, cols)
                    _correct_offset(fid)
                elif any([kind == BTI.UB_B_WEIGHTS_USED,
                          kind[:4] == BTI.UB_B_WEIGHT_TABLE]):
                    dta['hdr'] = {'version': read_int32(fid),
                                  'entry_size': read_int32(fid),
                                  'n_entries': read_int32(fid),
                                  'name': read_str(fid, 32),
                                  'description': read_str(fid, 80),
                                  'n_anlg': read_int32(fid),
                                  'n_dsp': read_int32(fid),
                                  'reserved': read_str(fid, 72)}
                    if dta['hdr']['version'] == 2:
                        dta['ch_names'] = [read_str(fid, 16) for ch in
                                           range(dta['hdr']['n_entries'])]
                        dta['anlg_ch_names'] = [read_str(fid, 16) for ch in
                                                range(dta['hdr']['n_anlg'])]
                        dta['dsp_ch_names'] = [read_str(fid, 16) for ch in
                                               range(dta['hdr']['n_dsp'])]
                        rows = dta['hdr']['n_entries']
                        cols = dta['hdr']['n_dsp']
                        dta['dsp_wts'] = read_float_matrix(fid, rows, cols)
                        cols = dta['hdr']['n_anlg']
                        dta['anlg_wts'] = read_int16_matrix(fid, rows, cols)
                    else:  # handle MAGNES2500 naming scheme
                        dta['ch_names'] = ['WH2500'] * dta['hdr']['n_entries']
                        dta['anlg_ch_names'] = BTI_WH2500_REF_MAG[:3]
                        dta['hdr']['n_anlg'] = len(dta['anlg_ch_names'])
                        dta['dsp_ch_names'] = BTI_WH2500_REF_GRAD
                        # BUG FIX: was dta['hdr.n_dsp'] = ..., which created
                        # a stray top-level key instead of overriding the
                        # header value used to size dsp_wts below.
                        dta['hdr']['n_dsp'] = len(dta['dsp_ch_names'])
                        dta['anlg_wts'] = np.zeros((dta['hdr']['n_entries'],
                                                    dta['hdr']['n_anlg']),
                                                   dtype='i2')
                        dta['dsp_wts'] = np.zeros((dta['hdr']['n_entries'],
                                                   dta['hdr']['n_dsp']),
                                                  dtype='f4')
                        for n in range(dta['hdr']['n_entries']):
                            # BUG FIX: rows were indexed with the stale
                            # variable `d` from an earlier block instead of
                            # the loop variable `n`.
                            dta['anlg_wts'][n] = read_int16_matrix(
                                fid, 1, dta['hdr']['n_anlg'])
                            read_int16(fid)
                            dta['dsp_wts'][n] = read_float_matrix(
                                fid, 1, dta['hdr']['n_dsp'])
                    _correct_offset(fid)
                elif kind == BTI.UB_B_TRIG_MASK:
                    dta['version'] = read_int32(fid)
                    dta['entries'] = read_int32(fid)
                    fid.seek(16, 1)
                    dta['masks'] = []
                    for entry in range(dta['entries']):
                        d = {'name': read_str(fid, 20),
                             'nbits': read_uint16(fid),
                             'shift': read_uint16(fid),
                             'mask': read_uint32(fid)}
                        dta['masks'] += [d]
                        fid.seek(8, 1)
            else:
                # Unknown block kinds are kept verbatim so nothing is lost.
                dta['unknown'] = {'hdr': read_char(fid,
                                  ub['hdr']['user_space_size'])}
            ub.update(dta)  # finally update the userblock data
            _correct_offset(fid)  # after reading.
        cfg['chs'] = list()
        # prepare reading channels
        dev_header = lambda x: {'size': read_int32(x),
                                'checksum': read_int32(x),
                                'reserved': read_str(x, 32)}
        for channel in range(cfg['hdr']['total_chans']):
            ch = {'name': read_str(fid, 16),
                  'chan_no': read_int16(fid),
                  'ch_type': read_uint16(fid),
                  'sensor_no': read_int16(fid),
                  'data': dict()}
            fid.seek(2, 1)
            ch.update({'gain': read_float(fid),
                       'units_per_bit': read_float(fid),
                       'yaxis_label': read_str(fid, 16),
                       'aar_val': read_double(fid),
                       'checksum': read_int32(fid),
                       'reserved': read_str(fid, 32)})
            cfg['chs'] += [ch]
            _correct_offset(fid)  # before and after
            dta = dict()
            # Channel-type-specific device records follow.
            if ch['ch_type'] in [BTI.CHTYPE_MEG, BTI.CHTYPE_REFERENCE]:
                dev = {'device_info': dev_header(fid),
                       'inductance': read_float(fid),
                       'padding': read_str(fid, 4),
                       'transform': read_transform(fid),
                       'xform_flag': read_int16(fid),
                       'total_loops': read_int16(fid)}
                fid.seek(4, 1)
                dev['reserved'] = read_str(fid, 32)
                dta.update({'dev': dev, 'loops': []})
                for loop in range(dev['total_loops']):
                    d = {'position': read_double_matrix(fid, 1, 3),
                         'orientation': read_double_matrix(fid, 1, 3),
                         'radius': read_double(fid),
                         'wire_radius': read_double(fid),
                         'turns': read_int16(fid)}
                    fid.seek(2, 1)
                    d['checksum'] = read_int32(fid)
                    d['reserved'] = read_str(fid, 32)
                    dta['loops'] += [d]
            elif ch['ch_type'] == BTI.CHTYPE_EEG:
                dta = {'device_info': dev_header(fid),
                       'impedance': read_float(fid),
                       'padding': read_str(fid, 4),
                       'transform': read_transform(fid),
                       'reserved': read_char(fid, 32)}
            elif ch['ch_type'] == BTI.CHTYPE_EXTERNAL:
                dta = {'device_info': dev_header(fid),
                       'user_space_size': read_int32(fid),
                       'reserved': read_str(fid, 32)}
            elif ch['ch_type'] == BTI.CHTYPE_TRIGGER:
                dta = {'device_info': dev_header(fid),
                       'user_space_size': read_int32(fid)}
                fid.seek(2, 1)
                dta['reserved'] = read_str(fid, 32)
            elif ch['ch_type'] in [BTI.CHTYPE_UTILITY, BTI.CHTYPE_DERIVED]:
                dta = {'device_info': dev_header(fid),
                       'user_space_size': read_int32(fid),
                       'reserved': read_str(fid, 32)}
            elif ch['ch_type'] == BTI.CHTYPE_SHORTED:
                dta = {'device_info': dev_header(fid),
                       'reserved': read_str(fid, 32)}
            ch.update(dta)  # add data collected
            _correct_offset(fid)  # after each reading
    return cfg
def _read_epoch(fid):
    """Read BTi PDF epoch record (counts, timing and checksum)."""
    out = {'pts_in_epoch': read_int32(fid),
           'epoch_duration': read_float(fid),
           'expected_iti': read_float(fid),
           'actual_iti': read_float(fid),
           'total_var_events': read_int32(fid),
           'checksum': read_int32(fid),
           'epoch_timestamp': read_int32(fid)}
    # Skip trailing bytes of the record — presumably reserved; TODO confirm.
    fid.seek(28, 1)
    return out
def _read_channel(fid):
    """Read BTi PDF channel record (label, scaling and display range)."""
    out = {'chan_label': read_str(fid, 16),
           'chan_no': read_int16(fid),
           'attributes': read_int16(fid),
           'scale': read_float(fid),
           'yaxis_label': read_str(fid, 16),
           'valid_min_max': read_int16(fid)}
    # Skip padding between the two halves of the record — presumably
    # alignment bytes; TODO confirm.
    fid.seek(6, 1)
    out.update({'ymin': read_double(fid),
                'ymax': read_double(fid),
                'index': read_int32(fid),
                'checksum': read_int32(fid),
                'off_flag': read_str(fid, 16),
                'offset': read_float(fid)})
    # Skip trailing reserved bytes.
    fid.seek(12, 1)
    return out
def _read_event(fid):
    """Read BTi PDF event record (name, latencies and checksum)."""
    out = {'event_name': read_str(fid, 16),
           'start_lat': read_float(fid),
           'end_lat': read_float(fid),
           'step_size': read_float(fid),
           'fixed_event': read_int16(fid),
           'checksum': read_int32(fid)}
    # Skip trailing reserved bytes, then realign the file pointer.
    fid.seek(32, 1)
    _correct_offset(fid)
    return out
def _read_process(fid):
    """Read BTi PDF process record and its processing steps.

    Returns
    -------
    out : dict
        Process header plus a 'processing_steps' list; filter steps carry
        'freq' or 'high_freq'/'low_freq', defaults steps carry scaling info.
    """
    out = {'nbytes': read_int32(fid),
           'process_type': read_str(fid, 20),
           'checksum': read_int32(fid),
           'user': read_str(fid, 32),
           'timestamp': read_int32(fid),
           'filename': read_str(fid, 256),
           'total_steps': read_int32(fid)}
    fid.seek(32, 1)  # skip reserved bytes
    _correct_offset(fid)
    out['processing_steps'] = list()
    for step in range(out['total_steps']):
        this_step = {'nbytes': read_int32(fid),
                     'process_type': read_str(fid, 20),
                     'checksum': read_int32(fid)}
        ptype = this_step['process_type']
        if ptype == BTI.PROC_DEFAULTS:
            this_step['scale_option'] = read_int32(fid)
            fid.seek(4, 1)
            this_step['scale'] = read_double(fid)
            this_step['dtype'] = read_int32(fid)
            this_step['selected'] = read_int16(fid)
            this_step['color_display'] = read_int16(fid)
            fid.seek(32, 1)
        elif ptype in BTI.PROC_FILTER:
            this_step['freq'] = read_float(fid)
            fid.seek(32, 1)
        elif ptype in BTI.PROC_BPFILTER:
            this_step['high_freq'] = read_float(fid)
            # BUG FIX: key was misspelled 'low_frew'; the consumer
            # (RawBTi.__init__) reads step['low_freq'] and would KeyError.
            this_step['low_freq'] = read_float(fid)
        else:
            # Unknown step: skip its user space entirely.
            jump = this_step['user_space_size'] = read_int32(fid)
            fid.seek(32, 1)
            fid.seek(jump, 1)
        out['processing_steps'] += [this_step]
        _correct_offset(fid)
    return out
def _read_assoc_file(fid):
    """Read BTi PDF associated-file record."""
    record = dict()
    record['file_id'] = read_int16(fid)
    record['length'] = read_int16(fid)
    fid.seek(32, 1)  # skip reserved bytes
    record['checksum'] = read_int32(fid)
    return record
def _read_pfid_ed(fid):
    """Read PDF ed (event display) class record."""
    out = {'comment_size': read_int32(fid),
           'name': read_str(fid, 17)}
    # Skip padding after the name field — presumably alignment; TODO confirm.
    fid.seek(9, 1)
    out.update({'pdf_number': read_int16(fid),
                'total_events': read_int32(fid),
                'timestamp': read_int32(fid),
                'flags': read_int32(fid),
                'de_process': read_int32(fid),
                'checksum': read_int32(fid),
                'ed_id': read_int32(fid),
                'win_width': read_float(fid),
                'win_offset': read_float(fid)})
    # Skip trailing reserved bytes.
    fid.seek(8, 1)
    return out
def _read_coil_def(fid):
    """Read one coil definition.

    Returns
    -------
    coildef : dict
        Position, orientation, geometry, checksum and reserved bytes of
        a single coil loop.
    """
    coildef = {'position': read_double_matrix(fid, 1, 3),
               'orientation': read_double_matrix(fid, 1, 3),
               'radius': read_double(fid),
               'wire_radius': read_double(fid),
               'turns': read_int16(fid)}
    # BUG FIX: was fid.seek(fid, 2, 1) — the file object was passed as the
    # offset, a TypeError.  Skip the 2 padding bytes.
    fid.seek(2, 1)
    coildef['checksum'] = read_int32(fid)
    coildef['reserved'] = read_str(fid, 32)
    # BUG FIX: the original fell off the end and returned None; callers
    # (_read_ch_config) collect the returned dicts.
    return coildef
def _read_ch_config(fid):
    """Read BTi channel config record plus its type-specific payload."""
    cfg = {'name': read_str(fid, BTI.FILE_CONF_CH_NAME),
           'chan_no': read_int16(fid),
           'ch_type': read_uint16(fid),
           'sensor_no': read_int16(fid)}
    # BUG FIX: was fid.seek(fid, BTI.FILE_CONF_CH_NEXT, 1) — the file
    # object was passed as the offset, a TypeError.
    fid.seek(BTI.FILE_CONF_CH_NEXT, 1)
    cfg.update({'gain': read_float(fid),
                'units_per_bit': read_float(fid),
                'yaxis_label': read_str(fid, BTI.FILE_CONF_CH_YLABEL),
                'aar_val': read_double(fid),
                'checksum': read_int32(fid),
                'reserved': read_str(fid, BTI.FILE_CONF_CH_RESERVED)})
    _correct_offset(fid)
    # Then the channel info
    ch_type, chan = cfg['ch_type'], dict()
    chan['dev'] = {'size': read_int32(fid),
                   'checksum': read_int32(fid),
                   'reserved': read_str(fid, 32)}
    if ch_type in [BTI.CHTYPE_MEG, BTI.CHTYPE_REF]:
        # NOTE(review): 'total_loops' is not among the keys read into
        # chan['dev'] above (size/checksum/reserved only), and _read_config
        # uses BTI.CHTYPE_REFERENCE where this uses BTI.CHTYPE_REF —
        # confirm both against the BTI constants and file format.
        chan['loops'] = [_read_coil_def(fid) for d in
                         range(chan['dev']['total_loops'])]
    elif ch_type == BTI.CHTYPE_EEG:
        chan['impedance'] = read_float(fid)
        chan['padding'] = read_str(fid, BTI.FILE_CONF_CH_PADDING)
        chan['transform'] = read_transform(fid)
        chan['reserved'] = read_char(fid, BTI.FILE_CONF_CH_RESERVED)
    elif ch_type in [BTI.CHTYPE_TRIGGER, BTI.CHTYPE_EXTERNAL,
                     BTI.CHTYPE_UTILITY, BTI.CHTYPE_DERIVED]:
        chan['user_space_size'] = read_int32(fid)
        if ch_type == BTI.CHTYPE_TRIGGER:
            fid.seek(2, 1)
        chan['reserved'] = read_str(fid, BTI.FILE_CONF_CH_RESERVED)
    elif ch_type == BTI.CHTYPE_SHORTED:
        chan['reserved'] = read_str(fid, BTI.FILE_CONF_CH_RESERVED)
    cfg['chan'] = chan
    _correct_offset(fid)
    return cfg
def _read_bti_header(pdf_fname, config_fname):
    """ Read bti PDF header

    Reads the trailing header of the processed data file, merges in the
    matching channel records from the config file, and returns one info
    dict describing the measurement (channels sorted by index, then with
    MEG channels first by numeric name).
    """
    with open(pdf_fname, 'rb') as fid:
        # The header position is stored in the last 8 bytes of the file.
        fid.seek(-8, 2)
        start = fid.tell()
        header_position = read_int64(fid)
        check_value = header_position & BTI.FILE_MASK
        if ((start + BTI.FILE_CURPOS - check_value) <= BTI.FILE_MASK):
            header_position = check_value
        # Check header position for alignment issues
        if ((header_position % 8) != 0):
            header_position += (8 - (header_position % 8))
        fid.seek(header_position, 0)
        # actual header starts here
        info = {'version': read_int16(fid),
                'file_type': read_str(fid, 5),
                'hdr_size': start - header_position,  # add for convenience
                'start': start}
        fid.seek(1, 1)
        info.update({'data_format': read_int16(fid),
                     'acq_mode': read_int16(fid),
                     'total_epochs': read_int32(fid),
                     'input_epochs': read_int32(fid),
                     'total_events': read_int32(fid),
                     'total_fixed_events': read_int32(fid),
                     'sample_period': read_float(fid),
                     'xaxis_label': read_str(fid, 16),
                     'total_processes': read_int32(fid),
                     'total_chans': read_int16(fid)})
        fid.seek(2, 1)
        info.update({'checksum': read_int32(fid),
                     'total_ed_classes': read_int32(fid),
                     'total_associated_files': read_int16(fid),
                     'last_file_index': read_int16(fid),
                     'timestamp': read_int32(fid)})
        fid.seek(20, 1)
        _correct_offset(fid)
        # actual header ends here, so far seems ok.
        # Variable-length sections follow, sized by the header counts above.
        info['epochs'] = [_read_epoch(fid) for epoch in
                          range(info['total_epochs'])]
        info['chs'] = [_read_channel(fid) for ch in
                       range(info['total_chans'])]
        info['events'] = [_read_event(fid) for event in
                          range(info['total_events'])]
        info['processes'] = [_read_process(fid) for process in
                             range(info['total_processes'])]
        info['assocfiles'] = [_read_assoc_file(fid) for af in
                              range(info['total_associated_files'])]
        info['edclasses'] = [_read_pfid_ed(fid) for ed_class in
                             range(info['total_ed_classes'])]
        # Keep whatever remains before the trailing pointer verbatim.
        info['extra_data'] = fid.read(start - fid.tell())
        info['pdf_fname'] = pdf_fname
    info['total_slices'] = sum(e['pts_in_epoch'] for e in
                               info['epochs'])
    info['dtype'] = DTYPES[info['data_format']]
    # Bytes per time slice = dtype size * number of channels.
    bps = info['dtype'].itemsize * info['total_chans']
    info['bytes_per_slice'] = bps
    cfg = _read_config(config_fname)
    info['bti_transform'] = cfg['transforms']
    # augment channel list by according info from config.
    # get channels from config present in PDF
    chans = info['chs']
    chans_cfg = [c for c in cfg['chs'] if c['chan_no']
                 in [c_['chan_no'] for c_ in chans]]
    # check all pdf chanels are present in config
    match = [c['chan_no'] for c in chans_cfg] == \
        [c['chan_no'] for c in chans]
    if not match:
        raise RuntimeError('Could not match raw data channels with'
                           ' config channels. Some of the channels'
                           ' found are not described in config.')
    # transfer channel info from config to channel info
    for ch, ch_cfg in zip(chans, chans_cfg):
        ch['upb'] = ch_cfg['units_per_bit']
        ch['gain'] = ch_cfg['gain']
        ch['name'] = ch_cfg['name']
        ch['coil_trans'] = (ch_cfg['dev'].get('transform', None)
                            if 'dev' in ch_cfg else None)
        # Integer formats need units-per-bit scaling; floats only the gain.
        if info['data_format'] <= 2:
            ch['cal'] = ch['scale'] * ch['upb'] * (ch['gain'] ** -1)
        else:
            ch['cal'] = ch['scale'] * ch['gain']
    # First order channels by their stored index ...
    by_index = [(i, d['index']) for i, d in enumerate(chans)]
    by_index.sort(key=lambda c: c[1])
    by_index = [idx[0] for idx in by_index]
    info['chs'] = [chans[pos] for pos in by_index]
    # ... then put 'A*' (MEG) channels first, sorted numerically.
    by_name = [(i, d['name']) for i, d in enumerate(info['chs'])]
    a_chs = filter(lambda c: c[1].startswith('A'), by_name)
    other_chs = filter(lambda c: not c[1].startswith('A'), by_name)
    by_name = sorted(a_chs, key=lambda c: int(c[1][1:])) + sorted(other_chs)
    by_name = [idx[0] for idx in by_name]
    info['chs'] = [chans[pos] for pos in by_name]
    info['order'] = by_name
    # finally add some important fields from the config
    info['e_table'] = cfg['user_blocks'][BTI.UB_B_E_TABLE_USED]
    info['weights'] = cfg['user_blocks'][BTI.UB_B_WEIGHTS_USED]
    return info
def _read_data(info, start=None, stop=None):
    """ Helper function: read Bti processed data file (PDF)

    Parameters
    ----------
    info : dict
        The measurement info.
    start : int | None
        The number of the first time slice to read. If None, all data will
        be read from the beginning.
    stop : int | None
        The number of the last time slice to read. If None, all data will
        be read to the end.

    Returns
    -------
    data : ndarray
        The measurement data, a channels x time slices array, calibrated
        and reordered according to info['order'].
    """
    total_slices = info['total_slices']
    if start is None:
        start = 0
    if stop is None:
        stop = total_slices
    if any([start < 0, stop > total_slices, start >= stop]):
        raise RuntimeError('Invalid data range supplied:'
                           ' %d, %d' % (start, stop))
    with open(info['pdf_fname'], 'rb') as fid:
        # Slices are stored contiguously; seek straight to the first one.
        fid.seek(info['bytes_per_slice'] * start, 0)
        cnt = (stop - start) * info['total_chans']
        shape = [stop - start, info['total_chans']]
        data = np.fromfile(fid, dtype=info['dtype'],
                           count=cnt).astype('f4').reshape(shape)
    # Apply per-channel calibration, then reorder and transpose to
    # channels x slices.
    for ch in info['chs']:
        data[:, ch['index']] *= ch['cal']
    return data[:, info['order']].T
class RawBTi(_BaseRaw):
    """ Raw object from 4D Neuroimaging MagnesWH3600 data

    Parameters
    ----------
    pdf_fname : str | None
        absolute path to the processed data file (PDF)
    config_fname : str | None
        absolute path to system config file. If None, it is assumed to be in
        the same directory.
    head_shape_fname : str
        absolute path to the head shape file. If None, it is assumed to be in
        the same directory.
    rotation_x : float | int | None
        Degrees to tilt x-axis for sensor frame misalignment.
        If None, no adjustment will be applied.
    translation : array-like
        The translation to place the origin of coordinate system
        to the center of the head.
    ecg_ch : str | None
        The 4D name of the ECG channel. If None, the channel will be treated
        as regular EEG channel.
    eog_ch : tuple of str | None
        The 4D names of the EOG channels. If None, the channels will be treated
        as regular EEG channels.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Attributes & Methods
    --------------------
    See documentation for mne.io.Raw
    """
    @verbose
    def __init__(self, pdf_fname, config_fname='config',
                 head_shape_fname='hs_file', rotation_x=None,
                 translation=(0.0, 0.02, 0.11), ecg_ch='E31',
                 eog_ch=('E63', 'E64'), verbose=None):
        # --- resolve data / config / head-shape paths (companions default
        # to living next to the PDF file) ---
        if not op.isabs(pdf_fname):
            pdf_fname = op.abspath(pdf_fname)
        if not op.isabs(config_fname):
            config_fname = op.join(op.dirname(pdf_fname), config_fname)
        if not op.exists(config_fname):
            raise ValueError('Could not find the config file %s. Please check'
                             ' whether you are in the right directory '
                             'or pass the full name' % config_fname)
        if not op.isabs(head_shape_fname):
            head_shape_fname = op.join(op.dirname(pdf_fname), head_shape_fname)
        if not op.exists(head_shape_fname):
            raise ValueError('Could not find the head_shape file %s. You shoul'
                             'd check whether you are in the right directory o'
                             'r pass the full file name.' % head_shape_fname)
        logger.info('Reading 4D PDF file %s...' % pdf_fname)
        bti_info = _read_bti_header(pdf_fname, config_fname)
        # XXX indx is informed guess. Normally only one transform is stored.
        dev_ctf_t = bti_info['bti_transform'][0].astype('>f8')
        bti_to_nm = bti_to_vv_trans(adjust=rotation_x,
                                    translation=translation, dtype='>f8')
        use_hpi = False  # hard coded, but marked as later option.
        # --- build the Neuromag-style measurement info dict ---
        logger.info('Creating Neuromag info structure ...')
        info = Info()
        info['bads'] = []
        info['meas_id'] = None
        info['file_id'] = None
        info['projs'] = list()
        info['comps'] = list()
        date = bti_info['processes'][0]['timestamp']
        info['meas_date'] = [date, 0]
        # 1e3 * 1e-3 cancels, so this is 1 / sample_period — presumably
        # sample_period is in seconds; TODO confirm units.
        info['sfreq'] = 1e3 / bti_info['sample_period'] * 1e-3
        info['nchan'] = len(bti_info['chs'])
        # browse processing info for filter specs.
        hp, lp = 0.0, info['sfreq'] * 0.4  # find better default
        for proc in bti_info['processes']:
            if 'filt' in proc['process_type']:
                for step in proc['processing_steps']:
                    if 'high_freq' in step:
                        hp, lp = step['high_freq'], step['low_freq']
                    elif 'hp' in step['process_type']:
                        hp = step['freq']
                    elif 'lp' in step['process_type']:
                        lp = step['freq']
        info['highpass'] = hp
        info['lowpass'] = lp
        info['acq_pars'], info['acq_stim'] = None, None
        info['filename'] = None
        chs = []
        # --- per-channel info: map 4D names to Neuromag names ---
        ch_names = [ch['name'] for ch in bti_info['chs']]
        self.bti_ch_labels = [c['chan_label'] for c in bti_info['chs']]
        info['ch_names'] = _rename_channels(ch_names)
        ch_mapping = zip(ch_names, info['ch_names'])
        logger.info('... Setting channel info structure.')
        for idx, (chan_4d, chan_vv) in enumerate(ch_mapping):
            chan_info = dict(zip(FIFF_INFO_CHS_FIELDS, FIFF_INFO_CHS_DEFAULTS))
            chan_info['ch_name'] = chan_vv
            chan_info['logno'] = idx + BTI.FIFF_LOGNO
            chan_info['scanno'] = idx + 1
            chan_info['cal'] = bti_info['chs'][idx]['scale']
            if any([chan_vv.startswith(k) for k in ('MEG', 'RFG', 'RFM')]):
                t, loc = bti_info['chs'][idx]['coil_trans'], None
                if t is not None:
                    t, loc = _convert_coil_trans(t.astype('>f8'), dev_ctf_t,
                                                 bti_to_nm)
                    # Log the conversion only once (on the second channel).
                    if idx == 1:
                        logger.info('... putting coil transforms in Neuromag '
                                    'coordinates')
                chan_info['coil_trans'] = t
                if loc is not None:
                    chan_info['loc'] = loc.astype('>f4')
            if chan_vv.startswith('MEG'):
                chan_info['kind'] = FIFF.FIFFV_MEG_CH
                chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_MAG
                chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
                chan_info['unit'] = FIFF.FIFF_UNIT_T
            elif chan_vv.startswith('RFM'):
                chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
                chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_MAG
                chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
                chan_info['unit'] = FIFF.FIFF_UNIT_T
            elif chan_vv.startswith('RFG'):
                chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
                chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
                chan_info['unit'] = FIFF.FIFF_UNIT_T_M
                # Diagonal vs. off-diagonal reference gradiometers.
                if chan_4d in ('GxxA', 'GyyA'):
                    chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_GRAD_DIA
                elif chan_4d in ('GyxA', 'GzxA', 'GzyA'):
                    chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF
            elif chan_vv.startswith('EEG'):
                chan_info['kind'] = FIFF.FIFFV_EEG_CH
                chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
                chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
                chan_info['unit'] = FIFF.FIFF_UNIT_V
            elif chan_vv == 'STI 013':
                chan_info['kind'] = FIFF.FIFFV_RESP_CH
            elif chan_vv == 'STI 014':
                chan_info['kind'] = FIFF.FIFFV_STIM_CH
            elif chan_vv.startswith('EOG'):
                chan_info['kind'] = FIFF.FIFFV_EOG_CH
            elif chan_vv == 'ECG 001':
                chan_info['kind'] = FIFF.FIFFV_ECG_CH
            elif chan_vv.startswith('EXT'):
                chan_info['kind'] = FIFF.FIFFV_MISC_CH
            elif chan_vv.startswith('UTL'):
                chan_info['kind'] = FIFF.FIFFV_MISC_CH
            chs.append(chan_info)
        info['chs'] = chs
        # --- digitization points and coordinate transforms ---
        logger.info('... Reading digitization points from %s' %
                    head_shape_fname)
        logger.info('... putting digitization points in Neuromag c'
                    'oordinates')
        info['dig'], ctf_head_t = _setup_head_shape(head_shape_fname, use_hpi)
        logger.info('... Computing new device to head transform.')
        dev_head_t = _convert_dev_head_t(dev_ctf_t, bti_to_nm,
                                         ctf_head_t)
        info['dev_head_t'] = dict()
        info['dev_head_t']['from'] = FIFF.FIFFV_COORD_DEVICE
        info['dev_head_t']['to'] = FIFF.FIFFV_COORD_HEAD
        info['dev_head_t']['trans'] = dev_head_t
        info['dev_ctf_t'] = dict()
        info['dev_ctf_t']['from'] = FIFF.FIFFV_MNE_COORD_CTF_DEVICE
        info['dev_ctf_t']['to'] = FIFF.FIFFV_COORD_HEAD
        info['dev_ctf_t']['trans'] = dev_ctf_t
        info['ctf_head_t'] = dict()
        info['ctf_head_t']['from'] = FIFF.FIFFV_MNE_COORD_CTF_HEAD
        info['ctf_head_t']['to'] = FIFF.FIFFV_COORD_HEAD
        info['ctf_head_t']['trans'] = ctf_head_t
        logger.info('Done.')
        if False:  # XXX : reminds us to support this as we go
            # include digital weights from reference channel
            comps = info['comps'] = list()
            weights = bti_info['weights']
            by_name = lambda x: x[1]
            chn = dict(ch_mapping)
            columns = [chn[k] for k in weights['dsp_ch_names']]
            rows = [chn[k] for k in weights['ch_names']]
            col_order, col_names = zip(*sorted(enumerate(columns),
                                               key=by_name))
            row_order, row_names = zip(*sorted(enumerate(rows), key=by_name))
            # for some reason the C code would invert the signs, so we follow.
            mat = -weights['dsp_wts'][row_order, :][:, col_order]
            comp_data = dict(data=mat,
                             col_names=col_names,
                             row_names=row_names,
                             nrow=mat.shape[0], ncol=mat.shape[1])
            comps += [dict(data=comp_data, ctfkind=101,
                           # no idea how to calibrate, just ones.
                           rowcals=np.ones(mat.shape[0], dtype='>f4'),
                           colcals=np.ones(mat.shape[1], dtype='>f4'),
                           save_calibrated=0)]
        else:
            logger.warning('Warning. Currently direct inclusion of 4D weight t'
                           'ables is not supported. For critical use cases '
                           '\nplease take into account the MNE command '
                           '\'mne_create_comp_data\' to include weights as '
                           'printed out \nby the 4D \'print_table\' routine.')
        # check that the info is complete
        assert not set(RAW_INFO_FIELDS) - set(info.keys())
        # check nchan is correct
        assert len(info['ch_names']) == info['nchan']
        # --- finalize raw-object state and preload the data ---
        cals = np.zeros(info['nchan'])
        for k in range(info['nchan']):
            cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
        self.verbose = verbose
        self.cals = cals
        self.rawdirs = list()
        self.orig_format = 'double'
        self.proj = None
        self.comp = None
        self._filenames = list()
        self.preload = True
        self._projector_hashes = [None]
        self.info = info
        logger.info('Reading raw data from %s...' % pdf_fname)
        self._data = _read_data(bti_info)
        self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
        self._raw_lengths = np.array([self._data.shape[1]])
        self._first_samps = np.array([0])
        self._last_samps = self._raw_lengths - 1
        self.rawdirs = [[]]
        assert len(self._data) == len(self.info['ch_names'])
        self._times = np.arange(self.first_samp,
                                self.last_samp + 1) / info['sfreq']
        self._projectors = [None]
        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs' % (
                    self.first_samp, self.last_samp,
                    float(self.first_samp) / info['sfreq'],
                    float(self.last_samp) / info['sfreq']))
        logger.info('Ready.')
@verbose
def read_raw_bti(pdf_fname, config_fname='config',
                 head_shape_fname='hs_file', rotation_x=None,
                 translation=(0.0, 0.02, 0.11), ecg_ch='E31',
                 eog_ch=('E63', 'E64'), verbose=None):
    """ Raw object from 4D Neuroimaging MagnesWH3600 data

    Note.
    1) Currently direct inclusion of reference channel weights
    is not supported. Please use 'mne_create_comp_data' to include
    the weights or use the low level functions from this module to
    include them by yourself.
    2) The informed guess for the 4D name is E31 for the ECG channel and
    E63, E64 for the EOG channels. Please check and adjust if those channels
    are present in your dataset but 'ECG 01' and 'EOG 01', 'EOG 02' don't
    appear in the channel names of the raw object.

    Parameters
    ----------
    pdf_fname : str | None
        absolute path to the processed data file (PDF)
    config_fname : str | None
        absolute path to system config file. If None, it is assumed to be in
        the same directory.
    head_shape_fname : str
        absolute path to the head shape file. If None, it is assumed to be in
        the same directory.
    rotation_x : float | int | None
        Degrees to tilt x-axis for sensor frame misalignment.
        If None, no adjustment will be applied.
    translation : array-like
        The translation to place the origin of coordinate system
        to the center of the head.
    ecg_ch: str | None
        The 4D name of the ECG channel. If None, the channel will be treated
        as regular EEG channel.
    eog_ch: tuple of str | None
        The 4D names of the EOG channels. If None, the channels will be treated
        as regular EEG channels.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    """
    # BUG FIX: ecg_ch and eog_ch were documented and accepted but never
    # forwarded to RawBTi, so user-supplied channel names were silently
    # ignored and the defaults always applied.
    return RawBTi(pdf_fname, config_fname=config_fname,
                  head_shape_fname=head_shape_fname,
                  rotation_x=rotation_x, translation=translation,
                  ecg_ch=ecg_ch, eog_ch=eog_ch,
                  verbose=verbose)
| |
import warnings
from os.path import dirname, join
import numpy as np
import pandas as pd
from nose.tools import assert_almost_equal, assert_equal, raises
from pandas.testing import assert_frame_equal
from rsmtool.comparer import Comparer
from rsmtool.test_utils import do_run_experiment
from scipy.stats import pearsonr
class TestComparer:
    """Unit tests for :class:`rsmtool.comparer.Comparer`."""

    def test_make_summary_stat_df(self):
        """Summary statistics of a large normal sample track its parameters."""
        sample = np.random.multivariate_normal([100, 25],
                                               [[25, .5],
                                                [.5, 1]],
                                               5000)
        frame = pd.DataFrame(sample, columns=['A', 'B'])
        summary = Comparer.make_summary_stat_df(frame)
        assert np.isclose(summary.loc['A', 'SD'], 5, rtol=0.1)
        assert np.isclose(summary.loc['B', 'SD'], 1, rtol=0.1)
        assert np.isclose(summary.loc['A', 'MEAN'], 100, rtol=0.1)
        assert np.isclose(summary.loc['B', 'MEAN'], 25, rtol=0.1)

    def test_make_summary_stat_df_no_warning(self):
        """NaN-containing input must not emit any warnings."""
        frame = pd.DataFrame({'A': [1, 2, np.nan],
                              'B': [np.nan, 2, 3]})
        with warnings.catch_warnings():
            # promote any warning to an error so that it fails the test
            warnings.simplefilter('error')
            Comparer.make_summary_stat_df(frame)

    def test_process_confusion_matrix(self):
        """Columns get 'human' labels and rows get 'machine' labels."""
        raw_cm = pd.DataFrame({1: [2, 3, 5],
                               2: [2, 5, 7],
                               3: [1, 3, 6]},
                              index=[1, 2, 3])
        labeled_cm = pd.DataFrame({'human 1': [2, 3, 5],
                                   'human 2': [2, 5, 7],
                                   'human 3': [1, 3, 6]},
                                  index=['machine 1',
                                         'machine 2',
                                         'machine 3'])
        assert_frame_equal(Comparer.process_confusion_matrix(raw_cm),
                           labeled_cm)

    def test_process_confusion_matrix_with_zero(self):
        """A zero score label is labeled just like any other label."""
        raw_cm = pd.DataFrame({0: [2, 3, 5],
                               1: [2, 5, 7],
                               2: [1, 3, 6]},
                              index=[0, 1, 2])
        labeled_cm = pd.DataFrame({'human 0': [2, 3, 5],
                                   'human 1': [2, 5, 7],
                                   'human 2': [1, 3, 6]},
                                  index=['machine 0',
                                         'machine 1',
                                         'machine 2'])
        assert_frame_equal(Comparer.process_confusion_matrix(raw_cm),
                           labeled_cm)

    def test_compute_correlations_between_versions_default_columns(self):
        """Correlations using the default 'spkitemid'/'sc1' columns."""
        old_frame = pd.DataFrame({'spkitemid': ['a', 'b', 'c'],
                                  'feature1': [1.3, 1.5, 2.1],
                                  'feature2': [1.1, 6.2, 2.1],
                                  'sc1': [2, 3, 4]})
        new_frame = pd.DataFrame({'spkitemid': ['a', 'b', 'c'],
                                  'feature1': [-1.3, -1.5, -2.1],
                                  'feature2': [1.1, 6.2, 2.1],
                                  'sc1': [2, 3, 4]})
        cors = Comparer.compute_correlations_between_versions(old_frame,
                                                              new_frame)
        assert_almost_equal(cors.at['feature1', 'old_new'], -1.0)
        assert_almost_equal(cors.at['feature2', 'old_new'], 1.0)
        assert_equal(cors.at['feature1', 'human_old'],
                     pearsonr(old_frame['feature1'], old_frame['sc1'])[0])
        assert_equal(cors.at['feature1', 'human_new'],
                     pearsonr(new_frame['feature1'], new_frame['sc1'])[0])
        assert_equal(cors.at['feature1', "N"], 3)

    def test_compute_correlations_between_versions_custom_columns(self):
        """Correlations with caller-supplied id and human-score columns."""
        old_frame = pd.DataFrame({'id': ['a', 'b', 'c'],
                                  'feature1': [1.3, 1.5, 2.1],
                                  'feature2': [1.1, 6.2, 2.1],
                                  'r1': [2, 3, 4]})
        new_frame = pd.DataFrame({'id': ['a', 'b', 'c'],
                                  'feature1': [-1.3, -1.5, -2.1],
                                  'feature2': [1.1, 6.2, 2.1],
                                  'r1': [2, 3, 4]})
        cors = Comparer.compute_correlations_between_versions(
            old_frame, new_frame, human_score='r1', id_column='id')
        assert_almost_equal(cors.at['feature1', 'old_new'], -1.0)
        assert_almost_equal(cors.at['feature2', 'old_new'], 1.0)
        assert_equal(cors.at['feature1', 'human_old'],
                     pearsonr(old_frame['feature1'], old_frame['r1'])[0])
        assert_equal(cors.at['feature1', 'human_new'],
                     pearsonr(new_frame['feature1'], new_frame['r1'])[0])
        assert_equal(cors.at['feature1', "N"], 3)

    @raises(ValueError)
    def test_compute_correlations_between_versions_no_matching_feature(self):
        """No shared feature columns between versions raises ValueError."""
        old_frame = pd.DataFrame({'id': ['a', 'b', 'c'],
                                  'feature1': [1.3, 1.5, 2.1],
                                  'feature2': [1.1, 6.2, 2.1],
                                  'r1': [2, 3, 4]})
        new_frame = pd.DataFrame({'id': ['a', 'b', 'c'],
                                  'feature3': [-1.3, -1.5, -2.1],
                                  'feature4': [1.1, 6.2, 2.1],
                                  'r1': [2, 3, 4]})
        Comparer.compute_correlations_between_versions(
            old_frame, new_frame, human_score='r1', id_column='id')

    def test_compute_correlations_between_versions_extra_data(self):
        """Only overlapping ids and shared feature columns are correlated."""
        old_frame = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
                                  'feature1': [1.3, 1.5, 2.1, 5],
                                  'feature2': [1.1, 6.2, 2.1, 1],
                                  'feature3': [3, 5, 6, 7],
                                  'sc1': [2, 3, 4, 3]})
        new_frame = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'e'],
                                  'feature1': [-1.3, -1.5, -2.1, 2],
                                  'feature2': [1.1, 6.2, 2.1, 8],
                                  'feature4': [1, 3, 6, 7],
                                  'sc1': [2, 3, 4, 3]})
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore')
            cors = Comparer.compute_correlations_between_versions(old_frame,
                                                                  new_frame)
        assert_almost_equal(cors.at['feature1', 'old_new'], -1.0)
        assert_almost_equal(cors.at['feature2', 'old_new'], 1.0)
        assert_equal(cors.at['feature1', "N"], 3)
        assert_equal(len(cors), 2)

    @raises(ValueError)
    def test_compute_correlations_between_versions_no_matching_ids(self):
        """Disjoint id sets between the two versions raise ValueError."""
        old_frame = pd.DataFrame({'id': ['a', 'b', 'c'],
                                  'feature1': [1.3, 1.5, 2.1],
                                  'feature2': [1.1, 6.2, 2.1],
                                  'r1': [2, 3, 4]})
        new_frame = pd.DataFrame({'id': ['a1', 'b1', 'c1'],
                                  'feature1': [-1.3, -1.5, -2.1],
                                  'feature2': [1.1, 6.2, 2.1],
                                  'r1': [2, 3, 4]})
        Comparer.compute_correlations_between_versions(
            old_frame, new_frame, human_score='r1', id_column='id')

    def test_load_rsmtool_output(self):
        """An end-to-end rsmtool run yields the expected output keys."""
        source = 'lr-subgroups-with-h2'
        experiment_id = 'lr_subgroups_with_h2'
        config_file = join(dirname(__file__),
                           'data',
                           'experiments',
                           source,
                           '{}.json'.format(experiment_id))
        do_run_experiment(source, experiment_id, config_file)
        csvs, figs, file_format = Comparer().load_rsmtool_output(
            join('test_outputs', source, 'output'),
            join('test_outputs', source, 'figure'),
            experiment_id,
            'scale',
            ['QUESTION', 'L1'])
        expected_csv_keys = ['df_coef',
                             'df_confmatrix',
                             'df_consistency',
                             'df_degradation',
                             'df_descriptives',
                             'df_disattenuated_correlations',
                             'df_disattenuated_correlations_by_L1',
                             'df_disattenuated_correlations_by_L1_overview',
                             'df_disattenuated_correlations_by_QUESTION',
                             'df_disattenuated_correlations_by_QUESTION_overview',
                             'df_eval',
                             'df_eval_by_L1',
                             'df_eval_by_L1_m_sd',
                             'df_eval_by_L1_overview',
                             'df_eval_by_QUESTION',
                             'df_eval_by_QUESTION_m_sd',
                             'df_eval_by_QUESTION_overview',
                             'df_eval_for_degradation',
                             'df_feature_cors',
                             'df_mcor_sc1',
                             'df_mcor_sc1_L1_overview',
                             'df_mcor_sc1_QUESTION_overview',
                             'df_mcor_sc1_by_L1',
                             'df_mcor_sc1_by_QUESTION',
                             'df_mcor_sc1_overview',
                             'df_model_fit',
                             'df_outliers',
                             'df_pca',
                             'df_pcavar',
                             'df_pcor_sc1',
                             'df_pcor_sc1_L1_overview',
                             'df_pcor_sc1_QUESTION_overview',
                             'df_pcor_sc1_by_L1',
                             'df_pcor_sc1_by_QUESTION',
                             'df_pcor_sc1_overview',
                             'df_percentiles',
                             'df_score_dist',
                             'df_scores',
                             'df_train_features',
                             'df_true_score_eval']
        expected_fig_keys = ['betas',
                             'eval_barplot_by_L1',
                             'eval_barplot_by_QUESTION',
                             'feature_boxplots_by_L1_svg',
                             'feature_boxplots_by_QUESTION_svg',
                             'feature_distplots',
                             'pca_scree_plot']
        assert_equal(file_format, 'csv')
        assert_equal(expected_csv_keys, sorted(csvs.keys()))
        assert_equal(expected_fig_keys, sorted(figs.keys()))
| |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.trainer.executor."""
import copy
import json
import os
import unittest
from unittest import mock
import tensorflow as tf
from tfx.components.testdata.module_file import trainer_module
from tfx.components.trainer import executor
from tfx.dsl.io import fileio
from tfx.proto import trainer_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import name_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
@unittest.skipIf(tf.__version__ < '2',
                 'This test uses testdata only compatible with TF 2.x')
class ExecutorTest(tf.test.TestCase):
  """Tests for tfx.components.trainer.executor (Executor and GenericExecutor)."""

  def setUp(self):
    """Builds the shared input/output/exec-property dicts used by each test."""
    super().setUp()
    self._source_data_dir = os.path.join(
        os.path.dirname(os.path.dirname(__file__)), 'testdata')
    # Outputs go under TEST_UNDECLARED_OUTPUTS_DIR when set (e.g. under
    # bazel), otherwise a per-test temp dir, keyed by the test method name.
    self._output_data_dir = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self._testMethodName)

    # Create input dict.
    e1 = standard_artifacts.Examples()
    e1.uri = os.path.join(self._source_data_dir,
                          'transform/transformed_examples')
    e1.split_names = artifact_utils.encode_split_names(['train', 'eval'])
    # A deep copy gives a second, independent Examples artifact for the
    # multiple-artifact test.
    e2 = copy.deepcopy(e1)
    self._single_artifact = [e1]
    self._multiple_artifacts = [e1, e2]

    transform_graph = standard_artifacts.TransformGraph()
    transform_graph.uri = os.path.join(self._source_data_dir,
                                       'transform/transform_graph')
    schema = standard_artifacts.Schema()
    schema.uri = os.path.join(self._source_data_dir, 'schema_gen')
    previous_model = standard_artifacts.Model()
    previous_model.uri = os.path.join(self._source_data_dir, 'trainer/previous')
    self._input_dict = {
        standard_component_specs.EXAMPLES_KEY: self._single_artifact,
        standard_component_specs.TRANSFORM_GRAPH_KEY: [transform_graph],
        standard_component_specs.SCHEMA_KEY: [schema],
        standard_component_specs.BASE_MODEL_KEY: [previous_model]
    }

    # Create output dict.
    self._model_exports = standard_artifacts.Model()
    self._model_exports.uri = os.path.join(self._output_data_dir,
                                           'model_export_path')
    self._model_run_exports = standard_artifacts.ModelRun()
    self._model_run_exports.uri = os.path.join(self._output_data_dir,
                                               'model_run_path')
    self._output_dict = {
        standard_component_specs.MODEL_KEY: [self._model_exports],
        standard_component_specs.MODEL_RUN_KEY: [self._model_run_exports]
    }

    # Create exec properties skeleton.  Individual tests add a module_file
    # or trainer_fn entry before running.
    self._exec_properties = {
        standard_component_specs.TRAIN_ARGS_KEY:
            proto_utils.proto_to_json(trainer_pb2.TrainArgs(num_steps=1000)),
        standard_component_specs.EVAL_ARGS_KEY:
            proto_utils.proto_to_json(trainer_pb2.EvalArgs(num_steps=500)),
        'warm_starting':
            False,
    }
    self._module_file = os.path.join(self._source_data_dir,
                                     standard_component_specs.MODULE_FILE_KEY,
                                     'trainer_module.py')
    self._trainer_fn = name_utils.get_full_name(trainer_module.trainer_fn)

    # Executors for test.
    self._trainer_executor = executor.Executor()
    self._generic_trainer_executor = executor.GenericExecutor()

  def _verify_model_exports(self):
    # Both the eval and the serving model directories must have been written.
    self.assertTrue(
        fileio.exists(path_utils.eval_model_dir(self._model_exports.uri)))
    self.assertTrue(
        fileio.exists(path_utils.serving_model_dir(self._model_exports.uri)))

  def _verify_no_eval_model_exports(self):
    self.assertFalse(
        fileio.exists(path_utils.eval_model_dir(self._model_exports.uri)))

  def _verify_model_run_exports(self):
    self.assertTrue(fileio.exists(os.path.dirname(self._model_run_exports.uri)))

  def _do(self, test_executor):
    # Runs the executor under test with the dicts assembled in setUp().
    test_executor.Do(
        input_dict=self._input_dict,
        output_dict=self._output_dict,
        exec_properties=self._exec_properties)

  def testGenericExecutor(self):
    self._exec_properties[
        standard_component_specs.MODULE_FILE_KEY] = self._module_file
    self._do(self._generic_trainer_executor)
    self._verify_model_exports()
    self._verify_model_run_exports()

  @mock.patch('tfx.components.trainer.executor._is_chief')
  def testDoChief(self, mock_is_chief):
    mock_is_chief.return_value = True
    self._exec_properties[
        standard_component_specs.MODULE_FILE_KEY] = self._module_file
    self._do(self._trainer_executor)
    self._verify_model_exports()
    self._verify_model_run_exports()

  @mock.patch('tfx.components.trainer.executor._is_chief')
  def testDoNonChief(self, mock_is_chief):
    # A non-chief worker must not write the eval model export.
    mock_is_chief.return_value = False
    self._exec_properties[
        standard_component_specs.MODULE_FILE_KEY] = self._module_file
    self._do(self._trainer_executor)
    self._verify_no_eval_model_exports()
    self._verify_model_run_exports()

  def testDoWithModuleFile(self):
    self._exec_properties[
        standard_component_specs.MODULE_FILE_KEY] = self._module_file
    self._do(self._trainer_executor)
    self._verify_model_exports()
    self._verify_model_run_exports()

  def testDoWithTrainerFn(self):
    self._exec_properties[
        standard_component_specs.TRAINER_FN_KEY] = self._trainer_fn
    self._do(self._trainer_executor)
    self._verify_model_exports()
    self._verify_model_run_exports()

  def testDoWithNoTrainerFn(self):
    # Neither module_file nor trainer_fn configured: executor must reject.
    with self.assertRaises(ValueError):
      self._do(self._trainer_executor)

  def testDoWithHyperParameters(self):
    hp_artifact = standard_artifacts.HyperParameters()
    hp_artifact.uri = os.path.join(self._output_data_dir, 'hyperparameters/')
    # TODO(jyzhao): use real keras_tuner.HyperParameters instead of dict.
    hyperparameters = {}
    hyperparameters['first_dnn_layer_size'] = 100
    hyperparameters['num_dnn_layers'] = 4
    hyperparameters['dnn_decay_factor'] = 0.7
    io_utils.write_string_file(
        os.path.join(hp_artifact.uri, 'hyperparameters.txt'),
        json.dumps(hyperparameters))
    self._input_dict[standard_component_specs.HYPERPARAMETERS_KEY] = [
        hp_artifact
    ]
    self._exec_properties[
        standard_component_specs.MODULE_FILE_KEY] = self._module_file
    self._do(self._trainer_executor)
    self._verify_model_exports()
    self._verify_model_run_exports()

  def testMultipleArtifacts(self):
    # Multiple Examples artifacts are only supported by the generic executor.
    self._input_dict[
        standard_component_specs.EXAMPLES_KEY] = self._multiple_artifacts
    self._exec_properties[
        standard_component_specs.MODULE_FILE_KEY] = self._module_file
    self._do(self._generic_trainer_executor)
    self._verify_model_exports()
    self._verify_model_run_exports()

  def testDoWithCustomSplits(self):
    # Update input dict.
    io_utils.copy_dir(
        os.path.join(self._source_data_dir,
                     'transform/transformed_examples/Split-train'),
        os.path.join(self._output_data_dir, 'data/Split-training'))
    io_utils.copy_dir(
        os.path.join(self._source_data_dir,
                     'transform/transformed_examples/Split-eval'),
        os.path.join(self._output_data_dir, 'data/Split-evaluating'))
    examples = standard_artifacts.Examples()
    examples.uri = os.path.join(self._output_data_dir, 'data')
    examples.split_names = artifact_utils.encode_split_names(
        ['training', 'evaluating'])
    self._input_dict[standard_component_specs.EXAMPLES_KEY] = [examples]

    # Update exec properties skeleton with custom splits.
    self._exec_properties[
        standard_component_specs.TRAIN_ARGS_KEY] = proto_utils.proto_to_json(
            trainer_pb2.TrainArgs(splits=['training'], num_steps=1000))
    self._exec_properties[
        standard_component_specs.EVAL_ARGS_KEY] = proto_utils.proto_to_json(
            trainer_pb2.EvalArgs(splits=['evaluating'], num_steps=500))

    self._exec_properties[
        standard_component_specs.MODULE_FILE_KEY] = self._module_file
    self._do(self._trainer_executor)
    self._verify_model_exports()
    self._verify_model_run_exports()
# Standard TensorFlow test entry point: run all test cases when this file
# is executed directly.
if __name__ == '__main__':
  tf.test.main()
| |
# Copyright 2010 Rusty Klophaus <rusty@basho.com>
# Copyright 2010 Justin Sheehy <justin@basho.com>
# Copyright 2009 Jay Baird <jay@mochimedia.com>
# Copyright 2010-present Basho Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import string_types, PY2
import mimetypes
from riak.util import lazy_property
from riak.datatypes import TYPES
def bucket_property(name, doc=None):
    """Build a property that proxies reads and writes of the bucket
    property *name* through ``get_property``/``set_property``.

    :param name: the bucket property the attribute maps to
    :param doc: optional docstring attached to the property
    :rtype: property
    """
    def _reader(self):
        return self.get_property(name)

    def _writer(self, value):
        return self.set_property(name, value)

    return property(_reader, _writer, doc=doc)
class RiakBucket(object):
"""
The ``RiakBucket`` object allows you to access and change information
about a Riak bucket, and provides methods to create or retrieve
objects within the bucket.
"""
    def __init__(self, client, name, bucket_type):
        """
        Returns a new ``RiakBucket`` instance.

        :param client: A :class:`RiakClient <riak.client.RiakClient>`
            instance
        :type client: :class:`RiakClient <riak.client.RiakClient>`
        :param name: The bucket name
        :type name: string
        :param bucket_type: The parent bucket type of this bucket
        :type bucket_type: :class:`BucketType`
        :raises TypeError: if *name* is not a string (or not ASCII on
            Python 2), or if *bucket_type* is not a :class:`BucketType`
        """
        if not isinstance(name, string_types):
            raise TypeError('Bucket name must be a string')
        if PY2:
            try:
                # Python 2: bucket names must be ASCII-representable.
                name = name.encode('ascii')
            except UnicodeError:
                raise TypeError('Unicode bucket names are not supported.')
        if not isinstance(bucket_type, BucketType):
            raise TypeError('Parent bucket type must be a BucketType instance')
        self._client = client
        self.name = name
        self.bucket_type = bucket_type
        # Per-bucket codec overrides; lookups fall back to the client's
        # codecs when a content type is not registered here.
        self._encoders = {}
        self._decoders = {}
        # Optional per-bucket sibling-resolution callable (see ``resolver``).
        self._resolver = None
def __hash__(self):
return hash((self.bucket_type.name, self.name, self._client))
def __eq__(self, other):
if isinstance(other, self.__class__):
return hash(self) == hash(other)
else:
return False
def __ne__(self, other):
if isinstance(other, self.__class__):
return hash(self) != hash(other)
else:
return True
def get_encoder(self, content_type):
"""
Get the encoding function for the provided content type for
this bucket.
:param content_type: the requested media type
:type content_type: str
:param content_type: Content type requested
"""
if content_type in self._encoders:
return self._encoders[content_type]
else:
return self._client.get_encoder(content_type)
def set_encoder(self, content_type, encoder):
"""
Set the encoding function for the provided content type for
this bucket.
:param content_type: the requested media type
:type content_type: str
:param encoder: an encoding function, takes a single object
argument and returns a string data as single argument.
:type encoder: function
"""
self._encoders[content_type] = encoder
return self
def get_decoder(self, content_type):
"""
Get the decoding function for the provided content type for
this bucket.
:param content_type: the requested media type
:type content_type: str
:rtype: function
"""
if content_type in self._decoders:
return self._decoders[content_type]
else:
return self._client.get_decoder(content_type)
def set_decoder(self, content_type, decoder):
"""
Set the decoding function for the provided content type for
this bucket.
:param content_type: the requested media type
:type content_type: str
:param decoder: a decoding function, takes a string and
returns a Python type
:type decoder: function
"""
self._decoders[content_type] = decoder
return self
    def new(self, key=None, data=None, content_type='application/json',
            encoded_data=None):
        """A shortcut for manually instantiating a new
        :class:`~riak.riak_object.RiakObject` or a new
        :class:`~riak.datatypes.Datatype`, based on the presence and value
        of the :attr:`datatype <BucketType.datatype>` bucket property. When
        the bucket contains a :class:`~riak.datatypes.Datatype`, all
        arguments are ignored except ``key``, otherwise they are used to
        initialize the :class:`~riak.riak_object.RiakObject`.

        :param key: Name of the key. Leaving this to be None (default)
           will make Riak generate the key on store.
        :type key: str
        :param data: The data to store in a
           :class:`~riak.riak_object.RiakObject`, see
           :attr:`RiakObject.data <riak.riak_object.RiakObject.data>`.
        :type data: object
        :param content_type: The media type of the data stored in the
           :class:`~riak.riak_object.RiakObject`, see
           :attr:`RiakObject.content_type
           <riak.riak_object.RiakObject.content_type>`.
        :type content_type: str
        :param encoded_data: The encoded data to store in a
           :class:`~riak.riak_object.RiakObject`, see
           :attr:`RiakObject.encoded_data
           <riak.riak_object.RiakObject.encoded_data>`.
        :type encoded_data: str
        :rtype: :class:`~riak.riak_object.RiakObject` or
           :class:`~riak.datatypes.Datatype`
        """
        # Imported here rather than at module level, presumably to avoid a
        # circular import with the riak package — TODO confirm.
        from riak import RiakObject
        if self.bucket_type.datatype:
            # Datatype buckets ignore everything except the key.
            return TYPES[self.bucket_type.datatype](bucket=self, key=key)
        if PY2:
            try:
                if isinstance(data, string_types):
                    # Python 2: plain-str payloads must be ASCII-safe.
                    data = data.encode('ascii')
            except UnicodeError:
                raise TypeError('Unicode data values are not supported.')
        obj = RiakObject(self._client, self, key)
        obj.content_type = content_type
        if data is not None:
            obj.data = data
        if encoded_data is not None:
            obj.encoded_data = encoded_data
        return obj
def get(self, key, r=None, pr=None, timeout=None, include_context=None,
basic_quorum=None, notfound_ok=None, head_only=False):
"""
Retrieve a :class:`~riak.riak_object.RiakObject` or
:class:`~riak.datatypes.Datatype`, based on the presence and value
of the :attr:`datatype <BucketType.datatype>` bucket property.
:param key: Name of the key.
:type key: string
:param r: R-Value of the request (defaults to bucket's R)
:type r: integer
:param pr: PR-Value of the request (defaults to bucket's PR)
:type pr: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:param include_context: if the bucket contains datatypes, include
the opaque context in the result
:type include_context: bool
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param head_only: whether to fetch without value, so only metadata
(only available on PB transport)
:type head_only: bool
:rtype: :class:`RiakObject <riak.riak_object.RiakObject>` or
:class:`~riak.datatypes.Datatype`
"""
from riak import RiakObject
if self.bucket_type.datatype:
return self._client.fetch_datatype(self, key, r=r, pr=pr,
timeout=timeout,
include_context=include_context,
basic_quorum=basic_quorum,
notfound_ok=notfound_ok)
else:
obj = RiakObject(self._client, self, key)
return obj.reload(r=r, pr=pr, timeout=timeout,
basic_quorum=basic_quorum,
notfound_ok=notfound_ok,
head_only=head_only)
def multiget(self, keys, r=None, pr=None, timeout=None,
basic_quorum=None, notfound_ok=None,
head_only=False):
"""
Retrieves a list of keys belonging to this bucket in parallel.
:param keys: the keys to fetch
:type keys: list
:param r: R-Value for the requests (defaults to bucket's R)
:type r: integer
:param pr: PR-Value for the requests (defaults to bucket's PR)
:type pr: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param head_only: whether to fetch without value, so only metadata
(only available on PB transport)
:type head_only: bool
:rtype: list of :class:`RiakObjects <riak.riak_object.RiakObject>`,
:class:`Datatypes <riak.datatypes.Datatype>`, or tuples of
bucket_type, bucket, key, and the exception raised on fetch
"""
bkeys = [(self.bucket_type.name, self.name, key) for key in keys]
return self._client.multiget(bkeys, r=r, pr=pr, timeout=timeout,
basic_quorum=basic_quorum,
notfound_ok=notfound_ok,
head_only=head_only)
def _get_resolver(self):
if callable(self._resolver):
return self._resolver
elif self._resolver is None:
return self._client.resolver
else:
raise TypeError("resolver is not a function")
def _set_resolver(self, value):
if value is None or callable(value):
self._resolver = value
else:
raise TypeError("resolver is not a function")
resolver = property(_get_resolver, _set_resolver,
doc="""The sibling-resolution function for this
bucket. If the resolver is not set, the
client's resolver will be used.""")
n_val = bucket_property('n_val', doc="""
N-value for this bucket, which is the number of replicas
that will be written of each object in the bucket.
.. warning:: Set this once before you write any data to the
bucket, and never change it again, otherwise unpredictable
things could happen. This should only be used if you know what
you are doing.
""")
allow_mult = bucket_property('allow_mult', doc="""
If set to True, then writes with conflicting data will be stored
and returned to the client.
:type bool: boolean
""")
r = bucket_property('r', doc="""
The default 'read' quorum for this bucket (how many replicas must
reply for a successful read). This should be an integer less than
the 'n_val' property, or a string of 'one', 'quorum', 'all', or
'default'""")
pr = bucket_property('pr', doc="""
The default 'primary read' quorum for this bucket (how many
primary replicas are required for a successful read). This should
be an integer less than the 'n_val' property, or a string of
'one', 'quorum', 'all', or 'default'""")
rw = bucket_property('rw', doc="""
The default 'read' and 'write' quorum for this bucket (equivalent
to 'r' and 'w' but for deletes). This should be an integer less
than the 'n_val' property, or a string of 'one', 'quorum', 'all',
or 'default'""")
w = bucket_property('w', doc="""
The default 'write' quorum for this bucket (how many replicas must
acknowledge receipt of a write). This should be an integer less
than the 'n_val' property, or a string of 'one', 'quorum', 'all',
or 'default'""")
dw = bucket_property('dw', doc="""
The default 'durable write' quorum for this bucket (how many
replicas must commit the write). This should be an integer less
than the 'n_val' property, or a string of 'one', 'quorum', 'all',
or 'default'""")
pw = bucket_property('pw', doc="""
The default 'primary write' quorum for this bucket (how many
primary replicas are required for a successful write). This should
be an integer less than the 'n_val' property, or a string of
'one', 'quorum', 'all', or 'default'""")
def set_property(self, key, value):
"""
Set a bucket property.
:param key: Property to set.
:type key: string
:param value: Property value.
:type value: mixed
"""
return self.set_properties({key: value})
def get_property(self, key):
"""
Retrieve a bucket property.
:param key: The property to retrieve.
:type key: string
:rtype: mixed
"""
return self.get_properties()[key]
def set_properties(self, props):
"""
Set multiple bucket properties in one call.
:param props: A dictionary of properties
:type props: dict
"""
self._client.set_bucket_props(self, props)
def get_properties(self):
"""
Retrieve a dict of all bucket properties.
:rtype: dict
"""
return self._client.get_bucket_props(self)
def clear_properties(self):
"""
Reset all bucket properties to their defaults.
"""
return self._client.clear_bucket_props(self)
def get_keys(self):
"""
Return all keys within the bucket.
:rtype: list of keys
"""
return self._client.get_keys(self)
def stream_keys(self):
"""
Streams all keys within the bucket through an iterator.
The caller must close the stream when finished. See
:meth:`RiakClient.stream_keys()
<riak.client.RiakClient.stream_keys>` for more details.
:rtype: iterator
"""
return self._client.stream_keys(self)
def new_from_file(self, key, filename):
"""Create a new Riak object in the bucket, using the contents of
the specified file. This is a shortcut for :meth:`new`, where the
``encoded_data`` and ``content_type`` are set for you.
.. warning:: This is not supported for buckets that contain
:class:`Datatypes <riak.datatypes.Datatype>`.
:param key: the key of the new object
:type key: string
:param filename: the file to read the contents from
:type filename: string
:rtype: :class:`RiakObject <riak.riak_object.RiakObject>`
"""
binary_data = None
with open(filename, 'rb') as f:
binary_data = f.read()
mimetype, encoding = mimetypes.guess_type(filename)
if encoding:
binary_data = bytearray(binary_data, encoding)
else:
binary_data = bytearray(binary_data)
if not mimetype:
mimetype = 'application/octet-stream'
if PY2:
return self.new(key, encoded_data=binary_data,
content_type=mimetype)
else:
return self.new(key, encoded_data=bytes(binary_data),
content_type=mimetype)
def search_enabled(self):
"""
Returns True if search indexing is enabled for this
bucket.
.. deprecated:: 2.1.0 (Riak 2.0)
Use :ref:`Riak Search 2.0 <yz-label>` instead.
"""
return self.get_properties().get('search', False)
def enable_search(self):
"""
Enable search indexing for this bucket.
.. deprecated:: 2.1.0 (Riak 2.0)
Use :ref:`Riak Search 2.0 <yz-label>` instead.
"""
if not self.search_enabled():
self.set_property('search', True)
return True
def disable_search(self):
"""
Disable search indexing for this bucket.
.. deprecated:: 2.1.0 (Riak 2.0)
Use :ref:`Riak Search 2.0 <yz-label>` instead.
"""
if self.search_enabled():
self.set_property('search', False)
return True
def search(self, query, index=None, **params):
"""
Queries a search index over objects in this bucket/index. See
:meth:`RiakClient.fulltext_search()
<riak.client.RiakClient.fulltext_search>` for more details.
:param query: the search query
:type query: string
:param index: the index to search over. Defaults to the bucket's name.
:type index: string or None
:param params: additional query flags
:type params: dict
"""
search_index = index or self.name
return self._client.fulltext_search(search_index, query, **params)
def get_index(self, index, startkey, endkey=None, return_terms=None,
max_results=None, continuation=None, timeout=None,
term_regex=None):
"""
Queries a secondary index over objects in this bucket,
returning keys or index/key pairs. See
:meth:`RiakClient.get_index()
<riak.client.RiakClient.get_index>` for more details.
"""
return self._client.get_index(self, index, startkey, endkey,
return_terms=return_terms,
max_results=max_results,
continuation=continuation,
timeout=timeout, term_regex=term_regex)
def paginate_index(self, index, startkey, endkey=None,
return_terms=None, max_results=1000,
continuation=None, timeout=None, term_regex=None):
"""
Paginates through a secondary index over objects in this bucket,
returning keys or index/key pairs. See
:meth:`RiakClient.paginate_index()
<riak.client.RiakClient.paginate_index>` for more details.
"""
return self._client.paginate_index(self, index, startkey, endkey,
return_terms=return_terms,
max_results=max_results,
continuation=continuation,
timeout=timeout,
term_regex=term_regex)
def stream_index(self, index, startkey, endkey=None, return_terms=None,
max_results=None, continuation=None, timeout=None,
term_regex=None):
"""
Queries a secondary index over objects in this bucket,
streaming keys or index/key pairs via an iterator.
The caller must close the stream when finished. See
:meth:`RiakClient.stream_index()
<riak.client.RiakClient.stream_index>` for more details.
"""
return self._client.stream_index(self, index, startkey, endkey,
return_terms=return_terms,
max_results=max_results,
continuation=continuation,
timeout=timeout,
term_regex=term_regex)
def paginate_stream_index(self, index, startkey, endkey=None,
return_terms=None, max_results=1000,
continuation=None, timeout=None,
term_regex=None):
"""
Paginates through a secondary index over objects in this bucket,
streaming keys or index/key pairs. The caller must close the stream
when finished. See :meth:`RiakClient.paginate_stream_index()
<riak.client.RiakClient.paginate_stream_index>` for more details.
"""
return self._client.paginate_stream_index(self, index, startkey,
endkey,
return_terms=return_terms,
max_results=max_results,
continuation=continuation,
timeout=timeout,
term_regex=term_regex)
def delete(self, key, **kwargs):
"""Deletes a key from Riak. Short hand for
``bucket.new(key).delete()``. See :meth:`RiakClient.delete()
<riak.client.RiakClient.delete>` for options.
:param key: The key for the object
:type key: string
:rtype: RiakObject
"""
return self.new(key).delete(**kwargs)
def get_counter(self, key, **kwargs):
"""
Gets the value of a counter stored in this bucket. See
:meth:`RiakClient.get_counter()
<riak.client.RiakClient.get_counter>` for options.
.. deprecated:: 2.1.0 (Riak 2.0) Riak 1.4-style counters are
deprecated in favor of the :class:`~riak.datatypes.Counter`
datatype.
:param key: the key of the counter
:type key: string
:rtype: int
"""
return self._client.get_counter(self, key, **kwargs)
def update_counter(self, key, value, **kwargs):
"""
Updates the value of a counter stored in this bucket. Positive
values increment the counter, negative values decrement. See
:meth:`RiakClient.update_counter()
<riak.client.RiakClient.update_counter>` for options.
.. deprecated:: 2.1.0 (Riak 2.0) Riak 1.4-style counters are
deprecated in favor of the :class:`~riak.datatypes.Counter`
datatype.
:param key: the key of the counter
:type key: string
:param value: the amount to increment or decrement
:type value: integer
"""
return self._client.update_counter(self, key, value, **kwargs)
increment_counter = update_counter
def get_preflist(self, key):
"""
Retrieve the preflist associated with a given bucket/key
:param key: Name of the key.
:type key: string
:rtype: list of dict()
"""
return self._client.get_preflist(self, key)
def __str__(self):
if self.bucket_type.is_default():
return '<RiakBucket {0!r}>'.format(self.name)
else:
return '<RiakBucket {0!r}/{1!r}>'.format(self.bucket_type.name,
self.name)
__repr__ = __str__
class BucketType(object):
    """
    The ``BucketType`` object allows you to access and change
    properties on a Riak bucket type and access buckets within its
    namespace.
    Two instances compare equal when they share the same name and
    client; instances are hashable on that same pair.
    """
    def __init__(self, client, name):
        """
        Returns a new ``BucketType`` instance.
        :param client: A :class:`RiakClient <riak.client.RiakClient>`
           instance
        :type client: :class:`RiakClient <riak.client.RiakClient>`
        :param name: The bucket-type's name
        :type name: string
        """
        self._client = client
        self.name = name
    def is_default(self):
        """
        Whether this bucket type is the default type, or a user-defined type.
        :rtype: bool
        """
        return self.name == 'default'
    def get_property(self, key):
        """
        Retrieve a bucket-type property.
        :param key: The property to retrieve.
        :type key: string
        :rtype: mixed
        """
        # Fetches the full property map; there is no single-property API.
        return self.get_properties()[key]
    def set_property(self, key, value):
        """
        Set a bucket-type property.
        :param key: Property to set.
        :type key: string
        :param value: Property value.
        :type value: mixed
        """
        return self.set_properties({key: value})
    def get_properties(self):
        """
        Retrieve a dict of all bucket-type properties.
        :rtype: dict
        """
        return self._client.get_bucket_type_props(self)
    def set_properties(self, props):
        """
        Set multiple bucket-type properties in one call.
        :param props: A dictionary of properties
        :type props: dict
        """
        self._client.set_bucket_type_props(self, props)
    def bucket(self, name):
        """
        Gets a bucket that belongs to this bucket-type.
        :param name: the bucket name
        :type name: str
        :rtype: :class:`RiakBucket`
        """
        return self._client.bucket(name, self)
    def get_buckets(self, timeout=None):
        """
        Get the list of buckets under this bucket-type as
        :class:`RiakBucket <riak.bucket.RiakBucket>` instances.
        .. warning:: Do not use this in production, as it requires
           traversing through all keys stored in a cluster.
        .. note:: This request is automatically retried :attr:`retries`
           times if it fails due to network error.
        :param timeout: a timeout value in milliseconds
        :type timeout: int
        :rtype: list of :class:`RiakBucket <riak.bucket.RiakBucket>`
           instances
        """
        return self._client.get_buckets(bucket_type=self, timeout=timeout)
    def stream_buckets(self, timeout=None):
        """
        Streams the list of buckets under this bucket-type. This is a
        generator method that should be iterated over.
        The caller must close the stream when finished. See
        :meth:`RiakClient.stream_buckets()
        <riak.client.RiakClient.stream_buckets>` for more details.
        .. warning:: Do not use this in production, as it requires
           traversing through all keys stored in a cluster.
        :param timeout: a timeout value in milliseconds
        :type timeout: int
        :rtype: iterator that yields lists of :class:`RiakBucket
           <riak.bucket.RiakBucket>` instances
        """
        return self._client.stream_buckets(bucket_type=self, timeout=timeout)
    @lazy_property
    def datatype(self):
        """
        The assigned datatype for this bucket type, if present.
        :rtype: None or string
        """
        # The default type can never carry a datatype, so skip the
        # round-trip to the server in that case.
        if self.is_default():
            return None
        else:
            return self.get_properties().get('datatype')
    def __str__(self):
        return "<BucketType {0!r}>".format(self.name)
    __repr__ = __str__
    def __hash__(self):
        return hash((self.name, self._client))
    def __eq__(self, other):
        # BUG FIX: equality used to compare hash() values, so a hash
        # collision between distinct (name, client) pairs made unrelated
        # bucket types compare equal. Compare the identifying fields
        # directly; __hash__ is unchanged and remains consistent.
        if isinstance(other, self.__class__):
            return (self.name, self._client) == (other.name, other._client)
        else:
            return False
    def __ne__(self, other):
        # Mirror __eq__ exactly so != is always its negation for
        # same-class operands.
        if isinstance(other, self.__class__):
            return (self.name, self._client) != (other.name, other._client)
        else:
            return True
| |
#----------------------------------------------------------------------
# Copyright (c) 2008 Board of Trustees, Princeton University
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
import re
from sfa.util.faults import *
# for convenience and smoother translation - we should get rid of these functions eventually
def get_leaf(hrn):
    """Return the leaf (last) component of *hrn*."""
    return Xrn(hrn).get_leaf()
def get_authority(hrn):
    """Return the authority portion of *hrn*, in HRN form."""
    return Xrn(hrn).get_authority_hrn()
def urn_to_hrn(urn):
    """Convert *urn* into a ``(hrn, type)`` tuple."""
    xrn = Xrn(urn)
    return (xrn.hrn, xrn.type)
def hrn_to_urn(hrn, type):
    """Build the URN corresponding to *hrn* and *type*."""
    return Xrn(hrn, type=type).urn
def hrn_authfor_hrn(parenthrn, hrn):
    """True when *parenthrn* is an authority over *hrn*."""
    return Xrn.hrn_is_auth_for_hrn(parenthrn, hrn)
class Xrn:
    """
    An extended resource name that can be expressed either as an HRN
    (human-readable, dot-separated, with backslash-escaped literal dots)
    or as a URN (``urn:publicid:IDN+...``), converting between the two.
    Construct with either a URN, or an HRN plus an optional ``type``.
    """
    ########## basic tools on HRNs
    # split a HRN-like string into pieces
    # this is like split('.') except for escaped (backslashed) dots
    # e.g. hrn_split ('a\.b.c.d') -> [ 'a\.b','c','d']
    @staticmethod
    def hrn_split(hrn):
        # Protect escaped dots behind a placeholder so a plain
        # split('.') only cuts at unescaped dots.
        return [ x.replace('--sep--','\\.') for x in hrn.replace('\\.','--sep--').split('.') ]
    # e.g. hrn_leaf ('a\.b.c.d') -> 'd'
    @staticmethod
    def hrn_leaf(hrn): return Xrn.hrn_split(hrn)[-1]
    # e.g. hrn_auth_list ('a\.b.c.d') -> ['a\.b', 'c']
    @staticmethod
    def hrn_auth_list(hrn): return Xrn.hrn_split(hrn)[0:-1]
    # e.g. hrn_auth ('a\.b.c.d') -> 'a\.b.c'
    @staticmethod
    def hrn_auth(hrn): return '.'.join(Xrn.hrn_auth_list(hrn))
    # e.g. escape ('a.b') -> 'a\.b'
    @staticmethod
    def escape(token): return re.sub(r'([^\\])\.', r'\1\.', token)
    # e.g. unescape ('a\.b') -> 'a.b'
    @staticmethod
    def unescape(token): return token.replace('\\.','.')
    # Return the HRN authority chain from top to bottom.
    # e.g. hrn_auth_chain('a\.b.c.d') -> ['a\.b', 'a\.b.c']
    @staticmethod
    def hrn_auth_chain(hrn):
        parts = Xrn.hrn_auth_list(hrn)
        chain = []
        for i in range(len(parts)):
            chain.append('.'.join(parts[:i+1]))
        # Include the HRN itself?
        #chain.append(hrn)
        return chain
    # Is the given HRN a true authority over the namespace of the other
    # child HRN?
    # A better alternative than childHRN.startswith(parentHRN)
    # e.g. hrn_is_auth_for_hrn('a\.b', 'a\.b.c.d') -> True,
    # but hrn_is_auth_for_hrn('a', 'a\.b.c.d') -> False
    # Also hrn_is_auth_for_hrn('a\.b.c.d', 'a\.b.c.d') -> True
    @staticmethod
    def hrn_is_auth_for_hrn(parenthrn, hrn):
        if parenthrn == hrn:
            return True
        for auth in Xrn.hrn_auth_chain(hrn):
            if parenthrn == auth:
                return True
        return False
    URN_PREFIX = "urn:publicid:IDN"
    ########## basic tools on URNs
    @staticmethod
    def urn_full (urn):
        # Return *urn* with the URN prefix prepended when missing.
        if urn.startswith(Xrn.URN_PREFIX): return urn
        # BUG FIX: previously returned Xrn.URN_PREFIX+URN, referencing the
        # undefined name 'URN' and raising NameError on this branch.
        else: return Xrn.URN_PREFIX+urn
    @staticmethod
    def urn_meaningful (urn):
        # Strip the URN prefix, if present.
        if urn.startswith(Xrn.URN_PREFIX): return urn[len(Xrn.URN_PREFIX):]
        else: return urn
    @staticmethod
    def urn_split (urn):
        # URN components are '+'-separated after the prefix.
        return Xrn.urn_meaningful(urn).split('+')
    ####################
    # the local fields that are kept consistent
    # self.urn
    # self.hrn
    # self.type
    # self.path
    # provide either urn, or (hrn + type)
    def __init__ (self, xrn, type=None):
        if not xrn: xrn = ""
        # user has specified xrn : guess if urn or hrn
        if xrn.startswith(Xrn.URN_PREFIX):
            self.hrn=None
            self.urn=xrn
            self.urn_to_hrn()
        else:
            self.urn=None
            self.hrn=xrn
            self.type=type
            self.hrn_to_urn()
        # happens all the time ..
        # if not type:
        #     debug_logger.debug("type-less Xrn's are not safe")
    def get_urn(self): return self.urn
    def get_hrn(self): return self.hrn
    def get_type(self): return self.type
    def get_hrn_type(self): return (self.hrn, self.type)
    def _normalize(self):
        # Lazily derive self.leaf and self.authority from self.hrn.
        # NOTE: uses the modern raise form, valid in Python 2.6+ and 3.
        if self.hrn is None: raise SfaAPIError("Xrn._normalize")
        if not hasattr(self,'leaf'):
            self.leaf=Xrn.hrn_split(self.hrn)[-1]
        # self.authority keeps a list
        if not hasattr(self,'authority'):
            self.authority=Xrn.hrn_auth_list(self.hrn)
    def get_leaf(self):
        self._normalize()
        return self.leaf
    def get_authority_hrn(self):
        self._normalize()
        return '.'.join( self.authority )
    def get_authority_urn(self):
        # URN authorities are ':'-separated and unescaped.
        self._normalize()
        return ':'.join( [Xrn.unescape(x) for x in self.authority] )
    def urn_to_hrn(self):
        """
        compute tuple (hrn, type) from urn
        """
        # if not self.urn or not self.urn.startswith(Xrn.URN_PREFIX):
        if not self.urn.startswith(Xrn.URN_PREFIX):
            raise SfaAPIError("Xrn.urn_to_hrn")
        parts = Xrn.urn_split(self.urn)
        type=parts.pop(2)
        # Remove the authority name (e.g. '.sa')
        if type == 'authority':
            name = parts.pop()
            # Drop the sa. This is a bad hack, but its either this
            # or completely change how record types are generated/stored
            if name != 'sa':
                type = type + "+" + name
        # convert parts (list) into hrn (str) by doing the following
        # 1. remove blank parts
        # 2. escape dots inside parts
        # 3. replace ':' with '.' inside parts
        # 4. join parts using '.'
        hrn = '.'.join([Xrn.escape(part).replace(':','.') for part in parts if part])
        self.hrn=str(hrn)
        self.type=str(type)
    def hrn_to_urn(self):
        """
        compute urn from (hrn, type)
        """
        # if not self.hrn or self.hrn.startswith(Xrn.URN_PREFIX):
        if self.hrn.startswith(Xrn.URN_PREFIX):
            raise SfaAPIError("Xrn.hrn_to_urn, hrn=%s"%self.hrn)
        if self.type and self.type.startswith('authority'):
            # Authorities use the whole HRN as authority, with an 'sa'
            # (or explicit) name component.
            self.authority = Xrn.hrn_split(self.hrn)
            type_parts = self.type.split("+")
            self.type = type_parts[0]
            name = 'sa'
            if len(type_parts) > 1:
                name = type_parts[1]
        else:
            self.authority = Xrn.hrn_auth_list(self.hrn)
            name = Xrn.hrn_leaf(self.hrn)
        authority_string = self.get_authority_urn()
        if self.type is None:
            urn = "+".join(['',authority_string,Xrn.unescape(name)])
        else:
            urn = "+".join(['',authority_string,self.type,Xrn.unescape(name)])
        self.urn = Xrn.URN_PREFIX + urn
    def dump_string(self):
        # Human-readable multi-line summary, mainly for debugging.
        result="-------------------- XRN\n"
        result += "URN=%s\n"%self.urn
        result += "HRN=%s\n"%self.hrn
        result += "TYPE=%s\n"%self.type
        result += "LEAF=%s\n"%self.get_leaf()
        result += "AUTH(hrn format)=%s\n"%self.get_authority_hrn()
        result += "AUTH(urn format)=%s\n"%self.get_authority_urn()
        return result
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._certificates_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_by_resource_group_request, build_list_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CertificatesOperations:
    """CertificatesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.web.v2021_01_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Dependencies are injected by the generated service client; this
        # class only builds requests and deserializes responses.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.CertificateCollection"]:
        """Get all certificates for a subscription.
        Description for Get all certificates for a subscription.
        :param filter: Return only information specified in the filter (using OData syntax). For
         example: $filter=KeyVaultId eq 'KeyVaultId'.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CertificateCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2021_01_01.models.CertificateCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateCollection"]
        # Map auth/404/409 status codes to typed exceptions; callers may
        # extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page: build from the operation's metadata URL.
            # Later pages: next_link is a complete URL from the service,
            # so it is used as the template and forced to GET.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page; return (continuation token, items).
            deserialized = self._deserialize("CertificateCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page, raising a typed error on non-200 responses.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/certificates'}  # type: ignore
    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.CertificateCollection"]:
        """Get all certificates in a resource group.
        Description for Get all certificates in a resource group.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CertificateCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2021_01_01.models.CertificateCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # Same first-page/next-page split as in list() above.
            if not next_link:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page; return (continuation token, items).
            deserialized = self._deserialize("CertificateCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates'}  # type: ignore
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        name: str,
        **kwargs: Any
    ) -> "_models.Certificate":
        """Get a certificate.
        Description for Get a certificate.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param name: Name of the certificate.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Certificate, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2021_01_01.models.Certificate
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Certificate"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            resource_group_name=resource_group_name,
            name=name,
            subscription_id=self._config.subscription_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a success for GET; anything else raises.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Certificate', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'}  # type: ignore
    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        name: str,
        certificate_envelope: "_models.Certificate",
        **kwargs: Any
    ) -> "_models.Certificate":
        """Create or update a certificate.
        Description for Create or update a certificate.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param name: Name of the certificate.
        :type name: str
        :param certificate_envelope: Details of certificate, if it exists already.
        :type certificate_envelope: ~azure.mgmt.web.v2021_01_01.models.Certificate
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Certificate, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2021_01_01.models.Certificate
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Certificate"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the full envelope as the PUT body.
        _json = self._serialize.body(certificate_envelope, 'Certificate')
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            name=name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Certificate', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'}  # type: ignore
    @distributed_trace_async
    async def delete(
        self,
        resource_group_name: str,
        name: str,
        **kwargs: Any
    ) -> None:
        """Delete a certificate.
        Description for Delete a certificate.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param name: Name of the certificate.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request(
            resource_group_name=resource_group_name,
            name=name,
            subscription_id=self._config.subscription_id,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # DELETE treats both 200 and 204 (no content) as success.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'}  # type: ignore
    @distributed_trace_async
    async def update(
        self,
        resource_group_name: str,
        name: str,
        certificate_envelope: "_models.CertificatePatchResource",
        **kwargs: Any
    ) -> "_models.Certificate":
        """Create or update a certificate.
        Description for Create or update a certificate.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param name: Name of the certificate.
        :type name: str
        :param certificate_envelope: Details of certificate, if it exists already.
        :type certificate_envelope: ~azure.mgmt.web.v2021_01_01.models.CertificatePatchResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Certificate, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2021_01_01.models.Certificate
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Certificate"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # PATCH body uses the patch-resource shape, not the full envelope.
        _json = self._serialize.body(certificate_envelope, 'CertificatePatchResource')
        request = build_update_request(
            resource_group_name=resource_group_name,
            name=name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Certificate', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'}  # type: ignore
| |
# Copyright 2011 University of Southern California
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from nova.api.openstack.compute import flavors_extraspecs \
as flavorextraspecs_v21
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_flavor
def return_create_flavor_extra_specs(context, flavor_id, extra_specs,
                                     *args, **kwargs):
    """Stub: pretend the create succeeded and hand back the canned specs."""
    return stub_flavor_extra_specs()
def return_flavor_extra_specs(context, flavor_id):
    """Stub: every flavor id resolves to the canned specs."""
    return stub_flavor_extra_specs()
def return_flavor_extra_specs_item(context, flavor_id, key):
    """Stub: look up a single key within the canned specs."""
    value = stub_flavor_extra_specs()[key]
    return {key: value}
def return_empty_flavor_extra_specs(context, flavor_id):
    """Stub: a flavor with no extra specs at all."""
    return {}
def delete_flavor_extra_specs(context, flavor_id, key):
    """Stub: a delete that silently does nothing."""
    pass
def stub_flavor_extra_specs():
    """Canned extra-specs mapping shared by the stubs above."""
    return {
        "key1": "value1",
        "key2": "value2",
        "key3": "value3",
        "key4": "value4",
        "key5": "value5",
    }
class FlavorsExtraSpecsTestV21(test.TestCase):
bad_request = exception.ValidationError
flavorextraspecs = flavorextraspecs_v21
def _get_request(self, url, use_admin_context=False):
req_url = '/v2/fake/flavors/' + url
return fakes.HTTPRequest.blank(req_url,
use_admin_context=use_admin_context)
    def setUp(self):
        # Base-class setup must run first so stubs attach to a fresh
        # fixture; then stub out the key-pair helpers the fake request
        # factory relies on, then build the controller under test.
        super(FlavorsExtraSpecsTestV21, self).setUp()
        fakes.stub_out_key_pair_funcs(self)
        self.controller = self.flavorextraspecs.FlavorExtraSpecsController()
def test_index(self):
flavor = dict(test_flavor.fake_flavor,
extra_specs={'key1': 'value1'})
req = self._get_request('1/os-extra_specs')
with mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db'
) as mock_get:
mock_get.return_value = flavor
res_dict = self.controller.index(req, 1)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
def test_index_no_data(self, mock_get):
flavor = objects.Flavor(flavorid='1', extra_specs={})
mock_get.return_value = flavor
req = self._get_request('1/os-extra_specs')
res_dict = self.controller.index(req, 1)
self.assertEqual(0, len(res_dict['extra_specs']))
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
def test_index_flavor_not_found(self, mock_get):
req = self._get_request('1/os-extra_specs',
use_admin_context=True)
mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.index,
req, 1)
def test_show(self):
flavor = objects.Flavor(flavorid='1', extra_specs={'key5': 'value5'})
req = self._get_request('1/os-extra_specs/key5')
with mock.patch('nova.objects.Flavor.get_by_flavor_id') as mock_get:
mock_get.return_value = flavor
res_dict = self.controller.show(req, 1, 'key5')
self.assertEqual('value5', res_dict['key5'])
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
def test_show_spec_not_found(self, mock_get):
mock_get.return_value = objects.Flavor(extra_specs={})
req = self._get_request('1/os-extra_specs/key6')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, 1, 'key6')
def test_not_found_because_flavor(self):
req = self._get_request('1/os-extra_specs/key5',
use_admin_context=True)
with mock.patch('nova.objects.Flavor.get_by_flavor_id') as mock_get:
mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, 1, 'key5')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
req, 1, 'key5', body={'key5': 'value5'})
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, 1, 'key5')
req = self._get_request('1/os-extra_specs', use_admin_context=True)
with mock.patch('nova.objects.Flavor.get_by_flavor_id') as mock_get:
mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, 1, body={'extra_specs': {'key5': 'value5'}})
@mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db')
def test_delete(self, mock_get):
    # Deleting an existing spec succeeds; Flavor.save is stubbed so
    # no database write actually happens.
    flavor = dict(test_flavor.fake_flavor,
                  extra_specs={'key5': 'value5'})
    req = self._get_request('1/os-extra_specs/key5',
                            use_admin_context=True)
    mock_get.return_value = flavor
    with mock.patch('nova.objects.Flavor.save'):
        self.controller.delete(req, 1, 'key5')
def test_delete_no_admin(self):
    # Non-admin context is rejected with Forbidden before any lookup.
    self.stub_out('nova.objects.flavor._flavor_extra_specs_del',
                  delete_flavor_extra_specs)
    req = self._get_request('1/os-extra_specs/key5')
    self.assertRaises(exception.Forbidden, self.controller.delete,
                      req, 1, 'key 5')
def test_delete_spec_not_found(self):
    # Deleting a key that does not exist must yield HTTP 404.
    req = self._get_request('1/os-extra_specs/key6',
                            use_admin_context=True)
    self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                      req, 1, 'key6')
def test_create(self):
    """Creating extra specs echoes back every submitted key/value."""
    specs = {"key1": "value1", "key2": 0.5, "key3": 5}
    request = self._get_request('1/os-extra_specs', use_admin_context=True)
    result = self.controller.create(request, 1,
                                    body={"extra_specs": specs})
    for spec_key, spec_value in specs.items():
        self.assertEqual(spec_value, result['extra_specs'][spec_key])
def test_create_no_admin(self):
    # Non-admin context is rejected with Forbidden.
    body = {"extra_specs": {"key1": "value1"}}
    req = self._get_request('1/os-extra_specs')
    self.assertRaises(exception.Forbidden, self.controller.create,
                      req, 1, body=body)
def test_create_flavor_not_found(self):
    # FlavorNotFound raised while saving maps to HTTP 404.
    body = {"extra_specs": {"key1": "value1"}}
    req = self._get_request('1/os-extra_specs', use_admin_context=True)
    with mock.patch('nova.objects.Flavor.save',
                    side_effect=exception.FlavorNotFound(flavor_id='')):
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          req, 1, body=body)
def test_create_flavor_db_duplicate(self):
    # A concurrent-update failure in the DB layer maps to HTTP 409.
    body = {"extra_specs": {"key1": "value1"}}
    req = self._get_request('1/os-extra_specs', use_admin_context=True)
    with mock.patch(
            'nova.objects.Flavor.save',
            side_effect=exception.FlavorExtraSpecUpdateCreateFailed(
                id='', retries=10)):
        self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
                          req, 1, body=body)
def _test_create_bad_request(self, body):
    """Assert that creating extra specs from ``body`` is rejected."""
    self.stub_out('nova.objects.flavor._flavor_extra_specs_add',
                  return_create_flavor_extra_specs)
    request = self._get_request('1/os-extra_specs', use_admin_context=True)
    self.assertRaises(self.bad_request, self.controller.create,
                      request, 1, body=body)
# The following tests feed malformed bodies through
# _test_create_bad_request and expect schema validation to reject them.

def test_create_empty_body(self):
    self._test_create_bad_request('')

def test_create_non_dict_extra_specs(self):
    self._test_create_bad_request({"extra_specs": "non_dict"})

def test_create_non_string_key(self):
    self._test_create_bad_request({"extra_specs": {None: "value1"}})

def test_create_non_string_value(self):
    self._test_create_bad_request({"extra_specs": {"key1": None}})

def test_create_zero_length_key(self):
    self._test_create_bad_request({"extra_specs": {"": "value1"}})

def test_create_long_key(self):
    # Keys longer than 255 characters must be rejected.
    key = "a" * 256
    self._test_create_bad_request({"extra_specs": {key: "value1"}})

def test_create_long_value(self):
    # Values longer than 255 characters must be rejected.
    value = "a" * 256
    self._test_create_bad_request({"extra_specs": {"key1": value}})
def test_create_really_long_integer_value(self):
    # A huge int is valid JSON but must still be rejected by validation.
    value = 10 ** 1000
    req = self._get_request('1/os-extra_specs', use_admin_context=True)
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                      req, 1, body={"extra_specs": {"key1": value}})
def test_create_invalid_specs_key(self):
    """Keys containing disallowed characters are all rejected."""
    for bad_key in ("key1/", "<key>", "$$akey$", "!akey", ""):
        request = self._get_request('1/os-extra_specs',
                                    use_admin_context=True)
        self.assertRaises(self.bad_request, self.controller.create,
                          request, 1,
                          body={"extra_specs": {bad_key: "value1"}})
@mock.patch('nova.objects.flavor._flavor_extra_specs_add')
def test_create_valid_specs_key(self, mock_flavor_extra_specs):
    """Keys built from letters, digits, '-', '_', ' ', ':' and '.' pass.

    Bug fix: the original assigned ``side_effects`` (a typo that merely
    sets an unused attribute on the Mock) instead of ``side_effect``,
    so the stub function was never actually wired up.
    """
    valid_keys = ("key1", "month.price", "I_am-a Key", "finance:g2")
    mock_flavor_extra_specs.side_effect = return_create_flavor_extra_specs
    for key in valid_keys:
        body = {"extra_specs": {key: "value1"}}
        req = self._get_request('1/os-extra_specs', use_admin_context=True)
        res_dict = self.controller.create(req, 1, body=body)
        self.assertEqual('value1', res_dict['extra_specs'][key])
@mock.patch('nova.objects.flavor._flavor_extra_specs_add')
def test_update_item(self, mock_add):
    """Updating a single spec returns the new key/value pair."""
    mock_add.side_effect = return_create_flavor_extra_specs
    request = self._get_request('1/os-extra_specs/key1',
                                use_admin_context=True)
    result = self.controller.update(request, 1, 'key1',
                                    body={"key1": "value1"})
    self.assertEqual('value1', result['key1'])
def test_update_item_no_admin(self):
    # Non-admin context is rejected with Forbidden.
    body = {"key1": "value1"}
    req = self._get_request('1/os-extra_specs/key1')
    self.assertRaises(exception.Forbidden, self.controller.update,
                      req, 1, 'key1', body=body)
def _test_update_item_bad_request(self, body):
    """Assert that updating spec 'key1' with ``body`` is rejected."""
    request = self._get_request('1/os-extra_specs/key1',
                                use_admin_context=True)
    self.assertRaises(self.bad_request, self.controller.update,
                      request, 1, 'key1', body=body)
# Malformed single-spec update bodies, all expected to fail validation.

def test_update_item_empty_body(self):
    self._test_update_item_bad_request('')

def test_update_item_too_many_keys(self):
    # An item update may carry exactly one key/value pair.
    body = {"key1": "value1", "key2": "value2"}
    self._test_update_item_bad_request(body)

def test_update_item_non_dict_extra_specs(self):
    self._test_update_item_bad_request("non_dict")

def test_update_item_non_string_key(self):
    self._test_update_item_bad_request({None: "value1"})

def test_update_item_non_string_value(self):
    self._test_update_item_bad_request({"key1": None})

def test_update_item_zero_length_key(self):
    self._test_update_item_bad_request({"": "value1"})

def test_update_item_long_key(self):
    # Keys longer than 255 characters must be rejected.
    key = "a" * 256
    self._test_update_item_bad_request({key: "value1"})

def test_update_item_long_value(self):
    # Values longer than 255 characters must be rejected.
    value = "a" * 256
    self._test_update_item_bad_request({"key1": value})
def test_update_item_body_uri_mismatch(self):
    # The key in the body must match the key in the URI.
    body = {"key1": "value1"}
    req = self._get_request('1/os-extra_specs/bad', use_admin_context=True)
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                      req, 1, 'bad', body=body)
def test_update_flavor_not_found(self):
    # FlavorNotFound raised while saving maps to HTTP 404.
    body = {"key1": "value1"}
    req = self._get_request('1/os-extra_specs/key1',
                            use_admin_context=True)
    with mock.patch('nova.objects.Flavor.save',
                    side_effect=exception.FlavorNotFound(flavor_id='')):
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
                          req, 1, 'key1', body=body)
def test_update_flavor_db_duplicate(self):
    # A concurrent-update failure in the DB layer maps to HTTP 409.
    body = {"key1": "value1"}
    req = self._get_request('1/os-extra_specs/key1',
                            use_admin_context=True)
    with mock.patch(
            'nova.objects.Flavor.save',
            side_effect=exception.FlavorExtraSpecUpdateCreateFailed(
                id=1, retries=5)):
        self.assertRaises(webob.exc.HTTPConflict, self.controller.update,
                          req, 1, 'key1', body=body)
def test_update_really_long_integer_value(self):
    # A huge int is valid JSON but must still be rejected by validation.
    value = 10 ** 1000
    req = self._get_request('1/os-extra_specs/key1',
                            use_admin_context=True)
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                      req, 1, 'key1', body={"key1": value})
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add ``Device.user_agent_md5`` and ``PagePattern.display_path``
    and create the ``db_index``/``index_together`` indexes declared on
    the models.

    Auto-generated South schema migration; the ``models`` dict below is
    a frozen snapshot of the app's ORM at the time the migration was
    written and must not be edited by hand.
    """

    def forwards(self, orm):
        """Apply the migration: add the two columns, create all indexes."""
        # Adding field 'Device.user_agent_md5'
        db.add_column(u'djanalytics_device', 'user_agent_md5',
                      self.gf('django.db.models.fields.CharField')(default='', unique=True, max_length=32, db_index=True),
                      keep_default=False)

        # Adding index on 'Device', fields ['screen_width']
        db.create_index(u'djanalytics_device', ['screen_width'])

        # Adding index on 'Device', fields ['os_version']
        db.create_index(u'djanalytics_device', ['os_version'])

        # Adding index on 'Device', fields ['browser_version']
        db.create_index(u'djanalytics_device', ['browser_version'])

        # Adding index on 'Device', fields ['device']
        db.create_index(u'djanalytics_device', ['device'])

        # Adding index on 'Device', fields ['browser']
        db.create_index(u'djanalytics_device', ['browser'])

        # Adding index on 'Device', fields ['os']
        db.create_index(u'djanalytics_device', ['os'])

        # Adding index on 'Device', fields ['screen_height']
        db.create_index(u'djanalytics_device', ['screen_height'])

        # Adding index on 'Device', fields ['screen_width', 'screen_height']
        db.create_index(u'djanalytics_device', ['screen_width', 'screen_height'])

        # Adding index on 'Device', fields ['os', 'os_version']
        db.create_index(u'djanalytics_device', ['os', 'os_version'])

        # Adding index on 'Device', fields ['browser', 'browser_version']
        db.create_index(u'djanalytics_device', ['browser', 'browser_version'])

        # Adding index on 'PageType', fields ['code']
        db.create_index(u'djanalytics_pagetype', ['code'])

        # Adding index on 'RequestEvent', fields ['tracking_key']
        db.create_index(u'djanalytics_requestevent', ['tracking_key'])

        # Adding index on 'RequestEvent', fields ['tracking_user_id']
        db.create_index(u'djanalytics_requestevent', ['tracking_user_id'])

        # Adding index on 'Visitor', fields ['uuid']
        db.create_index(u'djanalytics_visitor', ['uuid'])

        # Adding index on 'Visit', fields ['visit_date']
        db.create_index(u'djanalytics_visit', ['visit_date'])

        # Adding index on 'Visit', fields ['uuid']
        db.create_index(u'djanalytics_visit', ['uuid'])

        # Adding index on 'ReferrerType', fields ['code']
        db.create_index(u'djanalytics_referrertype', ['code'])

        # Adding index on 'DeviceType', fields ['code']
        db.create_index(u'djanalytics_devicetype', ['code'])

        # Adding field 'PagePattern.display_path'
        db.add_column(u'djanalytics_pagepattern', 'display_path',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=200),
                      keep_default=False)

        # Adding index on 'PagePattern', fields ['code']
        db.create_index(u'djanalytics_pagepattern', ['code'])

    def backwards(self, orm):
        """Revert the migration: drop the indexes, then the two columns."""
        # Removing index on 'PagePattern', fields ['code']
        db.delete_index(u'djanalytics_pagepattern', ['code'])

        # Removing index on 'DeviceType', fields ['code']
        db.delete_index(u'djanalytics_devicetype', ['code'])

        # Removing index on 'ReferrerType', fields ['code']
        db.delete_index(u'djanalytics_referrertype', ['code'])

        # Removing index on 'Visit', fields ['uuid']
        db.delete_index(u'djanalytics_visit', ['uuid'])

        # Removing index on 'Visit', fields ['visit_date']
        db.delete_index(u'djanalytics_visit', ['visit_date'])

        # Removing index on 'Visitor', fields ['uuid']
        db.delete_index(u'djanalytics_visitor', ['uuid'])

        # Removing index on 'RequestEvent', fields ['tracking_user_id']
        db.delete_index(u'djanalytics_requestevent', ['tracking_user_id'])

        # Removing index on 'RequestEvent', fields ['tracking_key']
        db.delete_index(u'djanalytics_requestevent', ['tracking_key'])

        # Removing index on 'PageType', fields ['code']
        db.delete_index(u'djanalytics_pagetype', ['code'])

        # Removing index on 'Device', fields ['browser', 'browser_version']
        db.delete_index(u'djanalytics_device', ['browser', 'browser_version'])

        # Removing index on 'Device', fields ['os', 'os_version']
        db.delete_index(u'djanalytics_device', ['os', 'os_version'])

        # Removing index on 'Device', fields ['screen_width', 'screen_height']
        db.delete_index(u'djanalytics_device', ['screen_width', 'screen_height'])

        # Removing index on 'Device', fields ['screen_height']
        db.delete_index(u'djanalytics_device', ['screen_height'])

        # Removing index on 'Device', fields ['os']
        db.delete_index(u'djanalytics_device', ['os'])

        # Removing index on 'Device', fields ['browser']
        db.delete_index(u'djanalytics_device', ['browser'])

        # Removing index on 'Device', fields ['device']
        db.delete_index(u'djanalytics_device', ['device'])

        # Removing index on 'Device', fields ['browser_version']
        db.delete_index(u'djanalytics_device', ['browser_version'])

        # Removing index on 'Device', fields ['os_version']
        db.delete_index(u'djanalytics_device', ['os_version'])

        # Removing index on 'Device', fields ['screen_width']
        db.delete_index(u'djanalytics_device', ['screen_width'])

        # Deleting field 'Device.user_agent_md5'
        db.delete_column(u'djanalytics_device', 'user_agent_md5')

        # Deleting field 'PagePattern.display_path'
        db.delete_column(u'djanalytics_pagepattern', 'display_path')

    # Frozen ORM snapshot used by South to build the fake ORM passed to
    # forwards()/backwards(); generated, do not edit.
    models = {
        'djanalytics.client': {
            'Meta': {'object_name': 'Client'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'uuid': ('django.db.models.fields.CharField', [], {'default': "'a613d16b-2fca-43c4-8f10-d3d967289c7b'", 'max_length': '36'})
        },
        'djanalytics.device': {
            'Meta': {'object_name': 'Device', 'index_together': "[('browser', 'browser_version'), ('os', 'os_version'), ('screen_width', 'screen_height')]"},
            'browser': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'browser_version': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'device': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'device_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.DeviceType']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'os': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'os_version': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'screen_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'screen_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'user_agent': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'user_agent_md5': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
        },
        'djanalytics.devicetype': {
            'Meta': {'object_name': 'DeviceType'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'})
        },
        'djanalytics.domain': {
            'Meta': {'object_name': 'Domain'},
            'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Client']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'pattern': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'web_property': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.WebProperty']", 'null': 'True'})
        },
        'djanalytics.ipfilter': {
            'Meta': {'object_name': 'IPFilter'},
            'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Client']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'include': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'netmask': ('django.db.models.fields.CharField', [], {'max_length': '19'})
        },
        u'djanalytics.location': {
            'Meta': {'object_name': 'Location'},
            'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
        },
        'djanalytics.page': {
            'Meta': {'unique_together': "(('path', 'client'),)", 'object_name': 'Page'},
            'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Client']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.PageType']", 'null': 'True'}),
            'path': ('django.db.models.fields.URLField', [], {'max_length': '200'})
        },
        'djanalytics.pagepattern': {
            'Meta': {'unique_together': "(('pattern', 'client'),)", 'object_name': 'PagePattern'},
            'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Client']"}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'display_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'page_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.PageType']"}),
            'pattern': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'djanalytics.pagetype': {
            'Meta': {'object_name': 'PageType'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
        },
        'djanalytics.pagevisit': {
            'Meta': {'object_name': 'PageVisit'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'duration': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Page']"}),
            'request_event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.RequestEvent']"}),
            'visit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Visit']"})
        },
        'djanalytics.pathfilter': {
            'Meta': {'object_name': 'PathFilter'},
            'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Client']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'include': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'path_pattern': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'djanalytics.referrertype': {
            'Meta': {'object_name': 'ReferrerType'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'pattern': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
        },
        'djanalytics.requestevent': {
            'Meta': {'object_name': 'RequestEvent'},
            'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Client']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'db_index': 'True'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['djanalytics.Location']", 'null': 'True', 'blank': 'True'}),
            'method': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '5', 'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.URLField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
            'protocol': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'query_string': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'referrer': ('django.db.models.fields.URLField', [], {'max_length': '2083', 'null': 'True', 'blank': 'True'}),
            'response_code': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'screen_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'screen_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'tracking_key': ('django.db.models.fields.CharField', [], {'default': "'e9721934-59a1-4bdb-a5e8-377c6aafc1b6'", 'max_length': '36', 'db_index': 'True'}),
            'tracking_user_id': ('django.db.models.fields.CharField', [], {'default': "'5d725c5a-e9fd-477e-8a99-dc2512d19dbe'", 'max_length': '36', 'db_index': 'True'}),
            'user_agent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'djanalytics.visit': {
            'Meta': {'object_name': 'Visit'},
            'conversion_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'device': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Device']"}),
            'duration': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '0'}),
            'first_page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'first_visit'", 'to': "orm['djanalytics.Page']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_visit'", 'null': 'True', 'to': "orm['djanalytics.Page']"}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'pages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['djanalytics.Page']", 'through': "orm['djanalytics.PageVisit']", 'symmetrical': 'False'}),
            'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
            'visit_date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
            'visitor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Visitor']"}),
            'web_property': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.WebProperty']"})
        },
        'djanalytics.visitor': {
            'Meta': {'object_name': 'Visitor'},
            'clients': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['djanalytics.Client']", 'symmetrical': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'})
        },
        'djanalytics.webproperty': {
            'Meta': {'object_name': 'WebProperty'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['djanalytics']
| |
"""
Suggestions for acquisition parameters
"""
from java.lang import *
import de.bruker.nmr.mfw.root as root
import math
import sys
import os
sys.path.append(root.UtilPath.getTopspinHome()+ '/exp/stan/nmr/py/BioPY/modules/')
import CPDtools
import TopCmds
import GetNUCs as Nuc
import FREQ as fq
# Nucleus labels recognised in biomolecular pulse-program names.
BioNuc= "H","N","C","CA","Ca","CO","Co","CB","Cb","D"
# First let's find out the dimensions of the pulsprog
def PPDIM():
    """Determine the dimensionality of the current pulse program.

    Reads the pulse-program file named by the PULPROG parameter, scans
    it for ``aqseq`` and for the F1/F2/F3 ``PH``/``EA``/``QF`` MC
    clauses, then maps the incremented/decremented phases back to the
    channel pulsed in each indirect dimension.

    Returns:
        tuple (ppdim, aqseq, nuclei) -- dimension count, acquisition
        order string (e.g. "321"), and the concatenated nucleus letters
        with the directly detected dimension first.

    NOTE(review): for a 1D sequence ``EXP`` is never assigned, so the
    final ``return`` would raise NameError -- presumably only called
    for >=2D experiments; confirm with callers.
    """
    ppdim=1; aqseq=""
    ph2D=""; ph3D=""; ph4D=""
    Nucl1D=""; Nucl2D=""; Nucl3D=""; Nucl4D=""
    #Acquisition dimension from acq file
    Nucl1D="".join(letter for letter in Nuc.list()[0] if letter.isalpha())
    #TopCmds.MSG(Nucl1D)
    pp=TopCmds.GETPAR("PULPROG")
    # NOTE(review): 'dir' shadows the builtin; left unchanged here.
    dir=get_dir("PP_DIRS")
    Name=find_file(dir,pp)
    #TopCmds.MSG(Name)
    if os.path.exists(Name) == 1:
        f=open(Name, 'r')
        text=f.readlines()
        f.close()
    else:
        TopCmds.MSG("Pulse Program not found: Exiting")
        TopCmds.EXIT()
    # Determine pulse sequence dimension and TPPI phases
    for line in text:
        lines = line.rstrip()
        if lines.find("aqseq") >=0:
            #Check for comments
            if lines.find(";") < 0 :
                aqseq="".join(letter for letter in lines if letter.isdigit())
            elif lines.find(";") > lines.find("aqseq"):
                aqseq="".join(letter for letter in lines if letter.isdigit())
        if lines.find("F1PH") >=0 or lines.find("F1EA") >=0 :
            if lines.find("F1PH") >=0 : search="F1PH"
            if lines.find("F1EA") >=0 : search="F1EA"
            #Check for comments
            if lines.find(";") < 0 :
                if ppdim < 2 : ppdim=2
                ph2D=find_phase(lines,search)
                # ph2Dp matches "phN)" call sites, ph2D matches "phN " ones
                ph2Dp=ph2D+")"
                ph2D=ph2D+" "
            elif lines.find(";") > lines.find(search):
                if ppdim < 2 : ppdim=2
                ph2D=find_phase(lines,search)
                ph2Dp=ph2D+")"
                ph2D=ph2D+" "
        if lines.find("F1QF") >=0:
            #Check for comments
            if lines.find(";") < 0 :
                if ppdim < 2 : ppdim = 2
                # "_" marks a QF (no phase-cycled nucleus) dimension
                Nucl2D="_"
            elif lines.find(";") > lines.find("F1QF"):
                if ppdim < 2 : ppdim = 2
                Nucl2D="_"
        if lines.find("F2PH") >=0 or lines.find("F2EA") >=0 :
            if lines.find("F2PH") >=0 : search="F2PH"
            if lines.find("F2EA") >=0 : search="F2EA"
            #Check for comments
            if lines.find(";") < 0 :
                if ppdim < 3: ppdim = 3
                ph3D=find_phase(lines,search)
                ph3Dp=ph3D+")"
                ph3D=ph3D+" "
            elif lines.find(";") > lines.find(search):
                if ppdim < 3: ppdim = 3
                ph3D=find_phase(lines,search)
                ph3Dp=ph3D+")"
                ph3D=ph3D+" "
        if lines.find("F2QF") >=0:
            #Check for comments
            if lines.find(";") < 0 :
                if ppdim < 3 : ppdim= 3
                Nucl3D="_"
            elif lines.find(";") > lines.find("F2QF"):
                if ppdim < 3 : ppdim= 3
                Nucl3D="_"
        if lines.find("F3PH") >=0 or lines.find("F3EA") >=0 :
            if lines.find("F3PH") >=0 : search="F3PH"
            if lines.find("F3EA") >=0 : search="F3EA"
            #Check for comments
            if lines.find(";") < 0 :
                if ppdim < 4 : ppdim= 4
                ph4D=find_phase(lines,search)
                ph4Dp=ph4D+")"
                ph4D=ph4D+" "
            elif lines.find(";") > lines.find("F3PH"):
                if ppdim < 4 : ppdim= 4
                ph4D=find_phase(lines,search)
                ph4Dp=ph4D+")"
                ph4D=ph4D+" "
        if lines.find("F3QF") >=0:
            #Check for comments
            if lines.find(";") < 0 :
                if ppdim < 4 : ppdim = 4
                Nucl4D="_"
            elif lines.find(";") > lines.find("F3QF"):
                if ppdim < 4 : ppdim = 4
                Nucl4D="_"
    #TopCmds.MSG(ph2D+":2D\n"+ph3D+": 3D\n"+ph4D+": 4D")
    #TopCmds.MSG(str(ppdim)+" ppdim\n"+str(ph2D)+" ph2D")
    # From TPPI phases make a string corresponding to pulse nucleus
    for line in text:
        lines=line.rstrip()
        if ppdim >= 2 :
            if lines.find(ph2D) >= 0 and Nucl2D == "":
                #TopCmds.MSG(lines)
                Nucl2D=find_nuc(lines,ph2D)
                #TopCmds.MSG(Nucl2D)
            elif lines.find(ph2Dp) >=0 and Nucl2D == "":
                #TopCmds.MSG(lines)
                Nucl2D=find_nuc(lines,ph2Dp)
                #TopCmds.MSG(Nucl2D)
        if ppdim >= 3 :
            if lines.find(ph3D) >= 0 and Nucl3D == "":
                #TopCmds.MSG(lines)
                Nucl3D=find_nuc(lines,ph3D)
                #TopCmds.MSG(Nucl3D)
            elif lines.find(ph3Dp) >= 0 and Nucl3D == "":
                #TopCmds.MSG(lines)
                Nucl3D=find_nuc(lines,ph3Dp)
                #TopCmds.MSG(Nucl3D)
        if ppdim >= 4 :
            if lines.find(ph4D) >= 0 and Nucl4D == "":
                #TopCmds.MSG(lines)
                Nucl4D=find_nuc(lines,ph4D)
            elif lines.find(ph4Dp) >=0 and Nucl4D == "":
                #TopCmds.MSG(lines)
                Nucl4D=find_nuc(lines,ph4Dp)
    #TopCmds.MSG(str(ppdim)+" :ppdim\n"+Nucl2D+" :Nucl2D\n"+Nucl3D+" :Nucl3D\n"+Nucl4D+" :Nucl4D\n")
    # Translate, Concatenate and return the Nucls
    # A digit here is a channel number (f1, f2, ...); map it to the
    # nucleus name configured for that channel.
    if ppdim >= 4 and Nucl4D.isdigit(): Nucl4D="".join(letter for letter in Nuc.list()[int(Nucl4D)-1] if letter.isalpha())
    if ppdim >= 3 and Nucl3D.isdigit(): Nucl3D="".join(letter for letter in Nuc.list()[int(Nucl3D)-1] if letter.isalpha())
    if ppdim >= 2 and Nucl2D.isdigit(): Nucl2D="".join(letter for letter in Nuc.list()[int(Nucl2D)-1] if letter.isalpha())
    #TopCmds.MSG(Nucl1D+" :Nucl1D\n"+Nucl2D+" :Nucl2D\n"+Nucl3D+" :Nucl3D\n"+Nucl4D+" :Nucl4D\n")
    if ppdim==4:
        EXP= Nucl4D+Nucl3D+Nucl2D; aqseq="4321"
    if ppdim==3:
        if aqseq=="321": EXP= Nucl3D+Nucl2D
        if aqseq=="312": EXP= Nucl2D+Nucl3D
        if aqseq=="": EXP= Nucl3D+Nucl2D; aqseq="321"
    if ppdim==2: EXP= Nucl2D; aqseq="21"
    return ppdim, aqseq, Nucl1D+EXP
def PPNUC(dim,fre):
    """Guess the nuclei involved from the pulse-program name.

    Scans the PULPROG name for upper-case letters (ignoring 'A', 'O'
    and 'X'), keeping a following 'A'/'a'/'O'/'o' as part of the label
    (e.g. "CA", "Co").

    Args:
        dim: expected number of dimensions.
        fre: fallback nucleus list used when fewer than ``dim`` labels
            are found in the name (taken in reverse order).

    Returns:
        tuple (ppnuc, k): list of nucleus labels and the raw count of
        label characters found in the name.
    """
    PP=TopCmds.GETPAR("PULPROG")
    ppnuc=[]
    temp_nuc=[]
    k=0
    for letter in range(len(PP)):
        if PP[letter].isupper():
            if PP[letter] != "A" and PP[letter] != "O" and PP[letter] != "X" :
                k=k+1
                if letter+1 < len(PP):
                    if PP[letter+1] == "A" or PP[letter+1] == "O" or PP[letter+1] == "a" or PP[letter+1] == "o":
                        ppnuc.append(PP[letter]+PP[letter+1])
                    else:
                        ppnuc.append(PP[letter])
                else:
                    ppnuc.append(PP[letter])
    #Deal with non-standard names
    if k > dim:
        # Too many candidates: keep only a consecutive run of labels
        # that are all known biomolecular nuclei (BioNuc).
        i=0; j=0
        for letter in ppnuc:
            for item in BioNuc:
                if item == letter :
                    if j == 0 or j == i-1:
                        temp_nuc.append(letter)
                        j=i
                    else:
                        temp_nuc=[]
                        j=0
            i=i+1
        if len(temp_nuc) == dim : ppnuc=temp_nuc
    if k < dim:
        # Too few candidates: fall back to the configured frequencies,
        # highest dimension first.
        ppnuc=[]
        for i in range(dim):ppnuc.append(fre[dim-1-i])
    return ppnuc, k
def PPACQ(aqseq,ppfre,ppnuc):
    """Return ``ppnuc`` reordered into acquisition order.

    ``aqseq`` is a string of 1-based dimension digits (e.g. "321");
    one nucleus is picked per entry of ``ppfre``.
    """
    return [ppnuc[int(aqseq[i]) - 1] for i in range(len(ppfre))]
def SetInc(nuc,key,MAS):
    """Compute rotor-synchronised dwell increments, in microseconds.

    For each nucleus the requested spectral width (ppm, looked up in
    ``key`` by nucleus name) is converted to a frequency window around
    the carrier, turned into a dwell increment, and then rounded to a
    multiple (or integer fraction) of the rotor period so sampling
    stays rotor-synchronised.

    Args:
        nuc: sequence of nucleus objects exposing ``name``, ``offsp``
            (carrier position, presumably ppm) and ``ppm2offs(ppm)``.
        key: dict of spectral widths keyed by 'CA', 'CO', 'C', 'N',
            'H' and 'Hdet' (direct-detected H).
        MAS: magic-angle-spinning rate in Hz.

    Returns:
        list of increments (us), one per entry of ``nuc``.

    Fix: the original computed an unused local ``sw``; it has been
    removed (no behaviour change).
    """
    increment=[]
    SWNUC=key['CA']
    TauR=1000000./MAS  # rotor period in microseconds
    for i in range(len(nuc)):
        if nuc[i].name=="CA" or nuc[i].name=="Ca":SWNUC=key['CA']
        if nuc[i].name=="CO" or nuc[i].name=="Co":SWNUC=key['CO']
        if nuc[i].name=="C" :SWNUC=key['C']
        if nuc[i].name=="N" :SWNUC=key['N']
        if nuc[i].name=="H" :SWNUC=key['H']
        #direct detect H exception
        if i==0 and nuc[i].name=="H": SWNUC=key['Hdet']
        freqhigh=nuc[i].ppm2offs(nuc[i].offsp+SWNUC/2.)
        freqlow =nuc[i].ppm2offs(nuc[i].offsp-SWNUC/2.)
        #TopCmds.MSG("carr "+str(nuc[i].offsp)+"\nHigh: "+str(freqhigh)+"\nLow: "+str(freqlow)+"\
        #\nBF: "+str(nuc[i].bf)+"\nOffs: "+str(nuc[i].offs))
        incr=1000000./(freqhigh-freqlow)
        if incr > TauR:
            # NOTE(review): for non-CO nuclei this doubles the increment
            # before flooring to a rotor-period multiple -- confirm
            # intended.
            if nuc[i].name=="CO" or nuc[i].name=="Co":
                incr=TauR*math.floor(incr/TauR)
            else :
                incr=TauR*math.floor(2*incr/TauR)
        if incr < TauR:
            # Faster than one rotor period: use an integer fraction.
            incr=TauR/math.floor(TauR/incr)
        increment.append(incr)
    return increment
def find_nuc(lines,search):
    """Return the channel character following ``search`` on a line.

    Lines containing '=' are definitions, not phase references, and
    yield "". Otherwise the character after the ':' that follows
    ``search`` is taken; a leading 'f' (as in ':f2') is skipped so the
    channel digit itself is returned.
    """
    if lines.find("=") >= 0:
        return ""
    start = lines.find(search)
    colon = lines[start:].find(":")
    offset = 1
    nuc = lines[start + colon + offset]
    # Skip any run of blanks after the colon.
    while nuc == " " and start + colon + offset + 1 <= len(lines[start + colon + offset:]):
        offset = offset + 1
        nuc = lines[start + colon + offset + 1]
    if nuc == "f":
        nuc = lines[start + colon + offset + 1]
    return nuc
def find_phase(lines,search):
    """Extract the phase-program name cycled for clause ``search``.

    Handles both the TopSpin 3 ``calph(phN, ...)`` notation and the
    TopSpin 2 MC notation with ``ipN``/``dpN`` tokens (for which the
    returned name is rewritten as e.g. "ph1" -> "p" + "h" + "1").

    NOTE(review): if neither notation matches, ``Phase`` is never
    assigned and the final ``return`` raises NameError -- presumably
    callers only pass lines already known to contain a clause; confirm.
    """
    #Topspin 3 notation
    if lines.find("calph") >=0 :
        i=lines.find(search)
        j=lines[i:].find("calph")
        k=lines[i+j:].find("(")+1
        l=lines[i+j+k:].find(",")
        ph=lines[i+j+k:i+j+k+l]
        # NOTE(review): ph.find(",") is truthy for "not at index 0",
        # so both branches effectively return ph unchanged.
        if ph.find(","):
            Phase=ph[:len(ph)]
        else:
            Phase=ph
    #Topspin 2 MC notation
    else:
        i=lines.find(search)
        j=lines[i:].find("(")+1
        k=lines[i+j:].find(",")
        #TopCmds.MSG(str(i)+": i\n"+str(j)+": j\n"+str(k)+": k\n")
        #TopCmds.MSG(lines[i+j:i+j+k])
        #get first instance of incrementing phase
        if lines[i+j:i+j+k].find("ip")>=0:
            l=lines[i+j:i+j+k].find("ip")
            ph=lines[i+j+l+1:i+j+l+3]
            if ph.find(","):ph=ph[:2]
            Phase=ph[:1]+"h"+ph[1:]
        #get first instance of decrementing phase
        if lines[i+j:i+j+k].find("dp")>=0:
            l=lines[i+j:i+j+k].find("dp")
            ph=lines[i+j+l+1:i+j+l+3]
            if ph.find(","):ph=ph[:2]
            Phase=ph[:1]+"h"+ph[1:]
    #TopCmds.MSG(search+" "+Phase)
    return Phase
def get_dir(Search):
    """Read the search-path list for ``Search`` from parfile-dirs.prop.

    Finds the line ``Search=dir1;dir2;...`` in the current directory's
    parfile-dirs.prop, splits it on ';', and prefixes every relative
    entry with <TOPSPIN>/exp/stan/nmr/.

    Returns:
        list of directory path strings.

    NOTE(review): ``l`` (the ';' index list) is initialised once, so if
    several lines match ``Search`` the indexes accumulate across lines;
    ``target`` is never used.
    """
    waves = []
    lines = []
    l = []
    target =''
    name=root.UtilPath.getCurdir()+'/parfile-dirs.prop'
    defaultdir=root.UtilPath.getTopspinHome()+'/exp/stan/nmr/'
    f = open(name, 'r')
    text=f.readlines()
    f.close()
    i=0
    for line in text:
        lines = line.rstrip()
        if lines.find(Search) >=0:
            j=lines.find('=')
            Shapes=lines[j+1:]
            #print(Shapes)
            # Collect the positions of every ';' separator.
            i=0
            while i <= len(Shapes):
                #print(Shapes[i:i+1])
                if Shapes[i:i+1].find(';') >= 0 :
                    l.append(i)
                i=i+1
            # Cut the value into individual directory entries.
            j=0
            k=0
            while k <= (len(l)-1) :
                waves.append(Shapes[j:l[k]])
                j=l[k]+1
                k=k+1
            waves.append(Shapes[j:])
    #TopCmds.MSG(str(waves))
    # Make relative entries absolute under the TopSpin default tree.
    k=0
    while k <= (len(waves)-1) :
        if waves[k][0:1] != '/' :
            #print (waves[k])
            waves[k]=str(defaultdir + waves[k])
        k=k+1
    #TopCmds.MSG(str(waves))
    #print(waves)
    return waves
def find_file(dirs,name):
    """Return the first path in ``dirs`` containing a file ``name``.

    Pops up a TopSpin message and exits the script if the file is not
    found in any directory.
    """
    found=0
    i=0
    path=''
    while i <= (len(dirs) - 1):
        print (dirs[i], found )
        if found == 0:
            search = str(dirs[i] + '/' + name)
            # The triple-quoted string below is a disabled debug stub
            # (an expression statement with no effect), kept verbatim.
            """
            TopCmds.MSG("This is here to remind you that the os package is removed")
            found=1
            path=search
            """
            if os.path.exists(search) == 1:
                found = 1
                path = search
        i=i+1
    if found == 0:
        TopCmds.MSG("File named " + name + " not found\n Exiting")
        TopCmds.EXIT()
    return path
def PathAcqNuc(dim):
    """Return the path of the acquisition parameter file for ``dim``.

    Builds <datadir>/<name>/<expno>/acqu for the direct dimension
    (dim <= 1) and .../acqu2, .../acqu3, ... for indirect dimensions.

    Fixes: removed the unused local ``text`` and collapsed the
    redundant complementary ``if`` pair into if/else (no behaviour
    change).
    """
    dataset = TopCmds.CURDATA()
    # NOTE(review): assumes CURDATA() yields [name, expno, procno, dir]
    # so that dataset[3]/dataset[0]/dataset[1] is the experiment dir --
    # confirm against the TopSpin API.
    path = dataset[3] + '/' + dataset[0] + '/' + dataset[1]
    if dim <= 1:
        return path + '/acqu'
    return path + '/acqu' + str(dim)
def GetFnMODE(dim):
    """Read the FnMODE parameter of dimension ``dim`` as a digit string.

    Scans the acqu file for the '##$FnMODE' JCAMP entry and returns the
    digits found after '='.

    NOTE(review): if the acqu file is missing or lacks the entry,
    ``ThingIWant`` is never bound and the return raises NameError --
    presumably the file always exists for a processed dataset; confirm.
    """
    acqu=PathAcqNuc(dim)
    if os.path.exists(acqu) == 1:
        f=open(acqu, 'r')
        text=f.readlines()
        f.close()
        for line in text:
            lines = line.rstrip()
            if lines.find('##$FnMODE') >=0:
                j=lines.find('=')
                ThingIWant="".join(letter for letter in lines[j+1:] if letter.isdigit())
                #print ThingIWant
    return ThingIWant
def GetAcqNUC1(dim):
    """Return the NUC1 nucleus name (the text between <...>) from the acqu
    file of dimension *dim*; '' if the file or the entry is missing.
    """
    acqu = PathAcqNuc(dim)
    # was: unbound when the file or the ##$NUC1 line was absent,
    # raising an error on return
    ThingIWant = ""
    if os.path.exists(acqu) == 1:
        f = open(acqu, 'r')
        text = f.readlines()
        f.close()
        for line in text:
            stripped = line.rstrip()
            if stripped.find('##$NUC1') >= 0:
                j = stripped.find('<')
                k = stripped.find('>')
                ThingIWant = stripped[j + 1:k]
    return ThingIWant
def SetAcqNUC1(dim, Nucl):
    """Rewrite the ##$NUC1 entry of the acqu file of *dim* to <Nucl>.

    Keeps a one-deep backup of the previous file as acqu.bak.
    """
    acqu = PathAcqNuc(dim)
    acqu_bak = PathAcqNuc(dim) + ".bak"
    if os.path.exists(acqu) == 1:
        fh = open(acqu, 'r')
        text = fh.readlines()
        fh.close()
        # replace any stale backup, then move the current file aside
        if os.path.exists(acqu_bak):
            os.remove(acqu_bak)
        os.rename(acqu, acqu_bak)
        out = open(acqu, 'w')
        for line in text:
            if line.rstrip().find('##$NUC1') >= 0:
                out.write("##$NUC1= <" + Nucl + ">\n")
            else:
                out.write(line)
        out.close()
    return
def SetFnMODE(dim, mode):
    """Rewrite the ##$FnMODE entry of the acqu file of *dim* to *mode*.

    Keeps a one-deep backup of the previous file as acqu.bak.
    """
    acqu = PathAcqNuc(dim)
    acqu_bak = PathAcqNuc(dim) + ".bak"
    if os.path.exists(acqu) == 1:
        fh = open(acqu, 'r')
        text = fh.readlines()
        fh.close()
        # replace any stale backup, then move the current file aside
        if os.path.exists(acqu_bak):
            os.remove(acqu_bak)
        os.rename(acqu, acqu_bak)
        out = open(acqu, 'w')
        for line in text:
            if line.rstrip().find('##$FnMODE') >= 0:
                out.write("##$FnMODE= " + str(mode) + "\n")
            else:
                out.write(line)
        out.close()
    return
| |
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scheduled Sampling.
This module implements scheduled sampling as described in (Bengio et al., 2015).
The entry points are two functions,
`sequential_scheduled_sampling_for_t2tmodel()`:
scheduled sampling adapted to instances of T2TModel.
`sequential_scheduled_sampling()`:
raw implementation of scheduled sampling. May be used independent of T2T.
**WARNING** This code is VERY slow. Its runtime is at least O(n^2) for
sequences of length n. For models with self-attention, its runtime is O(n^3).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensor2tensor.layers import common_layers
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import inplace_ops # pylint: disable=g-direct-tensorflow-import
def sequential_scheduled_sampling_for_t2tmodel(t2tmodel, features):
  """Run sequential scheduled sampling on a T2TModel.

  Args:
    t2tmodel: T2TModel instance.
    features: {str: Tensor}. Input features.

  Returns:
    ss_logits: [batch_size, seq_len, 1, 1, vocab_size].
    losses_dict: {str: scalar Tensor}. Losses to minimize.
  """
  gold_targets = features["targets"]
  batch_size, seq_len = common_layers.shape_list(gold_targets)[:2]
  gold_targets = tf.reshape(gold_targets, [batch_size, seq_len])

  # The adapter binds the model and features to the three callbacks that
  # sequential_scheduled_sampling() expects.
  adapter = ScheduledSamplingAdapter(t2tmodel, features)
  _, ss_logits, losses_dict = sequential_scheduled_sampling(
      infer_fn=adapter.infer_fn,
      mix_fn=adapter.mix_fn,
      loss_fn=adapter.loss_fn,
      targets=gold_targets)

  # Restore the [batch, length, 1, 1, vocab] layout T2T expects.
  vocab_size = t2tmodel.problem_hparams.vocab_size["targets"]
  ss_logits = tf.reshape(ss_logits,
                         [batch_size, seq_len, 1, 1, vocab_size])
  return ss_logits, losses_dict
def sequential_scheduled_sampling(infer_fn, mix_fn, loss_fn, targets):
  """Scheduled Sampling.

  Builds the conditioning sequence one timestep at a time inside a
  tf.while_loop: at step i the model is re-run on the tokens chosen so far,
  a token is sampled for position i, mixed with the gold token, and written
  into the conditioning tensor. This is why runtime is at least O(n^2).

  Args:
    infer_fn: Function. Computes logits for all timesteps.
    mix_fn: Function. Mixes gold and sample tokens.
    loss_fn: Function. Computes loss between gold tokens and logits.
    targets: Tensor of shape [batch_size, seq_len]. Gold tokens.

  Returns:
    ss_tokens: Tensor of shape [batch_size, seq_len]. Scheduled sampling tokens.
    ss_logits: Tensor of shape [batch_size, seq_len, vocab_size]. Logits for
      next token when conditioning on ss_tokens.
    losses_dict: {str: scalar Tensor}. Losses to optimize.
  """
  targets_shape = common_layers.shape_list(targets)
  batch_size = targets_shape[0]
  seq_len = targets_shape[1]
  if not targets.shape.is_fully_defined():
    # TODO(duckworthd): When running on GPU, I get the following error. Solve
    # it to enable use on other devices.
    #
    # Cannot use 'Identity_186' as input to
    # 'transformer/parallel_0_7/transformer/transformer/symbol_modality_16282_512/shared/convert_gradient_to_tensor_HBc3xYw22Mw'
    # because 'Identity_186' is in a while loop.
    raise ValueError(
        "The following code only works on TPU. As targets.shape isn't fully "
        "defined, I am assuming you are using a different device.")

  def cond_fn(i, ss_tokens):
    """True if i < seq_len."""
    _ = ss_tokens
    return i < seq_len

  def body_fn(i, ss_tokens):
    """Constructs conditioning tokens for scheduled sampling."""
    # next_token_logits depends on timesteps 0...i-1.
    #
    # [batch_size, seq_len] -> [batch_size, seq_len, vocab_size]
    ss_tokens_logits = infer_fn(ss_tokens)

    # Same as 'next_token_logits = ss_tokens_logits[:, i, :]'.
    vocab_size = common_layers.shape_list(ss_tokens_logits)[2]
    next_token_logits = tf.slice(
        ss_tokens_logits, begin=[0, i, 0], size=[batch_size, 1, vocab_size])
    next_token_logits = tf.squeeze(next_token_logits, axis=[1])

    # [batch_size, vocab_size] -> [batch_size]
    sampled_next_tokens = _sample_next_tokens(next_token_logits)

    # Same as 'gold_next_tokens = targets[:, i]'.
    gold_next_tokens = tf.slice(targets, begin=[0, i], size=[batch_size, 1])
    gold_next_tokens = tf.squeeze(gold_next_tokens, axis=[1])

    # Write the mixed token into column i of the conditioning tensor.
    # stop_gradient: no gradient flows through the token choice itself.
    next_tokens = mix_fn(gold_next_tokens, sampled_next_tokens)
    ss_tokens = _update_timestep(ss_tokens, timestep=i, values=next_tokens)
    return i+1, tf.stop_gradient(ss_tokens)

  # tf.while_loop() over all timesteps. Generate scheduled sampling tokens.
  i = 0
  ss_tokens = tf.zeros([batch_size, seq_len], dtype=tf.int32)
  i, ss_tokens = tf.while_loop(cond_fn, body_fn, [i, ss_tokens])

  # One final forward pass scores the finished conditioning sequence.
  ss_logits = infer_fn(ss_tokens)
  return ss_tokens, ss_logits, loss_fn(targets, ss_logits)
def _mix_tokens(p_sample, gold_targets, sampled_targets):
  """Interleave sampled and gold tokens randomly.

  Args:
    p_sample: float in [0, 1]. Probability a token will come from
      'sampled_targets'. 0 means all-gold, 1 means all-sampled.
    gold_targets: Tensor. Gold token IDs.
    sampled_targets: Tensor. Sampled token IDs. Same shape as 'gold_targets'.

  Returns:
    Tensor of same shape as 'gold_targets' containing a mix of tokens from
    'gold_targets' and 'sampled_targets'.
  """
  shape = common_layers.shape_list(sampled_targets)
  # Per-position coin flip: True -> take the sampled token.
  use_sampled = tf.less(tf.random_uniform(shape), p_sample)
  return tf.where(use_sampled, sampled_targets, gold_targets)
def _sample_next_tokens(logits):
  """Sample one token id per batch element from [batch, vocab] logits."""
  batch_size = common_layers.shape_list(logits)[0]
  # categorical() returns int64 with a trailing sample dim; flatten and
  # cast to match the int32 conditioning tensor.
  samples = tf.random.categorical(logits, 1)
  return tf.reshape(tf.cast(samples, tf.int32), [batch_size])
def _update_timestep(x, timestep, values):
  """Set x[:, timestep] = values.

  This operation is **NOT** differentiable.

  Args:
    x: Tensor of shape [batch_size, seq_len, ...]
    timestep: int or scalar Tensor. Index to update in x.
    values: Tensor of shape [batch_size, ...]. New values for x[:, i].

  Returns:
    Copy of 'x' after setting x[:, timestep] = values.
  """
  # list() is required: on Python 3, range() is an immutable sequence and
  # the swap below would raise TypeError.
  perm = list(range(x.shape.ndims))
  # Swap batch and time axes so alias_inplace_update (which writes along
  # axis 0) can overwrite row 'timestep'.
  perm[0], perm[1] = perm[1], perm[0]
  x = tf.transpose(x, perm)
  x = inplace_ops.alias_inplace_update(x, timestep, values)
  x = tf.transpose(x, perm)
  return x
def inverse_decay_mix_prob(warmup_schedule_name, p_max, num_warmup_steps):
  """Interpolate from 0.001 to 'p_max' over 'num_warmup_steps'."""
  schedules = {
      "exp": common_layers.inverse_exp_decay,
      "linear": common_layers.inverse_lin_decay,
      "sigmoid": common_layers.inverse_sigmoid_decay,
  }
  # KeyError here means an unknown schedule name, same as the original.
  warmup_fn = schedules[warmup_schedule_name]
  return p_max * warmup_fn(num_warmup_steps, min_value=0.001)
class ScheduledSamplingAdapter(object):
  """Adapts T2TModel for sequential_scheduled_sampling()."""

  def __init__(self, t2tmodel, features):
    # Model and its (unmodified) input features; copies are made per call.
    self._t2tmodel = t2tmodel
    self._features = features

    # Scheduled sampling is a training-time technique only.
    hparams = self._t2tmodel.hparams
    assert hparams.mode == tf.estimator.ModeKeys.TRAIN, hparams.mode

  def infer_fn(self, partial_targets):
    """Computes logits for all timesteps.

    Args:
      partial_targets: [batch_size, seq_len]. Targets to condition on.

    Returns:
      next_token_logits: [batch_size, seq_len, vocab_size]
    """
    batch_size, seq_len = common_layers.shape_list(partial_targets)
    # T2T modalities expect the [batch, length, 1, 1] layout.
    partial_targets = tf.reshape(partial_targets, [batch_size, seq_len, 1, 1])
    features = copy.copy(self._features)
    features["targets"] = partial_targets

    # NOTE(review): reuse=True assumes the model's variables were already
    # created by an earlier forward pass -- confirm with the caller.
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
      transformed_features = self._t2tmodel.bottom(features)

      with tf.variable_scope("body"):
        body_outputs, losses = self._t2tmodel._normalize_body_output(  # pylint: disable=protected-access
            self._t2tmodel.body(transformed_features))
        assert losses == {"extra": 0.0}, (
            "Auxiliary losses are not propagated in this code. %s"
            % (losses,))

      logits = self._t2tmodel.top(body_outputs, features)

    # Flatten the [batch, length, 1, 1, vocab] logits back to 3-D.
    vocab_size = self._t2tmodel.problem_hparams.vocab_size["targets"]
    logits = tf.reshape(logits, [batch_size, seq_len, vocab_size])
    return logits

  def mix_fn(self, gold_tokens, sampled_tokens):
    """Mixes gold and sampled tokens randomly."""
    # Sampling probability warms up over training per hparams schedule.
    hparams = self._t2tmodel.hparams
    p_sample = inverse_decay_mix_prob(
        hparams.scheduled_sampling_warmup_schedule,
        hparams.scheduled_sampling_gold_mixin_prob,
        hparams.scheduled_sampling_warmup_steps)
    return _mix_tokens(
        p_sample=p_sample,
        gold_targets=gold_tokens,
        sampled_targets=sampled_tokens)

  def loss_fn(self, targets, logits):
    """Constructs loss dict.

    Args:
      targets: [batch_size, seq_len]
      logits: [batch_size, seq_len, vocab_size]

    Returns:
      {str: Tensor of shape []}. Losses.
    """
    # Restore T2T's 4-D/5-D layouts before delegating to the model loss.
    batch_size, seq_len, vocab_size = common_layers.shape_list(logits)
    targets = tf.reshape(targets, [batch_size, seq_len, 1, 1])
    logits = tf.reshape(logits, [batch_size, seq_len, 1, 1, vocab_size])
    features = copy.copy(self._features)
    features["targets"] = targets
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
      losses = {
          "training": self._t2tmodel.loss(logits, features),
      }
    return losses
| |
# BEGIN_COPYRIGHT
#
# Copyright 2009-2013 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
# DEV NOTE: this module is used by the setup script, so it MUST NOT
# rely on extension modules.
import os, subprocess as sp, glob, re
import xml.dom.minidom as dom
from xml.parsers.expat import ExpatError
try:
from config import DEFAULT_HADOOP_HOME
except ImportError: # should only happen at compile time
DEFAULT_HADOOP_HOME = None
class HadoopVersionError(Exception):
    """Raised when a Hadoop version string cannot be parsed."""

    def __init__(self, version_str):
        # Keep the formatted message on .value for backward compatibility.
        self.value = "unrecognized version string format: %r" % (version_str,)

    def __str__(self):
        return repr(self.value)
class HadoopXMLError(Exception):
    """Raised when a Hadoop XML configuration file cannot be parsed."""
    pass
class HadoopVersion(object):
    """
    Stores Hadoop version information.

    Hadoop version strings are in the <MAIN>-<REST> format, where <MAIN>
    is in the typical dot-separated integers format, while <REST> is
    subject to a higher degree of variation. Examples: '0.20.2',
    '0.20.203.0', '0.20.2-cdh3u4', '1.0.4-SNAPSHOT', '2.0.0-mr1-cdh4.1.0'.

    The constructor parses the version string and stores:

    * a ``main`` attribute for the <MAIN> part;
    * a ``cdh`` attribute for the cdh part, if present;
    * an ``ext`` attribute for other appendages, if present.

    If the version string is not in the expected format, it raises
    ``HadoopVersionError``. For consistency, all attributes are stored
    as tuples.
    """
    def __init__(self, version_str):
        self.__str = version_str
        version = self.__str.split("-", 1)
        try:
            self.main = tuple(map(int, version[0].split(".")))
        except ValueError:
            raise HadoopVersionError(self.__str)
        if len(version) > 1:
            self.cdh, self.ext = self.__parse_rest(version[1])
        else:
            self.cdh = self.ext = ()
        # Flattened form used for ordering, equality and hashing.
        self.__tuple = self.main + self.cdh + self.ext

    def __parse_rest(self, rest_str):
        # <REST> may itself contain a dash (e.g. 'mr1-cdh4.1.0'); the cdh
        # part, when present, is always the last component.
        rest = rest_str.split("-", 1)
        rest.reverse()
        m = re.match(r"cdh(.+)", rest[0])
        if m is None:
            return (), tuple(rest)
        cdh_version_str = m.groups()[0]
        # cdh3-style 'NuM' (e.g. 3u4) or dotted cdh4-style (e.g. 4.1.0).
        m = re.match(r"(\d+)u(\d+)", cdh_version_str)
        if m is None:
            cdh_version = cdh_version_str.split(".")
        else:
            cdh_version = m.groups()
        try:
            cdh_version = tuple(map(int, cdh_version))
        except ValueError:
            raise HadoopVersionError(self.__str)
        return cdh_version, tuple(rest[1:])

    def is_cloudera(self):
        return bool(self.cdh)

    def has_security(self):
        return self.cdh >= (3, 0, 0) or self.main >= (0, 20, 203)

    @property
    def tuple(self):
        return self.__tuple

    def __lt__(self, other): return self.tuple.__lt__(other.tuple)
    def __le__(self, other): return self.tuple.__le__(other.tuple)
    def __eq__(self, other): return self.tuple.__eq__(other.tuple)
    def __ne__(self, other): return self.tuple.__ne__(other.tuple)
    def __gt__(self, other): return self.tuple.__gt__(other.tuple)
    def __ge__(self, other): return self.tuple.__ge__(other.tuple)

    # Fix: Python 3 sets __hash__ to None when a class defines __eq__,
    # which made instances unhashable; hash consistently with __eq__.
    def __hash__(self):
        return hash(self.__tuple)

    def tag(self):
        parts = self.main
        if self.cdh:
            parts += ("cdh",) + self.cdh
        if self.ext:
            parts += self.ext
        return "_".join(map(str, parts))

    def __str__(self):
        return self.__str
def cdh_mr1_version(version):
    """Return the MR1 flavor of a cdh version, same type as the input.

    Accepts either a version string or a HadoopVersion instance; raises
    ValueError for non-cdh versions.
    """
    if isinstance(version, HadoopVersion):
        rtype = HadoopVersion
    else:
        version = HadoopVersion(version)
        rtype = str
    if not version.is_cloudera():
        raise ValueError("%r is not a cdh version string" % (str(version),))
    return rtype(str(version).replace("cdh", "mr1-cdh"))
def is_exe(fpath):
    """True if *fpath* exists and the current user may execute it."""
    if not os.path.exists(fpath):
        return False
    return os.access(fpath, os.X_OK)
def is_readable(fpath):
    """True if *fpath* exists and the current user may read it."""
    if not os.path.exists(fpath):
        return False
    return os.access(fpath, os.R_OK)
def first_dir_in_glob(pattern):
    """Return the first directory (in sorted glob order) matching *pattern*.

    Returns None when no match is a directory.
    """
    for candidate in sorted(glob.glob(pattern)):
        if os.path.isdir(candidate):
            return candidate
    return None
def extract_text(node):
    """Concatenate the stripped text children of a DOM *node*."""
    chunks = []
    for child in node.childNodes:
        if child.nodeType == child.TEXT_NODE:
            chunks.append(child.data.strip())
    return "".join(chunks)
def parse_hadoop_conf_file(fn):
    """Parse the Hadoop XML configuration file *fn* into a {name: value} dict.

    Raises HadoopXMLError if the file is not valid XML or its root element
    is not <configuration>. Properties missing a <name> or <value> are
    silently skipped, as Hadoop itself does.
    """
    items = []
    try:
        doc = dom.parse(fn)
    except ExpatError as e:
        raise HadoopXMLError("not a valid XML file (%s)" % e)
    conf = doc.documentElement
    if conf.nodeName != "configuration":
        raise HadoopXMLError("not a valid Hadoop configuration file")
    props = [n for n in conf.childNodes if n.nodeName == "property"]
    for p in props:
        # Fix: 'nv' was created once and shared across all <property>
        # elements, so a property missing <name> or <value> silently
        # reused the previous property's fields.
        nv = {}
        for n in p.childNodes:
            if n.childNodes:
                nv[n.nodeName] = extract_text(n)
        try:
            items.append((nv["name"], nv["value"]))
        except KeyError:
            pass
    return dict(items)
def hadoop_home_from_path():
    """Derive the Hadoop home from a 'hadoop' executable found on PATH.

    Returns the parent of the PATH entry containing the executable
    (e.g. /usr for /usr/bin/hadoop), or None when not found.
    """
    for entry in os.getenv("PATH", "").split(os.pathsep):
        if is_exe(os.path.join(entry, 'hadoop')):
            return os.path.dirname(entry)
    return None
class PathFinder(object):
    """
    Encapsulates the logic to find paths and other info required by Pydoop.

    All lookups are lazy and cached on the instance; call reset() to
    discard cached results. Each lookup raises ValueError (via __error)
    when it cannot be resolved, naming the environment variable to set.
    """
    # Well-known CDH install locations (package vs parcel deployments).
    CDH_HADOOP_EXEC = "/usr/bin/hadoop"
    CDH_HADOOP_HOME_PKG = "/usr/lib/hadoop"
    CDH_HADOOP_HOME_PARCEL = first_dir_in_glob(
        "/opt/cloudera/parcels/CDH-*/lib/hadoop"
    )

    def __init__(self):
        self.__hadoop_home = None
        self.__hadoop_exec = None
        self.__hadoop_conf = None
        self.__hadoop_version = None  # str
        self.__hadoop_version_info = None  # HadoopVersion
        self.__is_cloudera = None
        self.__hadoop_params = None
        self.__hadoop_classpath = None

    def reset(self):
        # Drop all cached lookups by re-running the initializer.
        self.__init__()

    @staticmethod
    def __error(what, env_var):
        raise ValueError("%s not found, try setting %s" % (what, env_var))

    def hadoop_home(self, fallback=DEFAULT_HADOOP_HOME):
        # Resolution order: env var, compile-time default, common install
        # globs, then a 'hadoop' executable found on PATH.
        if not self.__hadoop_home:
            self.__hadoop_home = (
                os.getenv("HADOOP_HOME") or
                fallback or
                first_dir_in_glob("/usr/lib/hadoop*") or
                first_dir_in_glob("/opt/hadoop*") or
                hadoop_home_from_path()
            )
        if not self.__hadoop_home:
            PathFinder.__error("hadoop home", "HADOOP_HOME")
        return self.__hadoop_home

    def hadoop_exec(self, hadoop_home=None):
        if not self.__hadoop_exec:
            # allow overriding of package-installed hadoop exec
            if not (hadoop_home or os.getenv("HADOOP_HOME")):
                if is_exe(self.CDH_HADOOP_EXEC):
                    self.__hadoop_exec = self.CDH_HADOOP_EXEC
            else:
                fn = os.path.join(hadoop_home or self.hadoop_home(), "bin", "hadoop")
                if is_exe(fn):
                    self.__hadoop_exec = fn
        if not self.__hadoop_exec:
            PathFinder.__error("hadoop executable", "HADOOP_HOME or PATH")
        return self.__hadoop_exec

    def hadoop_version(self, hadoop_home=None):
        # Prefer the HADOOP_VERSION env var; otherwise run 'hadoop version'
        # and take the last token of its first output line.
        if not self.__hadoop_version:
            try:
                self.__hadoop_version = os.environ["HADOOP_VERSION"]
            except KeyError:
                try:
                    hadoop = self.hadoop_exec(hadoop_home)
                except ValueError:
                    pass
                else:
                    try:
                        # HADOOP_HOME is removed from the subprocess env so
                        # the executable reports its own version.
                        env = os.environ.copy()
                        env.pop("HADOOP_HOME", None)
                        p = sp.Popen(
                            [hadoop, "version"], stdout=sp.PIPE, stderr=sp.PIPE, env=env,
                        )
                        out, err = p.communicate()
                        if p.returncode:
                            raise RuntimeError(err or out)
                        self.__hadoop_version = out.splitlines()[0].split()[-1]
                    except (OSError, IndexError):
                        pass
        if not self.__hadoop_version:
            PathFinder.__error("hadoop version", "HADOOP_VERSION")
        return self.__hadoop_version

    def hadoop_version_info(self, hadoop_home=None):
        # Parsed (HadoopVersion) form of hadoop_version(), cached.
        if not self.__hadoop_version_info:
            self.__hadoop_version_info = HadoopVersion(
                self.hadoop_version(hadoop_home)
            )
        return self.__hadoop_version_info

    def cloudera(self, version=None, hadoop_home=None):
        if not self.__is_cloudera:
            version_info = HadoopVersion(version or self.hadoop_version(hadoop_home))
            self.__is_cloudera = version_info.is_cloudera()
        return self.__is_cloudera

    def hadoop_conf(self, hadoop_home=None):
        if not self.__hadoop_conf:
            try:
                self.__hadoop_conf = os.environ["HADOOP_CONF_DIR"]
            except KeyError:
                # CDH ships its conf under /etc/hadoop/conf; plain installs
                # keep it under <home>/conf.
                if self.cloudera():
                    candidate = '/etc/hadoop/conf'
                else:
                    candidate = os.path.join(hadoop_home or self.hadoop_home(), 'conf')
                if os.path.isdir(candidate):
                    self.__hadoop_conf = candidate
        if not self.__hadoop_conf:
            PathFinder.__error("hadoop conf dir", "HADOOP_CONF_DIR")
        # Export the result so child processes see a consistent value.
        os.environ["HADOOP_CONF_DIR"] = self.__hadoop_conf
        return self.__hadoop_conf

    def hadoop_params(self, hadoop_conf=None, hadoop_home=None):
        # Merge the *-site.xml files into a single {name: value} dict;
        # later files override earlier ones.
        if not self.__hadoop_params:
            params = {}
            if not hadoop_conf:
                hadoop_conf = self.hadoop_conf(hadoop_home)
            for n in "hadoop", "core", "hdfs", "mapred":
                fn = os.path.join(hadoop_conf, "%s-site.xml" % n)
                try:
                    params.update(parse_hadoop_conf_file(fn))
                except (IOError, HadoopXMLError) as e:
                    pass  # silently ignore, as in Hadoop
            self.__hadoop_params = params
        return self.__hadoop_params

    def hadoop_classpath(self, hadoop_home=None):
        if hadoop_home is None:
            hadoop_home = self.hadoop_home()
        if not self.__hadoop_classpath:
            v = self.hadoop_version_info(hadoop_home)
            if not v.cdh or v.cdh < (4, 0, 0):
                # Pre-CDH4 layout: jars directly under the home and lib/.
                self.__hadoop_classpath = ':'.join(
                    glob.glob(os.path.join(hadoop_home, 'hadoop*.jar')) +
                    glob.glob(os.path.join(hadoop_home, 'lib', '*.jar'))
                )
            else:  # FIXME: this does not cover from-tarball installation
                if os.path.isdir(self.CDH_HADOOP_HOME_PKG):
                    hadoop_home = self.CDH_HADOOP_HOME_PKG
                elif os.path.isdir(self.CDH_HADOOP_HOME_PARCEL or ""):
                    hadoop_home = self.CDH_HADOOP_HOME_PARCEL
                else:
                    raise RuntimeError("unsupported CDH deployment")
                mr1_home = "%s-0.20-mapreduce" % hadoop_home
                self.__hadoop_classpath = ':'.join(
                    glob.glob(os.path.join(hadoop_home, 'client', '*.jar')) +
                    glob.glob(os.path.join(hadoop_home, 'hadoop-annotations*.jar')) +
                    glob.glob(os.path.join(mr1_home, 'hadoop*.jar'))
                )
        return self.__hadoop_classpath

    def find(self):
        # Best-effort summary of every lookup; unresolvable ones map to None.
        info = {}
        for a in (
            "hadoop_exec",
            "hadoop_version_info",
            "hadoop_home",
            "hadoop_conf",
            "hadoop_params",
            "hadoop_classpath",
        ):
            try:
                info[a] = getattr(self, a)()
            except ValueError:
                info[a] = None
        return info
| |
#
# Aodh documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 27 11:38:59 2011.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# BASE_DIR is the directory holding this conf.py; ROOT is the repository
# root (two levels up).
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
# Make the repo root and this directory importable so autodoc can find the
# aodh package and local Sphinx helpers.
sys.path.insert(0, ROOT)
sys.path.insert(0, BASE_DIR)
# This is required for ReadTheDocs.org, but isn't a bad idea anyway.
# NOTE(review): aodh is not a Django project; this looks inherited from a
# horizon-based conf.py -- confirm it is still needed.
os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings'
def write_autodoc_index():
    """Generate per-module automodule stubs plus the autoindex toctree.

    Walks each source tree in SRCS, writes one .rst stub per module under
    sourcecode/ (skipping EXCLUDED_MODULES), regenerates stubs whose
    source changed, and removes stubs whose source is gone.
    """

    def find_autodoc_modules(module_name, sourcedir):
        """Return dotted module names found under sourcedir/module_name."""
        modlist = []
        os.chdir(os.path.join(sourcedir, module_name))
        print("SEARCHING %s" % sourcedir)
        for root, dirs, files in os.walk("."):
            for filename in files:
                if filename.endswith(".py"):
                    # remove the pieces of the root
                    elements = root.split(os.path.sep)
                    # replace the leading "." with the module name
                    elements[0] = module_name
                    # and get the base module name
                    base, extension = os.path.splitext(filename)
                    if base != "__init__":
                        elements.append(base)
                    modlist.append(".".join(elements))
        return modlist

    RSTDIR = os.path.abspath(os.path.join(BASE_DIR, "sourcecode"))
    SRCS = {'aodh': ROOT}
    # Fix: this was ('aodh.tests') -- a plain string, not a tuple.
    # Iterating a string yields single characters, so startswith('a')
    # matched (and excluded) every aodh module.
    EXCLUDED_MODULES = ('aodh.tests',)
    CURRENT_SOURCES = {}

    if not os.path.exists(RSTDIR):
        os.mkdir(RSTDIR)
    CURRENT_SOURCES[RSTDIR] = ['autoindex.rst']

    INDEXOUT = open(os.path.join(RSTDIR, "autoindex.rst"), "w")
    INDEXOUT.write("=================\n")
    INDEXOUT.write("Source Code Index\n")
    INDEXOUT.write("=================\n")

    for modulename, path in SRCS.items():
        sys.stdout.write("Generating source documentation for %s\n" %
                         modulename)
        INDEXOUT.write("\n%s\n" % modulename.capitalize())
        INDEXOUT.write("%s\n" % ("=" * len(modulename),))
        INDEXOUT.write(".. toctree::\n")
        INDEXOUT.write(" :maxdepth: 1\n")
        INDEXOUT.write("\n")

        MOD_DIR = os.path.join(RSTDIR, modulename)
        CURRENT_SOURCES[MOD_DIR] = []
        if not os.path.exists(MOD_DIR):
            os.mkdir(MOD_DIR)
        for module in find_autodoc_modules(modulename, path):
            if any(module.startswith(exclude) for exclude in EXCLUDED_MODULES):
                print("Excluded module %s." % module)
                continue
            mod_path = os.path.join(path, *module.split("."))
            generated_file = os.path.join(MOD_DIR, "%s.rst" % module)

            INDEXOUT.write(" %s/%s\n" % (modulename, module))

            # Find the __init__.py module if this is a directory
            if os.path.isdir(mod_path):
                source_file = os.path.join(mod_path, "__init__") + ".py"
            else:
                source_file = mod_path + ".py"

            CURRENT_SOURCES[MOD_DIR].append("%s.rst" % module)
            # Only generate a new file if the source has changed or we don't
            # have a doc file to begin with.
            if not os.access(generated_file, os.F_OK) or \
                    os.stat(generated_file).st_mtime < \
                    os.stat(source_file).st_mtime:
                print("Module %s updated, generating new documentation."
                      % module)
                FILEOUT = open(generated_file, "w")
                header = "The :mod:`%s` Module" % module
                FILEOUT.write("%s\n" % ("=" * len(header),))
                FILEOUT.write("%s\n" % header)
                FILEOUT.write("%s\n" % ("=" * len(header),))
                FILEOUT.write(".. automodule:: %s\n" % module)
                FILEOUT.write(" :members:\n")
                FILEOUT.write(" :undoc-members:\n")
                FILEOUT.write(" :show-inheritance:\n")
                FILEOUT.write(" :noindex:\n")
                FILEOUT.close()
    INDEXOUT.close()

    # Delete auto-generated .rst files for sources which no longer exist
    for directory, subdirs, files in list(os.walk(RSTDIR)):
        for old_file in files:
            if old_file not in CURRENT_SOURCES.get(directory, []):
                print("Removing outdated file for %s" % old_file)
                os.remove(os.path.join(directory, old_file))
write_autodoc_index()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings.
# They can be extensions coming with Sphinx (named 'sphinx.ext.*')
# or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.autohttp.flask',
'wsmeext.sphinxext',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinxcontrib.pecanwsme.rest',
'oslosphinx',
'sphinxcontrib.docbookrestapi.setup'
]
wsme_protocols = ['restjson', 'restxml']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
if os.getenv('HUDSON_PUBLISH_DOCS'):
templates_path = ['_ga', '_templates']
else:
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Aodh'
copyright = u'2012-2015, OpenStack Foundation'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['**/#*', '**~', '**/#*#']
# The reST default role (used for this markup: `text`)
# to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
primary_domain = 'py'
nitpicky = False
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme_path = ['.']
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"nosidebar": "false"
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Aodhdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Aodh.tex', u'Aodh Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'aodh', u'Aodh Documentation',
[u'OpenStack'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Aodh', u'Aodh Documentation', u'OpenStack',
'Aodh', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output --------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Aodh'
epub_author = u'OpenStack'
epub_publisher = u'OpenStack'
epub_copyright = u'2012-2015, OpenStack'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import uuid
from keystoneclient.common import cms
from oslo_serialization import jsonutils
import six
from six.moves import http_client
from testtools import matchers
from keystone.common import extension as keystone_extension
import keystone.conf
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import rest
from keystone.tests.unit.schema import v2
CONF = keystone.conf.CONF
class CoreApiTests(object):
    """Tests of the core v2.0 API, shared across response content types.

    This mixin expects the concrete test class to provide the HTTP helpers
    (``public_request``, ``admin_request``, ``get_scoped_token``) and the
    content-type-aware response validators (``assertValidErrorResponse``,
    ``assertValidAuthenticationResponse``, ...).
    """

    def assertValidError(self, error):
        """Assert an error body exposes code, title and message."""
        self.assertIsNotNone(error.get('code'))
        self.assertIsNotNone(error.get('title'))
        self.assertIsNotNone(error.get('message'))

    def assertValidVersion(self, version):
        """Assert a version descriptor has its required fields."""
        self.assertIsNotNone(version)
        self.assertIsNotNone(version.get('id'))
        self.assertIsNotNone(version.get('status'))
        self.assertIsNotNone(version.get('updated'))

    def assertValidExtension(self, extension):
        """Assert an extension descriptor has its required fields."""
        self.assertIsNotNone(extension)
        self.assertIsNotNone(extension.get('name'))
        self.assertIsNotNone(extension.get('namespace'))
        self.assertIsNotNone(extension.get('alias'))
        self.assertIsNotNone(extension.get('updated'))

    def assertValidExtensionLink(self, link):
        """Assert an extension link exposes rel, type and href."""
        self.assertIsNotNone(link.get('rel'))
        self.assertIsNotNone(link.get('type'))
        self.assertIsNotNone(link.get('href'))

    def assertValidTenant(self, tenant):
        """Assert a v2 tenant is well-formed and has no v3-only fields."""
        self.assertIsNotNone(tenant.get('id'))
        self.assertIsNotNone(tenant.get('name'))
        self.assertNotIn('domain_id', tenant)
        self.assertNotIn('parent_id', tenant)

    def assertValidUser(self, user):
        """Assert a user body carries at least an id and a name."""
        self.assertIsNotNone(user.get('id'))
        self.assertIsNotNone(user.get('name'))

    def assertValidRole(self, tenant):
        # NOTE(review): the argument is a role; the parameter name is
        # historical.
        self.assertIsNotNone(tenant.get('id'))
        self.assertIsNotNone(tenant.get('name'))

    def test_public_not_found(self):
        r = self.public_request(
            path='/%s' % uuid.uuid4().hex,
            expected_status=http_client.NOT_FOUND)
        self.assertValidErrorResponse(r)

    def test_admin_not_found(self):
        r = self.admin_request(
            path='/%s' % uuid.uuid4().hex,
            expected_status=http_client.NOT_FOUND)
        self.assertValidErrorResponse(r)

    def test_public_multiple_choice(self):
        # Use the symbolic status constant for consistency with the rest
        # of this file (was a bare 300).
        r = self.public_request(
            path='/', expected_status=http_client.MULTIPLE_CHOICES)
        self.assertValidMultipleChoiceResponse(r)

    def test_admin_multiple_choice(self):
        r = self.admin_request(
            path='/', expected_status=http_client.MULTIPLE_CHOICES)
        self.assertValidMultipleChoiceResponse(r)

    def test_public_version(self):
        r = self.public_request(path='/v2.0/')
        self.assertValidVersionResponse(r)

    def test_admin_version(self):
        r = self.admin_request(path='/v2.0/')
        self.assertValidVersionResponse(r)

    def test_public_extensions(self):
        r = self.public_request(path='/v2.0/extensions')
        self.assertValidExtensionListResponse(
            r, keystone_extension.PUBLIC_EXTENSIONS)

    def test_admin_extensions(self):
        r = self.admin_request(path='/v2.0/extensions')
        self.assertValidExtensionListResponse(
            r, keystone_extension.ADMIN_EXTENSIONS)

    def test_admin_extensions_returns_not_found(self):
        self.admin_request(path='/v2.0/extensions/invalid-extension',
                           expected_status=http_client.NOT_FOUND)

    def test_public_osksadm_extension_returns_not_found(self):
        self.public_request(path='/v2.0/extensions/OS-KSADM',
                            expected_status=http_client.NOT_FOUND)

    def test_admin_osksadm_extension(self):
        r = self.admin_request(path='/v2.0/extensions/OS-KSADM')
        self.assertValidExtensionResponse(
            r, keystone_extension.ADMIN_EXTENSIONS)

    def test_authenticate(self):
        r = self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'passwordCredentials': {
                        'username': self.user_foo['name'],
                        'password': self.user_foo['password'],
                    },
                    'tenantId': self.tenant_bar['id'],
                },
            },
            expected_status=http_client.OK)
        self.assertValidAuthenticationResponse(r, require_service_catalog=True)

    def test_authenticate_unscoped(self):
        r = self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'passwordCredentials': {
                        'username': self.user_foo['name'],
                        'password': self.user_foo['password'],
                    },
                },
            },
            expected_status=http_client.OK)
        self.assertValidAuthenticationResponse(r)

    def test_get_tenants_for_token(self):
        r = self.public_request(path='/v2.0/tenants',
                                token=self.get_scoped_token())
        self.assertValidTenantListResponse(r)

    def test_validate_token(self):
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/tokens/%(token_id)s' % {
                'token_id': token,
            },
            token=token)
        self.assertValidAuthenticationResponse(r)

    def test_invalid_token_returns_not_found(self):
        token = self.get_scoped_token()
        self.admin_request(
            path='/v2.0/tokens/%(token_id)s' % {
                'token_id': 'invalid',
            },
            token=token,
            expected_status=http_client.NOT_FOUND)

    def test_validate_token_service_role(self):
        self.md_foobar = self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'],
            self.tenant_service['id'],
            self.role_service['id'])

        token = self.get_scoped_token(
            tenant_id=default_fixtures.SERVICE_TENANT_ID)
        r = self.admin_request(
            path='/v2.0/tokens/%s' % token,
            token=token)
        self.assertValidAuthenticationResponse(r)

    def test_remove_role_revokes_token(self):
        self.md_foobar = self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'],
            self.tenant_service['id'],
            self.role_service['id'])

        token = self.get_scoped_token(
            tenant_id=default_fixtures.SERVICE_TENANT_ID)
        r = self.admin_request(
            path='/v2.0/tokens/%s' % token,
            token=token)
        self.assertValidAuthenticationResponse(r)

        self.assignment_api.remove_role_from_user_and_project(
            self.user_foo['id'],
            self.tenant_service['id'],
            self.role_service['id'])

        # the token should no longer validate once the role is gone
        r = self.admin_request(
            path='/v2.0/tokens/%s' % token,
            token=token,
            expected_status=http_client.UNAUTHORIZED)

    def test_validate_token_belongs_to(self):
        token = self.get_scoped_token()
        path = ('/v2.0/tokens/%s?belongsTo=%s' % (token,
                                                  self.tenant_bar['id']))
        r = self.admin_request(path=path, token=token)
        self.assertValidAuthenticationResponse(r, require_service_catalog=True)

    def test_validate_token_no_belongs_to_still_returns_catalog(self):
        token = self.get_scoped_token()
        path = ('/v2.0/tokens/%s' % token)
        r = self.admin_request(path=path, token=token)
        self.assertValidAuthenticationResponse(r, require_service_catalog=True)

    def test_validate_token_head(self):
        """The same call as above, except using HEAD.

        There's no response to validate here, but this is included for the
        sake of completely covering the core API.
        """
        token = self.get_scoped_token()
        self.admin_request(
            method='HEAD',
            path='/v2.0/tokens/%(token_id)s' % {
                'token_id': token,
            },
            token=token,
            expected_status=http_client.OK)

    def test_endpoints(self):
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/tokens/%(token_id)s/endpoints' % {
                'token_id': token,
            },
            token=token)
        self.assertValidEndpointListResponse(r)

    def test_get_tenant(self):
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/tenants/%(tenant_id)s' % {
                'tenant_id': self.tenant_bar['id'],
            },
            token=token)
        self.assertValidTenantResponse(r)

    def test_get_tenant_by_name(self):
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/tenants?name=%(tenant_name)s' % {
                'tenant_name': self.tenant_bar['name'],
            },
            token=token)
        self.assertValidTenantResponse(r)

    def test_get_user_roles_with_tenant(self):
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % {
                'tenant_id': self.tenant_bar['id'],
                'user_id': self.user_foo['id'],
            },
            token=token)
        self.assertValidRoleListResponse(r)

    def test_get_user_roles_without_tenant(self):
        # Listing roles without a tenant scope is not implemented in v2.
        token = self.get_scoped_token()
        self.admin_request(
            path='/v2.0/users/%(user_id)s/roles' % {
                'user_id': self.user_foo['id'],
            },
            token=token, expected_status=http_client.NOT_IMPLEMENTED)

    def test_get_user(self):
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/users/%(user_id)s' % {
                'user_id': self.user_foo['id'],
            },
            token=token)
        self.assertValidUserResponse(r)

    def test_get_user_by_name(self):
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/users?name=%(user_name)s' % {
                'user_name': self.user_foo['name'],
            },
            token=token)
        self.assertValidUserResponse(r)

    def test_create_update_user_invalid_enabled_type(self):
        # Enforce usage of boolean for 'enabled' field
        token = self.get_scoped_token()

        # Test CREATE request
        r = self.admin_request(
            method='POST',
            path='/v2.0/users',
            body={
                'user': {
                    'name': uuid.uuid4().hex,
                    'password': uuid.uuid4().hex,
                    'enabled': "False",
                },
            },
            token=token,
            expected_status=http_client.BAD_REQUEST)
        self.assertValidErrorResponse(r)

        r = self.admin_request(
            method='POST',
            path='/v2.0/users',
            body={
                'user': {
                    'name': uuid.uuid4().hex,
                    'password': uuid.uuid4().hex,
                    # In JSON, 0|1 are not booleans
                    'enabled': 0,
                },
            },
            token=token,
            expected_status=http_client.BAD_REQUEST)
        self.assertValidErrorResponse(r)

        # Test UPDATE request
        path = '/v2.0/users/%(user_id)s' % {
            'user_id': self.user_foo['id'],
        }

        r = self.admin_request(
            method='PUT',
            path=path,
            body={
                'user': {
                    'enabled': "False",
                },
            },
            token=token,
            expected_status=http_client.BAD_REQUEST)
        self.assertValidErrorResponse(r)

        r = self.admin_request(
            method='PUT',
            path=path,
            body={
                'user': {
                    # In JSON, 0|1 are not booleans
                    'enabled': 1,
                },
            },
            token=token,
            expected_status=http_client.BAD_REQUEST)
        self.assertValidErrorResponse(r)

    def test_create_update_user_valid_enabled_type(self):
        # Enforce usage of boolean for 'enabled' field
        token = self.get_scoped_token()

        # Test CREATE request
        self.admin_request(method='POST',
                           path='/v2.0/users',
                           body={
                               'user': {
                                   'name': uuid.uuid4().hex,
                                   'password': uuid.uuid4().hex,
                                   'enabled': False,
                               },
                           },
                           token=token,
                           expected_status=http_client.OK)

    def test_error_response(self):
        """Trigger assertValidErrorResponse by convention."""
        self.public_request(path='/v2.0/tenants',
                            expected_status=http_client.UNAUTHORIZED)

    def test_invalid_parameter_error_response(self):
        token = self.get_scoped_token()
        bad_body = {
            'OS-KSADM:service%s' % uuid.uuid4().hex: {
                'name': uuid.uuid4().hex,
                'type': uuid.uuid4().hex,
            },
        }
        res = self.admin_request(method='POST',
                                 path='/v2.0/OS-KSADM/services',
                                 body=bad_body,
                                 token=token,
                                 expected_status=http_client.BAD_REQUEST)
        self.assertValidErrorResponse(res)
        res = self.admin_request(method='POST',
                                 path='/v2.0/users',
                                 body=bad_body,
                                 token=token,
                                 expected_status=http_client.BAD_REQUEST)
        self.assertValidErrorResponse(res)

    def _get_user_id(self, r):
        """Helper method to return user ID from a response.

        This needs to be overridden by child classes
        based on their content type.
        """
        raise NotImplementedError()

    def _get_role_id(self, r):
        """Helper method to return a role ID from a response.

        This needs to be overridden by child classes
        based on their content type.
        """
        raise NotImplementedError()

    def _get_role_name(self, r):
        """Helper method to return role NAME from a response.

        This needs to be overridden by child classes
        based on their content type.
        """
        raise NotImplementedError()

    def _get_project_id(self, r):
        """Helper method to return project ID from a response.

        This needs to be overridden by child classes
        based on their content type.
        """
        raise NotImplementedError()

    def assertNoRoles(self, r):
        """Helper method to assert No Roles.

        This needs to be overridden by child classes
        based on their content type.
        """
        raise NotImplementedError()

    def test_update_user_tenant(self):
        token = self.get_scoped_token()

        # Create a new user
        r = self.admin_request(
            method='POST',
            path='/v2.0/users',
            body={
                'user': {
                    'name': uuid.uuid4().hex,
                    'password': uuid.uuid4().hex,
                    'tenantId': self.tenant_bar['id'],
                    'enabled': True,
                },
            },
            token=token,
            expected_status=http_client.OK)
        user_id = self._get_user_id(r.result)

        # Check if member_role is in tenant_bar
        r = self.admin_request(
            path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
                'project_id': self.tenant_bar['id'],
                'user_id': user_id
            },
            token=token,
            expected_status=http_client.OK)
        self.assertEqual(CONF.member_role_name, self._get_role_name(r.result))

        # Create a new tenant
        r = self.admin_request(
            method='POST',
            path='/v2.0/tenants',
            body={
                'tenant': {
                    'name': 'test_update_user',
                    'description': 'A description ...',
                    'enabled': True,
                },
            },
            token=token,
            expected_status=http_client.OK)
        project_id = self._get_project_id(r.result)

        # Update user's tenant
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%(user_id)s' % {
                'user_id': user_id,
            },
            body={
                'user': {
                    'tenantId': project_id,
                },
            },
            token=token,
            expected_status=http_client.OK)

        # 'member_role' should be in new_tenant
        r = self.admin_request(
            path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
                'project_id': project_id,
                'user_id': user_id
            },
            token=token,
            expected_status=http_client.OK)
        self.assertEqual('_member_', self._get_role_name(r.result))

        # 'member_role' should not be in tenant_bar any more
        r = self.admin_request(
            path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
                'project_id': self.tenant_bar['id'],
                'user_id': user_id
            },
            token=token,
            expected_status=http_client.OK)
        self.assertNoRoles(r.result)

    def test_update_user_with_invalid_tenant(self):
        token = self.get_scoped_token()

        # Create a new user
        r = self.admin_request(
            method='POST',
            path='/v2.0/users',
            body={
                'user': {
                    'name': 'test_invalid_tenant',
                    'password': uuid.uuid4().hex,
                    'tenantId': self.tenant_bar['id'],
                    'enabled': True,
                },
            },
            token=token,
            expected_status=http_client.OK)
        user_id = self._get_user_id(r.result)

        # Update user with an invalid tenant
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%(user_id)s' % {
                'user_id': user_id,
            },
            body={
                'user': {
                    'tenantId': 'abcde12345heha',
                },
            },
            token=token,
            expected_status=http_client.NOT_FOUND)

    def test_update_user_with_invalid_tenant_no_prev_tenant(self):
        token = self.get_scoped_token()

        # Create a new user
        r = self.admin_request(
            method='POST',
            path='/v2.0/users',
            body={
                'user': {
                    'name': 'test_invalid_tenant',
                    'password': uuid.uuid4().hex,
                    'enabled': True,
                },
            },
            token=token,
            expected_status=http_client.OK)
        user_id = self._get_user_id(r.result)

        # Update user with an invalid tenant
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%(user_id)s' % {
                'user_id': user_id,
            },
            body={
                'user': {
                    'tenantId': 'abcde12345heha',
                },
            },
            token=token,
            expected_status=http_client.NOT_FOUND)

    def test_update_user_with_old_tenant(self):
        token = self.get_scoped_token()

        # Create a new user
        r = self.admin_request(
            method='POST',
            path='/v2.0/users',
            body={
                'user': {
                    'name': uuid.uuid4().hex,
                    'password': uuid.uuid4().hex,
                    'tenantId': self.tenant_bar['id'],
                    'enabled': True,
                },
            },
            token=token,
            expected_status=http_client.OK)
        user_id = self._get_user_id(r.result)

        # Check if member_role is in tenant_bar
        r = self.admin_request(
            path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
                'project_id': self.tenant_bar['id'],
                'user_id': user_id
            },
            token=token,
            expected_status=http_client.OK)
        self.assertEqual(CONF.member_role_name, self._get_role_name(r.result))

        # Update user's tenant with old tenant id
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%(user_id)s' % {
                'user_id': user_id,
            },
            body={
                'user': {
                    'tenantId': self.tenant_bar['id'],
                },
            },
            token=token,
            expected_status=http_client.OK)

        # 'member_role' should still be in tenant_bar
        r = self.admin_request(
            path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
                'project_id': self.tenant_bar['id'],
                'user_id': user_id
            },
            token=token,
            expected_status=http_client.OK)
        self.assertEqual('_member_', self._get_role_name(r.result))

    def test_authenticating_a_user_with_no_password(self):
        token = self.get_scoped_token()

        username = uuid.uuid4().hex

        # create the user
        self.admin_request(
            method='POST',
            path='/v2.0/users',
            body={
                'user': {
                    'name': username,
                    'enabled': True,
                },
            },
            token=token)

        # fail to authenticate
        r = self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'passwordCredentials': {
                        'username': username,
                        'password': 'password',
                    },
                },
            },
            expected_status=http_client.UNAUTHORIZED)
        self.assertValidErrorResponse(r)

    def test_www_authenticate_header(self):
        r = self.public_request(
            path='/v2.0/tenants',
            expected_status=http_client.UNAUTHORIZED)
        self.assertEqual('Keystone uri="http://localhost"',
                         r.headers.get('WWW-Authenticate'))

    def test_www_authenticate_header_host(self):
        test_url = 'http://%s:4187' % uuid.uuid4().hex
        self.config_fixture.config(public_endpoint=test_url)
        r = self.public_request(
            path='/v2.0/tenants',
            expected_status=http_client.UNAUTHORIZED)
        self.assertEqual('Keystone uri="%s"' % test_url,
                         r.headers.get('WWW-Authenticate'))
class LegacyV2UsernameTests(object):
    """Test to show the broken username behavior in V2.

    The V2 API is documented to use `username` instead of `name`. The
    API was instead implemented to use `name`, leaving `username` to fall
    into the `extra` field.

    These tests ensure this behavior works so fixes to `username`/`name`
    will be backward compatible.
    """

    def create_user(self, **user_attrs):
        """Create a user and return the response object.

        :param user_attrs: attributes added to the request body (optional)
        """
        token = self.get_scoped_token()
        body = {
            'user': {
                'name': uuid.uuid4().hex,
                'enabled': True,
            },
        }
        body['user'].update(user_attrs)

        return self.admin_request(
            method='POST',
            path='/v2.0/users',
            token=token,
            body=body,
            expected_status=http_client.OK)

    def test_create_with_extra_username(self):
        """The response for creating a user will contain the extra fields."""
        fake_username = uuid.uuid4().hex
        r = self.create_user(username=fake_username)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(fake_username, user.get('username'))

    def test_get_returns_username_from_extra(self):
        """The response for getting a user will contain the extra fields."""
        token = self.get_scoped_token()

        fake_username = uuid.uuid4().hex
        r = self.create_user(username=fake_username)

        id_ = self.get_user_attribute_from_response(r, 'id')
        r = self.admin_request(path='/v2.0/users/%s' % id_, token=token)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(fake_username, user.get('username'))

    def test_update_returns_new_username_when_adding_username(self):
        """The response for updating a user will contain the extra fields.

        This is specifically testing for updating a username when a value
        was not previously set.
        """
        token = self.get_scoped_token()

        r = self.create_user()

        id_ = self.get_user_attribute_from_response(r, 'id')
        name = self.get_user_attribute_from_response(r, 'name')
        enabled = self.get_user_attribute_from_response(r, 'enabled')
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%s' % id_,
            token=token,
            body={
                'user': {
                    'name': name,
                    'username': 'new_username',
                    'enabled': enabled,
                },
            },
            expected_status=http_client.OK)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual('new_username', user.get('username'))

    def test_update_returns_new_username_when_updating_username(self):
        """The response for updating a user will contain the extra fields.

        This tests updating a username that was previously set.
        """
        token = self.get_scoped_token()

        r = self.create_user(username='original_username')

        id_ = self.get_user_attribute_from_response(r, 'id')
        name = self.get_user_attribute_from_response(r, 'name')
        enabled = self.get_user_attribute_from_response(r, 'enabled')
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%s' % id_,
            token=token,
            body={
                'user': {
                    'name': name,
                    'username': 'new_username',
                    'enabled': enabled,
                },
            },
            expected_status=http_client.OK)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual('new_username', user.get('username'))

    def test_username_is_always_returned_create(self):
        """Username is set as the value of name if no username is provided.

        This matches the v2.0 spec where we really should be using username
        and not name.
        """
        r = self.create_user()

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(user.get('name'), user.get('username'))

    def test_username_is_always_returned_get(self):
        """Username is set as the value of name if no username is provided.

        This matches the v2.0 spec where we really should be using username
        and not name.
        """
        token = self.get_scoped_token()

        r = self.create_user()

        id_ = self.get_user_attribute_from_response(r, 'id')
        r = self.admin_request(path='/v2.0/users/%s' % id_, token=token)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(user.get('name'), user.get('username'))

    def test_username_is_always_returned_get_by_name(self):
        """Username is set as the value of name if no username is provided.

        This matches the v2.0 spec where we really should be using username
        and not name.
        """
        token = self.get_scoped_token()

        r = self.create_user()

        name = self.get_user_attribute_from_response(r, 'name')
        r = self.admin_request(path='/v2.0/users?name=%s' % name, token=token)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(user.get('name'), user.get('username'))

    def test_username_is_always_returned_update_no_username_provided(self):
        """Username is set as the value of name if no username is provided.

        This matches the v2.0 spec where we really should be using username
        and not name.
        """
        token = self.get_scoped_token()

        r = self.create_user()

        id_ = self.get_user_attribute_from_response(r, 'id')
        name = self.get_user_attribute_from_response(r, 'name')
        enabled = self.get_user_attribute_from_response(r, 'enabled')
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%s' % id_,
            token=token,
            body={
                'user': {
                    'name': name,
                    'enabled': enabled,
                },
            },
            expected_status=http_client.OK)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(user.get('name'), user.get('username'))

    def test_updated_username_is_returned(self):
        """Username is set as the value of name if no username is provided.

        This matches the v2.0 spec where we really should be using username
        and not name.
        """
        token = self.get_scoped_token()

        r = self.create_user()

        id_ = self.get_user_attribute_from_response(r, 'id')
        name = self.get_user_attribute_from_response(r, 'name')
        enabled = self.get_user_attribute_from_response(r, 'enabled')
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%s' % id_,
            token=token,
            body={
                'user': {
                    'name': name,
                    'enabled': enabled,
                },
            },
            expected_status=http_client.OK)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(user.get('name'), user.get('username'))

    def test_username_can_be_used_instead_of_name_create(self):
        token = self.get_scoped_token()

        r = self.admin_request(
            method='POST',
            path='/v2.0/users',
            token=token,
            body={
                'user': {
                    'username': uuid.uuid4().hex,
                    'enabled': True,
                },
            },
            expected_status=http_client.OK)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(user.get('name'), user.get('username'))

    def test_username_can_be_used_instead_of_name_update(self):
        token = self.get_scoped_token()

        r = self.create_user()

        id_ = self.get_user_attribute_from_response(r, 'id')
        new_username = uuid.uuid4().hex
        enabled = self.get_user_attribute_from_response(r, 'enabled')
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%s' % id_,
            token=token,
            body={
                'user': {
                    'username': new_username,
                    'enabled': enabled,
                },
            },
            expected_status=http_client.OK)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(new_username, user.get('name'))
        self.assertEqual(user.get('name'), user.get('username'))
class RestfulTestCase(rest.RestfulTestCase):
    """Restful test case that grants the default user the admin role."""

    def setUp(self):
        super(RestfulTestCase, self).setUp()

        # TODO(termie): add an admin user to the fixtures and use that user
        # override the fixtures, for now
        user_id = self.user_foo['id']
        tenant_id = self.tenant_bar['id']
        admin_role_id = self.role_admin['id']
        self.assignment_api.add_role_to_user_and_project(
            user_id, tenant_id, admin_role_id)
class V2TestCase(object):
    def config_overrides(self):
        """Override config to use the templated catalog test fixture."""
        super(V2TestCase, self).config_overrides()
        self.config_fixture.config(
            group='catalog',
            driver='templated',
            template_file=unit.dirs.tests('default_catalog.templates'))
def _get_user_id(self, r):
return r['user']['id']
def _get_role_name(self, r):
return r['roles'][0]['name']
def _get_role_id(self, r):
return r['roles'][0]['id']
def _get_project_id(self, r):
return r['tenant']['id']
def _get_token_id(self, r):
return r.result['access']['token']['id']
def assertNoRoles(self, r):
self.assertEqual([], r['roles'])
def assertValidErrorResponse(self, r):
self.assertIsNotNone(r.result.get('error'))
self.assertValidError(r.result['error'])
self.assertEqual(r.result['error']['code'], r.status_code)
def assertValidExtension(self, extension, expected):
super(V2TestCase, self).assertValidExtension(extension)
descriptions = [ext['description'] for ext in six.itervalues(expected)]
description = extension.get('description')
self.assertIsNotNone(description)
self.assertIn(description, descriptions)
self.assertIsNotNone(extension.get('links'))
self.assertNotEmpty(extension.get('links'))
for link in extension.get('links'):
self.assertValidExtensionLink(link)
def assertValidExtensionListResponse(self, r, expected):
self.assertIsNotNone(r.result.get('extensions'))
self.assertIsNotNone(r.result['extensions'].get('values'))
self.assertNotEmpty(r.result['extensions'].get('values'))
for extension in r.result['extensions']['values']:
self.assertValidExtension(extension, expected)
def assertValidExtensionResponse(self, r, expected):
self.assertValidExtension(r.result.get('extension'), expected)
def assertValidUser(self, user):
super(V2TestCase, self).assertValidUser(user)
self.assertNotIn('default_project_id', user)
if 'tenantId' in user:
# NOTE(morganfainberg): tenantId should never be "None", it gets
# filtered out of the object if it is there. This is suspenders
# and a belt check to avoid unintended regressions.
self.assertIsNotNone(user.get('tenantId'))
    def assertValidAuthenticationResponse(self, r,
                                          require_service_catalog=False):
        """Validate the structure of a v2 token response.

        :param require_service_catalog: when True, also require user roles
            and a non-empty serviceCatalog in the response body.
        """
        self.assertIsNotNone(r.result.get('access'))
        self.assertIsNotNone(r.result['access'].get('token'))
        self.assertIsNotNone(r.result['access'].get('user'))

        # validate token
        self.assertIsNotNone(r.result['access']['token'].get('id'))
        self.assertIsNotNone(r.result['access']['token'].get('expires'))
        tenant = r.result['access']['token'].get('tenant')
        if tenant is not None:
            # validate tenant
            self.assertIsNotNone(tenant.get('id'))
            self.assertIsNotNone(tenant.get('name'))

        # validate user
        self.assertIsNotNone(r.result['access']['user'].get('id'))
        self.assertIsNotNone(r.result['access']['user'].get('name'))

        if require_service_catalog:
            # roles are only provided with a service catalog
            roles = r.result['access']['user'].get('roles')
            self.assertNotEmpty(roles)
            for role in roles:
                self.assertIsNotNone(role.get('name'))

        serviceCatalog = r.result['access'].get('serviceCatalog')
        # validate service catalog
        if require_service_catalog:
            self.assertIsNotNone(serviceCatalog)
        if serviceCatalog is not None:
            self.assertIsInstance(serviceCatalog, list)
            if require_service_catalog:
                self.assertNotEmpty(serviceCatalog)
            for service in r.result['access']['serviceCatalog']:
                # validate service
                self.assertIsNotNone(service.get('name'))
                self.assertIsNotNone(service.get('type'))

                # services contain at least one endpoint
                self.assertIsNotNone(service.get('endpoints'))
                self.assertNotEmpty(service['endpoints'])
                for endpoint in service['endpoints']:
                    # validate service endpoint
                    self.assertIsNotNone(endpoint.get('publicURL'))
def assertValidTenantListResponse(self, r):
self.assertIsNotNone(r.result.get('tenants'))
self.assertNotEmpty(r.result['tenants'])
for tenant in r.result['tenants']:
self.assertValidTenant(tenant)
self.assertIsNotNone(tenant.get('enabled'))
self.assertIn(tenant.get('enabled'), [True, False])
def assertValidUserResponse(self, r):
self.assertIsNotNone(r.result.get('user'))
self.assertValidUser(r.result['user'])
def assertValidTenantResponse(self, r):
self.assertIsNotNone(r.result.get('tenant'))
self.assertValidTenant(r.result['tenant'])
def assertValidRoleListResponse(self, r):
self.assertIsNotNone(r.result.get('roles'))
self.assertNotEmpty(r.result['roles'])
for role in r.result['roles']:
self.assertValidRole(role)
def assertValidVersion(self, version):
super(V2TestCase, self).assertValidVersion(version)
self.assertIsNotNone(version.get('links'))
self.assertNotEmpty(version.get('links'))
for link in version.get('links'):
self.assertIsNotNone(link.get('rel'))
self.assertIsNotNone(link.get('href'))
self.assertIsNotNone(version.get('media-types'))
self.assertNotEmpty(version.get('media-types'))
for media in version.get('media-types'):
self.assertIsNotNone(media.get('base'))
self.assertIsNotNone(media.get('type'))
def assertValidMultipleChoiceResponse(self, r):
self.assertIsNotNone(r.result.get('versions'))
self.assertIsNotNone(r.result['versions'].get('values'))
self.assertNotEmpty(r.result['versions']['values'])
for version in r.result['versions']['values']:
self.assertValidVersion(version)
def assertValidVersionResponse(self, r):
self.assertValidVersion(r.result.get('version'))
def assertValidEndpointListResponse(self, r):
self.assertIsNotNone(r.result.get('endpoints'))
self.assertNotEmpty(r.result['endpoints'])
for endpoint in r.result['endpoints']:
self.assertIsNotNone(endpoint.get('id'))
self.assertIsNotNone(endpoint.get('name'))
self.assertIsNotNone(endpoint.get('type'))
self.assertIsNotNone(endpoint.get('publicURL'))
self.assertIsNotNone(endpoint.get('internalURL'))
self.assertIsNotNone(endpoint.get('adminURL'))
def get_user_from_response(self, r):
return r.result.get('user')
def get_user_attribute_from_response(self, r, attribute_name):
return r.result['user'][attribute_name]
def test_service_crud_requires_auth(self):
"""Service CRUD should return unauthorized without an X-Auth-Token."""
# values here don't matter because it will be unauthorized before
# they're checked (bug 1006822).
service_path = '/v2.0/OS-KSADM/services/%s' % uuid.uuid4().hex
service_body = {
'OS-KSADM:service': {
'name': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
},
}
r = self.admin_request(method='GET',
path='/v2.0/OS-KSADM/services',
expected_status=http_client.UNAUTHORIZED)
self.assertValidErrorResponse(r)
r = self.admin_request(method='POST',
path='/v2.0/OS-KSADM/services',
body=service_body,
expected_status=http_client.UNAUTHORIZED)
self.assertValidErrorResponse(r)
r = self.admin_request(method='GET',
path=service_path,
expected_status=http_client.UNAUTHORIZED)
self.assertValidErrorResponse(r)
r = self.admin_request(method='DELETE',
path=service_path,
expected_status=http_client.UNAUTHORIZED)
self.assertValidErrorResponse(r)
def test_user_role_list_requires_auth(self):
"""User role list return unauthorized without an X-Auth-Token."""
# values here don't matter because it will be unauthorized before
# they're checked (bug 1006815).
path = '/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % {
'tenant_id': uuid.uuid4().hex,
'user_id': uuid.uuid4().hex,
}
r = self.admin_request(path=path,
expected_status=http_client.UNAUTHORIZED)
self.assertValidErrorResponse(r)
def test_fetch_revocation_list_nonadmin_fails(self):
self.admin_request(
method='GET',
path='/v2.0/tokens/revoked',
expected_status=http_client.UNAUTHORIZED)
def test_fetch_revocation_list_admin_200(self):
token = self.get_scoped_token()
r = self.admin_request(
method='GET',
path='/v2.0/tokens/revoked',
token=token,
expected_status=http_client.OK)
self.assertValidRevocationListResponse(r)
def assertValidRevocationListResponse(self, response):
self.assertIsNotNone(response.result['signed'])
    def _fetch_parse_revocation_list(self):
        """Revoke a fresh token, then fetch and CMS-verify the revocation list.

        Returns a (revocation-list-data, revoked-token) tuple so callers can
        check that the revoked token's hash appears in the list.
        """
        token1 = self.get_scoped_token()
        # TODO(morganfainberg): Because this is making a restful call to the
        # app a change to UTCNOW via mock.patch will not affect the returned
        # token. The only surefire way to ensure there is not a transient bug
        # based upon when the second token is issued is with a sleep. This
        # issue all stems from the limited resolution (no microseconds) on the
        # expiry time of tokens and the way revocation events utilizes token
        # expiry to revoke individual tokens. This is a stop-gap until all
        # associated issues with resolution on expiration and revocation events
        # are resolved.
        time.sleep(1)
        token2 = self.get_scoped_token()
        # token1 stays valid and authorizes the admin calls below; token2 is
        # the one being revoked.
        self.admin_request(method='DELETE',
                           path='/v2.0/tokens/%s' % token2,
                           token=token1)
        r = self.admin_request(
            method='GET',
            path='/v2.0/tokens/revoked',
            token=token1,
            expected_status=http_client.OK)
        signed_text = r.result['signed']
        # The list is CMS-signed; verify the signature and decode it before
        # parsing the JSON payload.
        data_json = cms.cms_verify(signed_text, CONF.signing.certfile,
                                   CONF.signing.ca_certs)
        data = jsonutils.loads(data_json)
        return (data, token2)
    def test_fetch_revocation_list_md5(self):
        """Hash for tokens in revocation list and server config should match.
        If the server is configured for md5, then the revocation list has
        tokens hashed with MD5.
        """
        # The default hash algorithm is md5.
        hash_algorithm = 'md5'
        (data, token) = self._fetch_parse_revocation_list()
        # The token revoked by the helper should be the first list entry.
        token_hash = cms.cms_hash_token(token, mode=hash_algorithm)
        self.assertThat(token_hash, matchers.Equals(data['revoked'][0]['id']))
    def test_fetch_revocation_list_sha256(self):
        """Hash for tokens in revocation list and server config should match.
        If the server is configured for sha256, then the revocation list has
        tokens hashed with SHA256.
        """
        # Override the default (md5) before any token is issued.
        hash_algorithm = 'sha256'
        self.config_fixture.config(group='token',
                                   hash_algorithm=hash_algorithm)
        (data, token) = self._fetch_parse_revocation_list()
        # The token revoked by the helper should be the first list entry.
        token_hash = cms.cms_hash_token(token, mode=hash_algorithm)
        self.assertThat(token_hash, matchers.Equals(data['revoked'][0]['id']))
def test_create_update_user_invalid_enabled_type(self):
# Enforce usage of boolean for 'enabled' field
token = self.get_scoped_token()
# Test CREATE request
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
# In JSON, "true|false" are not boolean
'enabled': "true",
},
},
token=token,
expected_status=http_client.BAD_REQUEST)
self.assertValidErrorResponse(r)
# Test UPDATE request
r = self.admin_request(
method='PUT',
path='/v2.0/users/%(user_id)s' % {
'user_id': self.user_foo['id'],
},
body={
'user': {
# In JSON, "true|false" are not boolean
'enabled': "true",
},
},
token=token,
expected_status=http_client.BAD_REQUEST)
self.assertValidErrorResponse(r)
    def test_authenticating_a_user_with_an_OSKSADM_password(self):
        """A user created with OS-KSADM:password can authenticate with it.

        Also verifies the password attribute is not echoed back on GET.
        """
        token = self.get_scoped_token()
        username = uuid.uuid4().hex
        password = uuid.uuid4().hex
        # create the user
        r = self.admin_request(
            method='POST',
            path='/v2.0/users',
            body={
                'user': {
                    'name': username,
                    'OS-KSADM:password': password,
                    'enabled': True,
                },
            },
            token=token)
        # successfully authenticate
        self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'passwordCredentials': {
                        'username': username,
                        'password': password,
                    },
                },
            },
            expected_status=http_client.OK)
        # ensure password doesn't leak
        user_id = r.result['user']['id']
        r = self.admin_request(
            method='GET',
            path='/v2.0/users/%s' % user_id,
            token=token,
            expected_status=http_client.OK)
        self.assertNotIn('OS-KSADM:password', r.result['user'])
    def test_updating_a_user_with_an_OSKSADM_password(self):
        """Updating a password via OS-KSADM lets the user authenticate."""
        token = self.get_scoped_token()
        user_id = self.user_foo['id']
        password = uuid.uuid4().hex
        # update the user
        self.admin_request(
            method='PUT',
            path='/v2.0/users/%s/OS-KSADM/password' % user_id,
            body={
                'user': {
                   'password': password,
                },
            },
            token=token,
            expected_status=http_client.OK)
        # successfully authenticate
        self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'passwordCredentials': {
                        'username': self.user_foo['name'],
                        'password': password,
                    },
                },
            },
            expected_status=http_client.OK)
    def test_enable_or_disable_user(self):
        """Toggle a user's enabled flag via OS-KSADM and verify its typing."""
        token = self.get_scoped_token()
        user_id = self.user_badguy['id']
        # The fixture user starts out disabled.
        self.assertFalse(self.user_badguy['enabled'])
        def _admin_request(body, status):
            # Helper: PUT `body` to the enabled endpoint, expect `status`.
            resp = self.admin_request(
                method='PUT',
                path='/v2.0/users/%s/OS-KSADM/enabled' % user_id,
                token=token,
                body=body,
                expected_status=status)
            return resp
        # Enable the user.
        body = {'user': {'enabled': True}}
        resp = _admin_request(body, http_client.OK)
        self.assertTrue(resp.json['user']['enabled'])
        # Disable the user.
        body = {'user': {'enabled': False}}
        resp = _admin_request(body, http_client.OK)
        self.assertFalse(resp.json['user']['enabled'])
        # Attributes other than `enabled` should still work due to bug 1607751
        body = {
            'user': {
                'description': uuid.uuid4().hex,
                'name': uuid.uuid4().hex,
                'enabled': True
            }
        }
        _admin_request(body, http_client.OK)
        # `enabled` is boolean, type other than boolean is not allowed.
        body = {'user': {'enabled': uuid.uuid4().hex}}
        _admin_request(body, http_client.BAD_REQUEST)
class V2TestCaseUUID(V2TestCase, RestfulTestCase, CoreApiTests,
                     LegacyV2UsernameTests):
    """Run the v2 API test suite against the uuid token provider."""
    def config_overrides(self):
        super(V2TestCaseUUID, self).config_overrides()
        self.config_fixture.config(group='token', provider='uuid')
class V2TestCaseFernet(V2TestCase, RestfulTestCase, CoreApiTests,
                       LegacyV2UsernameTests):
    """Run the v2 API test suite against the fernet token provider."""
    def config_overrides(self):
        super(V2TestCaseFernet, self).config_overrides()
        self.config_fixture.config(group='token', provider='fernet')
        # Fernet needs an on-disk key repository for its encryption keys.
        self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
    def test_fetch_revocation_list_md5(self):
        self.skipTest('Revocation lists do not support Fernet')
    def test_fetch_revocation_list_sha256(self):
        self.skipTest('Revocation lists do not support Fernet')
class RevokeApiTestCase(V2TestCase, RestfulTestCase, CoreApiTests,
                        LegacyV2UsernameTests):
    """Run the v2 API test suite with revocation-events enabled."""
    def config_overrides(self):
        super(RevokeApiTestCase, self).config_overrides()
        # revoke_by_id=False switches revocation to the revoke-events API,
        # which disables the signed revocation list tested below.
        self.config_fixture.config(
            group='token',
            provider='pki',
            revoke_by_id=False)
    def test_fetch_revocation_list_admin_200(self):
        self.skip_test_overrides('Revoke API disables revocation_list.')
    def test_fetch_revocation_list_md5(self):
        self.skip_test_overrides('Revoke API disables revocation_list.')
    def test_fetch_revocation_list_sha256(self):
        self.skip_test_overrides('Revoke API disables revocation_list.')
class TestFernetTokenProviderV2(RestfulTestCase):
    """Validate v2 token operations when the Fernet provider is configured."""
    def setUp(self):
        super(TestFernetTokenProviderV2, self).setUp()
        # Add catalog data
        self.region = unit.new_region_ref()
        self.region_id = self.region['id']
        self.catalog_api.create_region(self.region)
        self.service = unit.new_service_ref()
        self.service_id = self.service['id']
        self.catalog_api.create_service(self.service_id, self.service)
        self.endpoint = unit.new_endpoint_ref(service_id=self.service_id,
                                              interface='public',
                                              region_id=self.region_id)
        self.endpoint_id = self.endpoint['id']
        self.catalog_api.create_endpoint(self.endpoint_id, self.endpoint)
    def assertValidUnscopedTokenResponse(self, r):
        # Schema-validate the body as a v2 unscoped token.
        v2.unscoped_validator.validate(r.json['access'])
    def assertValidScopedTokenResponse(self, r):
        # Schema-validate the body as a v2 scoped token.
        v2.scoped_validator.validate(r.json['access'])
    # Used by RestfulTestCase
    def _get_token_id(self, r):
        return r.result['access']['token']['id']
    def new_project_ref(self):
        """Return a minimal project reference in the default domain."""
        return {'id': uuid.uuid4().hex,
                'name': uuid.uuid4().hex,
                'description': uuid.uuid4().hex,
                'domain_id': 'default',
                'enabled': True}
    def config_overrides(self):
        super(TestFernetTokenProviderV2, self).config_overrides()
        self.config_fixture.config(group='token', provider='fernet')
        # Fernet needs an on-disk key repository for its encryption keys.
        self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
    def test_authenticate_unscoped_token(self):
        unscoped_token = self.get_unscoped_token()
        # Fernet token must be of length 255 per usability requirements
        self.assertLess(len(unscoped_token), 255)
    def test_validate_unscoped_token(self):
        """An admin can validate another user's unscoped token."""
        # Grab an admin token to validate with
        project_ref = self.new_project_ref()
        self.resource_api.create_project(project_ref['id'], project_ref)
        self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
                                                         project_ref['id'],
                                                         self.role_admin['id'])
        admin_token = self.get_scoped_token(tenant_id=project_ref['id'])
        unscoped_token = self.get_unscoped_token()
        path = ('/v2.0/tokens/%s' % unscoped_token)
        resp = self.admin_request(
            method='GET',
            path=path,
            token=admin_token,
            expected_status=http_client.OK)
        self.assertValidUnscopedTokenResponse(resp)
    def test_authenticate_scoped_token(self):
        project_ref = self.new_project_ref()
        self.resource_api.create_project(project_ref['id'], project_ref)
        self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'], project_ref['id'], self.role_service['id'])
        token = self.get_scoped_token(tenant_id=project_ref['id'])
        # Fernet token must be of length 255 per usability requirements
        self.assertLess(len(token), 255)
    def test_validate_scoped_token(self):
        """An admin can validate a member's scoped token with belongsTo."""
        project_ref = self.new_project_ref()
        self.resource_api.create_project(project_ref['id'], project_ref)
        self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
                                                         project_ref['id'],
                                                         self.role_admin['id'])
        project2_ref = self.new_project_ref()
        self.resource_api.create_project(project2_ref['id'], project2_ref)
        self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'], project2_ref['id'], self.role_member['id'])
        admin_token = self.get_scoped_token(tenant_id=project_ref['id'])
        member_token = self.get_scoped_token(tenant_id=project2_ref['id'])
        path = ('/v2.0/tokens/%s?belongsTo=%s' % (member_token,
                                                  project2_ref['id']))
        # Validate token belongs to project
        resp = self.admin_request(
            method='GET',
            path=path,
            token=admin_token,
            expected_status=http_client.OK)
        self.assertValidScopedTokenResponse(resp)
    def test_token_authentication_and_validation(self):
        """Test token authentication for Fernet token provider.
        Verify that token authentication returns validate response code and
        valid token belongs to project.
        """
        project_ref = self.new_project_ref()
        self.resource_api.create_project(project_ref['id'], project_ref)
        unscoped_token = self.get_unscoped_token()
        self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
                                                         project_ref['id'],
                                                         self.role_admin['id'])
        token_id = unscoped_token
        if six.PY2:
            # NOTE(review): py2 appears to require a native (byte) string
            # for the token id in the request body -- confirm against the
            # request-serialization layer.
            token_id = token_id.encode('ascii')
        r = self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'tenantName': project_ref['name'],
                    'token': {
                        'id': token_id,
                    }
                }
            },
            expected_status=http_client.OK)
        token_id = self._get_token_id(r)
        path = ('/v2.0/tokens/%s?belongsTo=%s' % (token_id, project_ref['id']))
        # Validate token belongs to project
        resp = self.admin_request(
            method='GET',
            path=path,
            token=self.get_admin_token(),
            expected_status=http_client.OK)
        self.assertValidScopedTokenResponse(resp)
    def test_rescoped_tokens_maintain_original_expiration(self):
        """Rescoping a token must preserve the original expiration time."""
        project_ref = self.new_project_ref()
        self.resource_api.create_project(project_ref['id'], project_ref)
        self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
                                                         project_ref['id'],
                                                         self.role_admin['id'])
        resp = self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'tenantName': project_ref['name'],
                    'passwordCredentials': {
                        'username': self.user_foo['name'],
                        'password': self.user_foo['password']
                    }
                }
            },
            # NOTE(lbragstad): This test may need to be refactored if Keystone
            # decides to disallow rescoping using a scoped token.
            expected_status=http_client.OK)
        original_token = resp.result['access']['token']['id']
        original_expiration = resp.result['access']['token']['expires']
        resp = self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'tenantName': project_ref['name'],
                    'token': {
                        'id': original_token,
                    }
                }
            },
            expected_status=http_client.OK)
        rescoped_token = resp.result['access']['token']['id']
        rescoped_expiration = resp.result['access']['token']['expires']
        # A new token id is issued, but the expiry carries over.
        self.assertNotEqual(original_token, rescoped_token)
        self.assertEqual(original_expiration, rescoped_expiration)
        self.assertValidScopedTokenResponse(resp)
| |
import base64
import binascii
import hashlib
import md5
import random
import time

from django.http import HttpResponse
from django.utils.translation import ugettext as _
def djangouser_auth(username, password):
    """
    Check username and password against
    django.contrib.auth.models.User
    """
    from django.contrib.auth.models import User
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        # Unknown username: authentication fails.
        return False
    return user.check_password(password)
class NoAuthentication(object):
    """
    No authentication: Permit every request.
    """

    def is_authenticated(self, request):
        """Accept any request, authenticated or not."""
        return True

    def challenge_headers(self):
        """No challenge is ever issued, so no extra headers are needed."""
        return {}
class HttpBasicAuthentication(object):
    """
    HTTP/1.0 basic authentication.
    """
    def __init__(self, authfunc=djangouser_auth, realm=_('Restricted Access')):
        """
        authfunc:
            A user-defined function which takes a username and
            password as its first and second arguments respectively
            and returns True if the user is authenticated
        realm:
            An identifier for the authority that is requesting
            authorization
        """
        self.realm = realm
        self.authfunc = authfunc
    def challenge_headers(self):
        """
        Returns the http headers that ask for appropriate
        authorization.
        """
        return {'WWW-Authenticate' : 'Basic realm="%s"' % self.realm}
    def is_authenticated(self, request):
        """
        Checks whether a request comes from an authorized user.

        Returns False for missing, non-Basic, or malformed credentials
        rather than raising.
        """
        # dict.has_key() is deprecated (and removed in Python 3).
        if 'HTTP_AUTHORIZATION' not in request.META:
            return False
        (authmeth, auth) = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
        if authmeth.lower() != 'basic':
            return False
        # Use the base64 module instead of the Python-2-only
        # str.decode('base64') codec; reject malformed credentials instead
        # of letting the decode error propagate as a server error.
        try:
            auth = base64.b64decode(auth.strip())
        except (TypeError, binascii.Error):
            return False
        username, password = auth.split(':', 1)
        return self.authfunc(username=username, password=password)
def digest_password(realm, username, password):
    """
    Construct the appropriate hashcode needed for HTTP digest

    Returns the hex digest of 'username:realm:password' -- the HA1
    value of RFC 2617.
    """
    # hashlib replaces the deprecated md5 module (removed in Python 3).
    ha1 = "%s:%s:%s" % (username, realm, password)
    return hashlib.md5(ha1.encode('utf-8')).hexdigest()
class HttpDigestAuthentication(object):
    """
    HTTP/1.1 digest authentication (RFC 2617).
    Uses code from the Python Paste Project (MIT Licence).

    NOTE(review): this class still uses the deprecated ``md5`` module
    (removed in Python 3); consider migrating to ``hashlib.md5``.
    """
    def __init__(self, authfunc, realm=_('Restricted Access')):
        """
        authfunc:
            A user-defined function which takes a username and
            a realm as its first and second arguments respectively
            and returns the combined md5 hash of username,
            authentication realm and password.
        realm:
            An identifier for the authority that is requesting
            authorization
        """
        self.realm = realm
        self.authfunc = authfunc
        # Maps nonce -> highest request counter (nc) seen, for replay
        # prevention.  NOTE(review): entries for abandoned nonces are never
        # expired, so this dict can grow without bound.
        self.nonce = {}  # prevention of replay attacks
    def get_auth_dict(self, auth_string):
        """
        Splits WWW-Authenticate and HTTP_AUTHORIZATION strings
        into a dictionaries, e.g.
        {
            nonce  : "951abe58eddbb49c1ed77a3a5fb5fc2e"',
            opaque : "34de40e4f2e4f4eda2a3952fd2abab16"',
            realm  : "realm1"',
            qop    : "auth"'
        }
        """
        amap = {}
        for itm in auth_string.split(", "):
            (k, v) = [s.strip() for s in itm.split("=", 1)]
            # Strip the surrounding quotes from quoted-string values.
            amap[k] = v.replace('"', '')
        return amap
    def get_auth_response(self, http_method, fullpath, username, nonce, realm, qop, cnonce, nc):
        """
        Returns the server-computed digest response key.
        http_method:
            The request method, e.g. GET
        username:
            The user to be authenticated
        fullpath:
            The absolute URI to be accessed by the user
        nonce:
            A server-specified data string which should be
            uniquely generated each time a 401 response is made
        realm:
            A string to be displayed to users so they know which
            username and password to use
        qop:
            Indicates the "quality of protection" values supported
            by the server.  The value "auth" indicates authentication.
        cnonce:
            An opaque quoted string value provided by the client
            and used by both client and server to avoid chosen
            plaintext attacks, to provide mutual authentication,
            and to provide some message integrity protection.
        nc:
            Hexadecimal request counter
        """
        # HA1 is supplied by authfunc; HA2 covers the method and URI.
        ha1 = self.authfunc(realm, username)
        ha2 = md5.md5('%s:%s' % (http_method, fullpath)).hexdigest()
        if qop:
            chk = "%s:%s:%s:%s:%s:%s" % (ha1, nonce, nc, cnonce, qop, ha2)
        else:
            chk = "%s:%s:%s" % (ha1, nonce, ha2)
        computed_response = md5.md5(chk).hexdigest()
        return computed_response
    def challenge_headers(self, stale=''):
        """
        Returns the http headers that ask for appropriate
        authorization.
        """
        nonce  = md5.md5(
            "%s:%s" % (time.time(), random.random())).hexdigest()
        opaque = md5.md5(
            "%s:%s" % (time.time(), random.random())).hexdigest()
        # Remember the nonce so replays can be detected later.
        self.nonce[nonce] = None
        parts = {'realm': self.realm, 'qop': 'auth',
                 'nonce': nonce, 'opaque': opaque}
        if stale:
            parts['stale'] = 'true'
        head = ", ".join(['%s="%s"' % (k, v) for (k, v) in parts.items()])
        return {'WWW-Authenticate': 'Digest %s' % head}
    def is_authenticated(self, request):
        """
        Checks whether a request comes from an authorized user.
        """
        # Make sure the request is a valid HttpDigest request
        # (dict.has_key() is deprecated and removed in Python 3).
        if 'HTTP_AUTHORIZATION' not in request.META:
            return False
        fullpath = request.META['SCRIPT_NAME'] + request.META['PATH_INFO']
        (authmeth, auth) = request.META['HTTP_AUTHORIZATION'].split(" ", 1)
        if authmeth.lower() != 'digest':
            return False
        # Extract auth parameters from request
        amap = self.get_auth_dict(auth)
        try:
            username = amap['username']
            authpath = amap['uri']
            nonce = amap['nonce']
            realm = amap['realm']
            response = amap['response']
            # NOTE: assert-based validation is stripped under ``python -O``.
            assert authpath.split("?", 1)[0] in fullpath
            assert realm == self.realm
            qop = amap.get('qop', '')
            cnonce = amap.get('cnonce', '')
            nc = amap.get('nc', '00000000')
            if qop:
                assert 'auth' == qop
                assert nonce and nc
        except (KeyError, AssertionError):
            # Malformed or incomplete digest header.  A bare ``except:``
            # would also swallow KeyboardInterrupt/SystemExit.
            return False
        # Compute response key
        computed_response = self.get_auth_response(request.method, fullpath, username, nonce, realm, qop, cnonce, nc)
        # Compare server-side key with key from client
        # Prevent replay attacks
        if not computed_response or computed_response != response:
            if nonce in self.nonce:
                del self.nonce[nonce]
            return False
        # Lexicographic comparison is correct for fixed-width lowercase hex.
        pnc = self.nonce.get(nonce, '00000000')
        if nc <= pnc:
            if nonce in self.nonce:
                del self.nonce[nonce]
            return False  # stale = True
        self.nonce[nonce] = nc
        return True
| |
from sqlalchemy import Integer, String, ForeignKey, and_, or_, func, \
literal, update, table, bindparam, column, select, exc
from sqlalchemy import testing
from sqlalchemy.dialects import mysql
from sqlalchemy.engine import default
from sqlalchemy.testing import AssertsCompiledSQL, eq_, fixtures, \
assert_raises_message
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy import util
class _UpdateFromTestBase(object):
    """Shared table definitions and row fixtures for the UPDATE test suites."""
    @classmethod
    def define_tables(cls, metadata):
        # 'mytable'/'myothertable' have no PKs; they serve pure
        # compile-level assertions.
        Table('mytable', metadata,
              Column('myid', Integer),
              Column('name', String(30)),
              Column('description', String(50)))
        Table('myothertable', metadata,
              Column('otherid', Integer),
              Column('othername', String(30)))
        Table('users', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(30), nullable=False))
        Table('addresses', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('user_id', None, ForeignKey('users.id')),
              Column('name', String(30), nullable=False),
              Column('email_address', String(50), nullable=False))
        Table('dingalings', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('address_id', None, ForeignKey('addresses.id')),
              Column('data', String(30)))
        # 'ycol' exercises a column whose Python key ('y') differs from its
        # SQL name; 'data' has a client-side onupdate default.
        Table('update_w_default', metadata,
              Column('id', Integer, primary_key=True),
              Column('x', Integer),
              Column('ycol', Integer, key='y'),
              Column('data', String(30), onupdate=lambda: "hi"))
    @classmethod
    def fixtures(cls):
        """Seed rows used by the round-trip tests."""
        return dict(
            users=(
                ('id', 'name'),
                (7, 'jack'),
                (8, 'ed'),
                (9, 'fred'),
                (10, 'chuck')
            ),
            addresses = (
                ('id', 'user_id', 'name', 'email_address'),
                (1, 7, 'x', 'jack@bean.com'),
                (2, 8, 'x', 'ed@wood.com'),
                (3, 8, 'x', 'ed@bettyboop.com'),
                (4, 8, 'x', 'ed@lala.com'),
                (5, 9, 'x', 'fred@fred.com')
            ),
            dingalings = (
                ('id', 'address_id', 'data'),
                (1, 2, 'ding 1/2'),
                (2, 5, 'ding 2/5')
            ),
        )
class UpdateTest(_UpdateFromTestBase, fixtures.TablesTest, AssertsCompiledSQL):
    """Compile-level tests for the core UPDATE construct."""
    __dialect__ = 'default'
    def test_update_1(self):
        table1 = self.tables.mytable
        self.assert_compile(
            update(table1, table1.c.myid == 7),
            'UPDATE mytable SET name=:name WHERE mytable.myid = :myid_1',
            params={table1.c.name: 'fred'})
    def test_update_2(self):
        table1 = self.tables.mytable
        self.assert_compile(
            table1.update().
            where(table1.c.myid == 7).
            values({table1.c.myid: 5}),
            'UPDATE mytable SET myid=:myid WHERE mytable.myid = :myid_1',
            checkparams={'myid': 5, 'myid_1': 7})
    def test_update_3(self):
        table1 = self.tables.mytable
        self.assert_compile(
            update(table1, table1.c.myid == 7),
            'UPDATE mytable SET name=:name WHERE mytable.myid = :myid_1',
            params={'name': 'fred'})
    def test_update_4(self):
        table1 = self.tables.mytable
        self.assert_compile(
            update(table1, values={table1.c.name: table1.c.myid}),
            'UPDATE mytable SET name=mytable.myid')
    def test_update_5(self):
        table1 = self.tables.mytable
        self.assert_compile(
            update(table1,
                   whereclause=table1.c.name == bindparam('crit'),
                   values={table1.c.name: 'hi'}),
            'UPDATE mytable SET name=:name WHERE mytable.name = :crit',
            params={'crit': 'notthere'},
            checkparams={'crit': 'notthere', 'name': 'hi'})
    def test_update_6(self):
        table1 = self.tables.mytable
        self.assert_compile(
            update(table1,
                   table1.c.myid == 12,
                   values={table1.c.name: table1.c.myid}),
            'UPDATE mytable '
            'SET name=mytable.myid, description=:description '
            'WHERE mytable.myid = :myid_1',
            params={'description': 'test'},
            checkparams={'description': 'test', 'myid_1': 12})
    def test_update_7(self):
        table1 = self.tables.mytable
        self.assert_compile(
            update(table1, table1.c.myid == 12, values={table1.c.myid: 9}),
            'UPDATE mytable '
            'SET myid=:myid, description=:description '
            'WHERE mytable.myid = :myid_1',
            params={'myid_1': 12, 'myid': 9, 'description': 'test'})
    def test_update_8(self):
        table1 = self.tables.mytable
        self.assert_compile(
            update(table1, table1.c.myid == 12),
            'UPDATE mytable SET myid=:myid WHERE mytable.myid = :myid_1',
            params={'myid': 18}, checkparams={'myid': 18, 'myid_1': 12})
    def test_update_9(self):
        table1 = self.tables.mytable
        s = table1.update(table1.c.myid == 12, values={table1.c.name: 'lala'})
        c = s.compile(column_keys=['id', 'name'])
        eq_(str(s), str(c))
    def test_update_10(self):
        table1 = self.tables.mytable
        # a later values() call fully replaces earlier values.
        v1 = {table1.c.name: table1.c.myid}
        v2 = {table1.c.name: table1.c.name + 'foo'}
        self.assert_compile(
            update(table1, table1.c.myid == 12, values=v1).values(v2),
            'UPDATE mytable '
            'SET '
            'name=(mytable.name || :name_1), '
            'description=:description '
            'WHERE mytable.myid = :myid_1',
            params={'description': 'test'})
    def test_update_11(self):
        table1 = self.tables.mytable
        values = {
            table1.c.name: table1.c.name + 'lala',
            table1.c.myid: func.do_stuff(table1.c.myid, literal('hoho'))
        }
        self.assert_compile(
            update(
                table1,
                (table1.c.myid == func.hoho(4)) & (
                    table1.c.name == literal('foo') +
                    table1.c.name +
                    literal('lala')),
                values=values),
            'UPDATE mytable '
            'SET '
            'myid=do_stuff(mytable.myid, :param_1), '
            'name=(mytable.name || :name_1) '
            'WHERE '
            'mytable.myid = hoho(:hoho_1) AND '
            'mytable.name = :param_2 || mytable.name || :param_3')
    def test_unconsumed_names_kwargs(self):
        t = table("t", column("x"), column("y"))
        assert_raises_message(
            exc.CompileError,
            "Unconsumed column names: z",
            t.update().values(x=5, z=5).compile,
        )
    def test_unconsumed_names_values_dict(self):
        t = table("t", column("x"), column("y"))
        t2 = table("t2", column("q"), column("z"))
        assert_raises_message(
            exc.CompileError,
            "Unconsumed column names: j",
            t.update().values(x=5, j=7).values({t2.c.z: 5}).
            where(t.c.x == t2.c.q).compile,
        )
    def test_unconsumed_names_kwargs_w_keys(self):
        t = table("t", column("x"), column("y"))
        assert_raises_message(
            exc.CompileError,
            "Unconsumed column names: j",
            t.update().values(x=5, j=7).compile,
            column_keys=['j']
        )
    def test_update_ordered_parameters_1(self):
        table1 = self.tables.mytable
        # Confirm that we can pass values as list value pairs
        # note these are ordered *differently* from table.c
        values = [
            (table1.c.name, table1.c.name + 'lala'),
            (table1.c.myid, func.do_stuff(table1.c.myid, literal('hoho'))),
        ]
        self.assert_compile(
            update(
                table1,
                (table1.c.myid == func.hoho(4)) & (
                    table1.c.name == literal('foo') +
                    table1.c.name +
                    literal('lala')),
                preserve_parameter_order=True,
                values=values),
            'UPDATE mytable '
            'SET '
            'name=(mytable.name || :name_1), '
            'myid=do_stuff(mytable.myid, :param_1) '
            'WHERE '
            'mytable.myid = hoho(:hoho_1) AND '
            'mytable.name = :param_2 || mytable.name || :param_3')
    def test_update_ordered_parameters_2(self):
        table1 = self.tables.mytable
        # Confirm that we can pass values as list value pairs
        # note these are ordered *differently* from table.c
        values = [
            (table1.c.name, table1.c.name + 'lala'),
            ('description', 'some desc'),
            (table1.c.myid, func.do_stuff(table1.c.myid, literal('hoho')))
        ]
        self.assert_compile(
            update(
                table1,
                (table1.c.myid == func.hoho(4)) & (
                    table1.c.name == literal('foo') +
                    table1.c.name +
                    literal('lala')),
                preserve_parameter_order=True).values(values),
            'UPDATE mytable '
            'SET '
            'name=(mytable.name || :name_1), '
            'description=:description, '
            'myid=do_stuff(mytable.myid, :param_1) '
            'WHERE '
            'mytable.myid = hoho(:hoho_1) AND '
            'mytable.name = :param_2 || mytable.name || :param_3')
    def test_update_ordered_parameters_fire_onupdate(self):
        # unmentioned onupdate column ('data') is appended after the
        # explicitly ordered parameters.
        table = self.tables.update_w_default
        values = [
            (table.c.y, table.c.x + 5),
            ('x', 10)
        ]
        self.assert_compile(
            table.update(preserve_parameter_order=True).values(values),
            "UPDATE update_w_default SET ycol=(update_w_default.x + :x_1), "
            "x=:x, data=:data"
        )
    def test_update_ordered_parameters_override_onupdate(self):
        # explicitly-listed onupdate column keeps its ordered position.
        table = self.tables.update_w_default
        values = [
            (table.c.y, table.c.x + 5),
            (table.c.data, table.c.x + 10),
            ('x', 10)
        ]
        self.assert_compile(
            table.update(preserve_parameter_order=True).values(values),
            "UPDATE update_w_default SET ycol=(update_w_default.x + :x_1), "
            "data=(update_w_default.x + :x_2), x=:x"
        )
    def test_update_preserve_order_reqs_listtups(self):
        table1 = self.tables.mytable
        testing.assert_raises_message(
            ValueError,
            # Raw strings: the pattern contains regex escapes, and a
            # non-raw '\(' is an invalid string escape (DeprecationWarning,
            # future SyntaxError).
            r"When preserve_parameter_order is True, values\(\) "
            r"only accepts a list of 2-tuples",
            table1.update(preserve_parameter_order=True).values,
            {"description": "foo", "name": "bar"}
        )
    def test_update_ordereddict(self):
        table1 = self.tables.mytable
        # Confirm that ordered dicts are treated as normal dicts,
        # columns sorted in table order
        values = util.OrderedDict((
            (table1.c.name, table1.c.name + 'lala'),
            (table1.c.myid, func.do_stuff(table1.c.myid, literal('hoho')))))
        self.assert_compile(
            update(
                table1,
                (table1.c.myid == func.hoho(4)) & (
                    table1.c.name == literal('foo') +
                    table1.c.name +
                    literal('lala')),
                values=values),
            'UPDATE mytable '
            'SET '
            'myid=do_stuff(mytable.myid, :param_1), '
            'name=(mytable.name || :name_1) '
            'WHERE '
            'mytable.myid = hoho(:hoho_1) AND '
            'mytable.name = :param_2 || mytable.name || :param_3')
    def test_where_empty(self):
        # empty and_()/or_() conjunctions compile to no WHERE clause.
        table1 = self.tables.mytable
        self.assert_compile(
            table1.update().where(
                and_()),
            "UPDATE mytable SET myid=:myid, name=:name, description=:description")
        self.assert_compile(
            table1.update().where(
                or_()),
            "UPDATE mytable SET myid=:myid, name=:name, description=:description")
    def test_prefix_with(self):
        # dialect-specific prefixes only render for the matching dialect.
        table1 = self.tables.mytable
        stmt = table1.update().\
            prefix_with('A', 'B', dialect='mysql').\
            prefix_with('C', 'D')
        self.assert_compile(stmt,
                            'UPDATE C D mytable SET myid=:myid, name=:name, '
                            'description=:description')
        self.assert_compile(
            stmt,
            'UPDATE A B C D mytable SET myid=%s, name=%s, description=%s',
            dialect=mysql.dialect())
    def test_update_to_expression(self):
        """test update from an expression.
        this logic is triggered currently by a left side that doesn't
        have a key. The current supported use case is updating the index
        of a PostgreSQL ARRAY type.
        """
        table1 = self.tables.mytable
        expr = func.foo(table1.c.myid)
        eq_(expr.key, None)
        self.assert_compile(table1.update().values({expr: 'bar'}),
                            'UPDATE mytable SET foo(myid)=:param_1')
    def test_update_bound_ordering(self):
        """test that bound parameters between the UPDATE and FROM clauses
        order correctly in different SQL compilation scenarios.
        """
        table1 = self.tables.mytable
        table2 = self.tables.myothertable
        sel = select([table2]).where(table2.c.otherid == 5).alias()
        upd = table1.update().\
            where(table1.c.name == sel.c.othername).\
            values(name='foo')
        # UPDATE..FROM dialect: SET params precede the FROM subquery params.
        dialect = default.DefaultDialect()
        dialect.positional = True
        self.assert_compile(
            upd,
            "UPDATE mytable SET name=:name FROM (SELECT "
            "myothertable.otherid AS otherid, "
            "myothertable.othername AS othername "
            "FROM myothertable "
            "WHERE myothertable.otherid = :otherid_1) AS anon_1 "
            "WHERE mytable.name = anon_1.othername",
            checkpositional=('foo', 5),
            dialect=dialect
        )
        # MySQL multi-table dialect: the subquery renders first, so its
        # params come first.
        self.assert_compile(
            upd,
            "UPDATE mytable, (SELECT myothertable.otherid AS otherid, "
            "myothertable.othername AS othername "
            "FROM myothertable "
            "WHERE myothertable.otherid = %s) AS anon_1 SET mytable.name=%s "
            "WHERE mytable.name = anon_1.othername",
            checkpositional=(5, 'foo'),
            dialect=mysql.dialect()
        )
class UpdateFromCompileTest(_UpdateFromTestBase, fixtures.TablesTest,
AssertsCompiledSQL):
__dialect__ = 'default'
run_create_tables = run_inserts = run_deletes = None
def test_alias_one(self):
table1 = self.tables.mytable
talias1 = table1.alias('t1')
# this case is nonsensical. the UPDATE is entirely
# against the alias, but we name the table-bound column
# in values. The behavior here isn't really defined
self.assert_compile(
update(talias1, talias1.c.myid == 7).
values({table1.c.name: "fred"}),
'UPDATE mytable AS t1 '
'SET name=:name '
'WHERE t1.myid = :myid_1')
def test_alias_two(self):
table1 = self.tables.mytable
talias1 = table1.alias('t1')
# Here, compared to
# test_alias_one(), here we actually have UPDATE..FROM,
# which is causing the "table1.c.name" param to be handled
# as an "extra table", hence we see the full table name rendered.
self.assert_compile(
update(talias1, table1.c.myid == 7).
values({table1.c.name: 'fred'}),
'UPDATE mytable AS t1 '
'SET name=:mytable_name '
'FROM mytable '
'WHERE mytable.myid = :myid_1',
checkparams={'mytable_name': 'fred', 'myid_1': 7},
)
def test_alias_two_mysql(self):
table1 = self.tables.mytable
talias1 = table1.alias('t1')
self.assert_compile(
update(talias1, table1.c.myid == 7).
values({table1.c.name: 'fred'}),
"UPDATE mytable AS t1, mytable SET mytable.name=%s "
"WHERE mytable.myid = %s",
checkparams={'mytable_name': 'fred', 'myid_1': 7},
dialect='mysql')
def test_update_from_multitable_same_name_mysql(self):
users, addresses = self.tables.users, self.tables.addresses
self.assert_compile(
users.update().
values(name='newname').
values({addresses.c.name: "new address"}).
where(users.c.id == addresses.c.user_id),
"UPDATE users, addresses SET addresses.name=%s, "
"users.name=%s WHERE users.id = addresses.user_id",
checkparams={'addresses_name': 'new address', 'name': 'newname'},
dialect='mysql'
)
def test_render_table(self):
users, addresses = self.tables.users, self.tables.addresses
self.assert_compile(
users.update().
values(name='newname').
where(users.c.id == addresses.c.user_id).
where(addresses.c.email_address == 'e1'),
'UPDATE users '
'SET name=:name FROM addresses '
'WHERE '
'users.id = addresses.user_id AND '
'addresses.email_address = :email_address_1',
checkparams={'email_address_1': 'e1', 'name': 'newname'})
def test_render_multi_table(self):
users = self.tables.users
addresses = self.tables.addresses
dingalings = self.tables.dingalings
checkparams = {
'email_address_1': 'e1',
'id_1': 2,
'name': 'newname'
}
self.assert_compile(
users.update().
values(name='newname').
where(users.c.id == addresses.c.user_id).
where(addresses.c.email_address == 'e1').
where(addresses.c.id == dingalings.c.address_id).
where(dingalings.c.id == 2),
'UPDATE users '
'SET name=:name '
'FROM addresses, dingalings '
'WHERE '
'users.id = addresses.user_id AND '
'addresses.email_address = :email_address_1 AND '
'addresses.id = dingalings.address_id AND '
'dingalings.id = :id_1',
checkparams=checkparams)
def test_render_table_mysql(self):
users, addresses = self.tables.users, self.tables.addresses
self.assert_compile(
users.update().
values(name='newname').
where(users.c.id == addresses.c.user_id).
where(addresses.c.email_address == 'e1'),
'UPDATE users, addresses '
'SET users.name=%s '
'WHERE '
'users.id = addresses.user_id AND '
'addresses.email_address = %s',
checkparams={'email_address_1': 'e1', 'name': 'newname'},
dialect=mysql.dialect())
def test_render_subquery(self):
    """An aliased SELECT used in the WHERE renders inside FROM."""
    users, addresses = self.tables.users, self.tables.addresses
    subq = select([
        addresses.c.id,
        addresses.c.user_id,
        addresses.c.email_address,
    ]).where(addresses.c.id == 7).alias()
    stmt = (
        users.update()
        .values(name='newname')
        .where(users.c.id == subq.c.user_id)
        .where(subq.c.email_address == 'e1')
    )
    self.assert_compile(
        stmt,
        'UPDATE users '
        'SET name=:name FROM ('
        'SELECT '
        'addresses.id AS id, '
        'addresses.user_id AS user_id, '
        'addresses.email_address AS email_address '
        'FROM addresses '
        'WHERE addresses.id = :id_1'
        ') AS anon_1 '
        'WHERE users.id = anon_1.user_id '
        'AND anon_1.email_address = :email_address_1',
        checkparams={
            'email_address_1': 'e1',
            'id_1': 7,
            'name': 'newname',
        })
class UpdateFromRoundTripTest(_UpdateFromTestBase, fixtures.TablesTest):
    """Execute multi-table UPDATE statements against a live backend and
    verify the resulting rows."""

    __backend__ = True

    @testing.requires.update_from
    def test_exec_two_table(self):
        users, addresses = self.tables.users, self.tables.addresses
        stmt = (
            addresses.update()
            .values(email_address=users.c.name)
            .where(users.c.id == addresses.c.user_id)
            .where(users.c.name == 'ed')
        )
        testing.db.execute(stmt)
        self._assert_addresses(addresses, [
            (1, 7, 'x', 'jack@bean.com'),
            (2, 8, 'x', 'ed'),
            (3, 8, 'x', 'ed'),
            (4, 8, 'x', 'ed'),
            (5, 9, 'x', 'fred@fred.com')])

    @testing.requires.update_from
    def test_exec_two_table_plus_alias(self):
        users, addresses = self.tables.users, self.tables.addresses
        a1 = addresses.alias()
        stmt = (
            addresses.update()
            .values(email_address=users.c.name)
            .where(users.c.id == a1.c.user_id)
            .where(users.c.name == 'ed')
            .where(a1.c.id == addresses.c.id)
        )
        testing.db.execute(stmt)
        self._assert_addresses(addresses, [
            (1, 7, 'x', 'jack@bean.com'),
            (2, 8, 'x', 'ed'),
            (3, 8, 'x', 'ed'),
            (4, 8, 'x', 'ed'),
            (5, 9, 'x', 'fred@fred.com')])

    @testing.requires.update_from
    def test_exec_three_table(self):
        users = self.tables.users
        addresses = self.tables.addresses
        dingalings = self.tables.dingalings
        stmt = (
            addresses.update()
            .values(email_address=users.c.name)
            .where(users.c.id == addresses.c.user_id)
            .where(users.c.name == 'ed')
            .where(addresses.c.id == dingalings.c.address_id)
            .where(dingalings.c.id == 1)
        )
        testing.db.execute(stmt)
        self._assert_addresses(addresses, [
            (1, 7, 'x', 'jack@bean.com'),
            (2, 8, 'x', 'ed'),
            (3, 8, 'x', 'ed@bettyboop.com'),
            (4, 8, 'x', 'ed@lala.com'),
            (5, 9, 'x', 'fred@fred.com')])

    @testing.only_on('mysql', 'Multi table update')
    def test_exec_multitable(self):
        users, addresses = self.tables.users, self.tables.addresses
        new_values = {
            addresses.c.email_address: 'updated',
            users.c.name: 'ed2',
        }
        testing.db.execute(
            addresses.update()
            .values(new_values)
            .where(users.c.id == addresses.c.user_id)
            .where(users.c.name == 'ed'))
        self._assert_addresses(addresses, [
            (1, 7, 'x', 'jack@bean.com'),
            (2, 8, 'x', 'updated'),
            (3, 8, 'x', 'updated'),
            (4, 8, 'x', 'updated'),
            (5, 9, 'x', 'fred@fred.com')])
        self._assert_users(users, [
            (7, 'jack'),
            (8, 'ed2'),
            (9, 'fred'),
            (10, 'chuck')])

    @testing.only_on('mysql', 'Multi table update')
    def test_exec_multitable_same_name(self):
        users, addresses = self.tables.users, self.tables.addresses
        new_values = {
            addresses.c.name: 'ad_ed2',
            users.c.name: 'ed2',
        }
        testing.db.execute(
            addresses.update()
            .values(new_values)
            .where(users.c.id == addresses.c.user_id)
            .where(users.c.name == 'ed'))
        self._assert_addresses(addresses, [
            (1, 7, 'x', 'jack@bean.com'),
            (2, 8, 'ad_ed2', 'ed@wood.com'),
            (3, 8, 'ad_ed2', 'ed@bettyboop.com'),
            (4, 8, 'ad_ed2', 'ed@lala.com'),
            (5, 9, 'x', 'fred@fred.com')])
        self._assert_users(users, [
            (7, 'jack'),
            (8, 'ed2'),
            (9, 'fred'),
            (10, 'chuck')])

    def _assert_addresses(self, addresses, expected):
        # Deterministic ordering so row tuples compare positionally.
        stmt = addresses.select().order_by(addresses.c.id)
        eq_(testing.db.execute(stmt).fetchall(), expected)

    def _assert_users(self, users, expected):
        stmt = users.select().order_by(users.c.id)
        eq_(testing.db.execute(stmt).fetchall(), expected)
class UpdateFromMultiTableUpdateDefaultsTest(_UpdateFromTestBase,
                                             fixtures.TablesTest):
    """Verify Python-side ``onupdate`` defaults fire (or don't) when a
    multi-table UPDATE touches (or doesn't touch) each table."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            'users', metadata,
            Column('id', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('name', String(30), nullable=False),
            Column('some_update', String(30), onupdate='im the update'))

        Table(
            'addresses', metadata,
            Column('id', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('user_id', None, ForeignKey('users.id')),
            Column('email_address', String(50), nullable=False))

        Table(
            'foobar', metadata,
            Column('id', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('user_id', None, ForeignKey('users.id')),
            Column('data', String(30)),
            Column('some_update', String(30),
                   onupdate='im the other update'))

    @classmethod
    def fixtures(cls):
        return dict(
            users=(
                ('id', 'name', 'some_update'),
                (8, 'ed', 'value'),
                (9, 'fred', 'value'),
            ),
            addresses=(
                ('id', 'user_id', 'email_address'),
                (2, 8, 'ed@wood.com'),
                (3, 8, 'ed@bettyboop.com'),
                (4, 9, 'fred@fred.com'),
            ),
            foobar=(
                ('id', 'user_id', 'data'),
                (2, 8, 'd1'),
                (3, 8, 'd2'),
                (4, 9, 'd3'),
            ),
        )

    @testing.only_on('mysql', 'Multi table update')
    def test_defaults_second_table(self):
        users, addresses = self.tables.users, self.tables.addresses
        new_values = {
            addresses.c.email_address: 'updated',
            users.c.name: 'ed2',
        }
        ret = testing.db.execute(
            addresses.update()
            .values(new_values)
            .where(users.c.id == addresses.c.user_id)
            .where(users.c.name == 'ed'))
        # users was written, so its onupdate column is prefetched.
        eq_(set(ret.prefetch_cols()), set([users.c.some_update]))
        self._assert_addresses(addresses, [
            (2, 8, 'updated'),
            (3, 8, 'updated'),
            (4, 9, 'fred@fred.com')])
        self._assert_users(users, [
            (8, 'ed2', 'im the update'),
            (9, 'fred', 'value')])

    @testing.only_on('mysql', 'Multi table update')
    def test_defaults_second_table_same_name(self):
        users, foobar = self.tables.users, self.tables.foobar
        new_values = {
            foobar.c.data: foobar.c.data + 'a',
            users.c.name: 'ed2',
        }
        ret = testing.db.execute(
            users.update()
            .values(new_values)
            .where(users.c.id == foobar.c.user_id)
            .where(users.c.name == 'ed'))
        # Both tables written: both same-named onupdate columns fire.
        eq_(
            set(ret.prefetch_cols()),
            set([users.c.some_update, foobar.c.some_update])
        )
        self._assert_foobar(foobar, [
            (2, 8, 'd1a', 'im the other update'),
            (3, 8, 'd2a', 'im the other update'),
            (4, 9, 'd3', None)])
        self._assert_users(users, [
            (8, 'ed2', 'im the update'),
            (9, 'fred', 'value')])

    @testing.only_on('mysql', 'Multi table update')
    def test_no_defaults_second_table(self):
        users, addresses = self.tables.users, self.tables.addresses
        ret = testing.db.execute(
            addresses.update()
            .values({'email_address': users.c.name})
            .where(users.c.id == addresses.c.user_id)
            .where(users.c.name == 'ed'))
        eq_(ret.prefetch_cols(), [])
        self._assert_addresses(addresses, [
            (2, 8, 'ed'),
            (3, 8, 'ed'),
            (4, 9, 'fred@fred.com')])
        # users table not actually updated, so no onupdate
        self._assert_users(users, [
            (8, 'ed', 'value'),
            (9, 'fred', 'value')])

    def _assert_foobar(self, foobar, expected):
        stmt = foobar.select().order_by(foobar.c.id)
        eq_(testing.db.execute(stmt).fetchall(), expected)

    def _assert_addresses(self, addresses, expected):
        stmt = addresses.select().order_by(addresses.c.id)
        eq_(testing.db.execute(stmt).fetchall(), expected)

    def _assert_users(self, users, expected):
        stmt = users.select().order_by(users.c.id)
        eq_(testing.db.execute(stmt).fetchall(), expected)
| |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine, func, between
from sqlalchemy.engine.url import URL
from datetime import datetime
from geoalchemy2 import Geometry
db = SQLAlchemy()
##############################################################################
# Model definitions
class User(db.Model):
    """ User information. """

    __tablename__ = 'users'

    # Auto-incrementing surrogate primary key.
    user_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    firstname = db.Column(db.String(70), nullable=False)
    lastname = db.Column(db.String(70), nullable=False)
    # Login identifier; uniqueness enforced at the DB level.
    email = db.Column(db.String(255), nullable=False, unique=True)
    # Password hash (sample data uses bcrypt-style '$2b$...' strings),
    # never the plain-text password.
    password = db.Column(db.String(100), nullable=False)

    def __repr__(self):
        """ Shows information about the user. """
        return '<User id=%s firstname=%s lastname=%s email=%s>' % (self.user_id, self.firstname, self.lastname, self.email)
class UnitDetails(db.Model):
    """ Details of the unit for sale or up for rent. """

    __tablename__ = 'unitdetails'

    detail_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    neighborhood = db.Column(db.String(150), nullable=False)
    # Float allows half-rooms (e.g. 2.5 bathrooms); nullable because some
    # listings omit these fields.
    bedrooms = db.Column(db.Float, nullable=True)
    bathrooms = db.Column(db.Float, nullable=True)
    sqft = db.Column(db.Integer, nullable=True)
    latitude = db.Column(db.Float, nullable=False)
    longitude = db.Column(db.Float, nullable=False)
    # PostGIS point (via geoalchemy2) duplicating latitude/longitude for
    # spatial queries; populated with 'POINT(lat lng)' strings in sample_data.
    latlng = db.Column(Geometry(geometry_type='POINT'), nullable=False)

    def __repr__(self):
        """ Shows unit details of the unit for rent or sale. """
        return '<UnitDetails id=%s neighborhood=%s latitude=%s longitude=%s latlng=%s>' % (self.detail_id, self.neighborhood, self.latitude, self.longitude, self.latlng)
class Listing(db.Model):
    """ Unit sale listings. """

    __tablename__ = 'listings'

    # Zillow property id, supplied externally (no autoincrement).
    zpid = db.Column(db.Integer, primary_key=True)
    price = db.Column(db.Integer, nullable=False)
    # Monthly HOA dues; absent for some properties.
    hoa = db.Column(db.Integer, nullable=True)
    street = db.Column(db.String(255), nullable=False)
    city = db.Column(db.String(100), nullable=False)
    state = db.Column(db.String(100), nullable=False)
    zipcode = db.Column(db.Integer, nullable=False)
    # 2083 = historical maximum URL length (IE limit).
    photo_url = db.Column(db.String(2083), nullable=True)
    detail_id = db.Column(db.Integer, db.ForeignKey('unitdetails.detail_id'), nullable=False)

    # One UnitDetails row may back several listings (backref 'listings').
    unitdetails = db.relationship('UnitDetails', backref='listings')

    def __repr__(self):
        """ Shows information about the unit for sale. """
        return '<Listing id=%s price=%s detail_id=%s>' % (self.zpid, self.price, self.detail_id)
class Rental(db.Model):
    """ Unit rental listings. """

    __tablename__ = 'rentals'

    # Craigslist posting id, supplied externally as text.
    cl_id = db.Column(db.String(150), primary_key=True)
    price = db.Column(db.Integer, nullable=False)
    date_posted = db.Column(db.TIMESTAMP(timezone=True), nullable=False)
    detail_id = db.Column(db.Integer, db.ForeignKey('unitdetails.detail_id'), nullable=False)

    # One UnitDetails row may back several rentals (backref 'rentals').
    unitdetails = db.relationship('UnitDetails', backref='rentals')

    def __repr__(self):
        """ Shows information about the unit for rent. """
        # Fixed typo: was 'data_posted' in the repr output.
        return '<Rental id=%s price=%s date_posted=%s detail_id=%s>' % (self.cl_id, self.price, self.date_posted, self.detail_id)
class Favorite(db.Model):
    """ Favorite homes. """

    __tablename__ = 'favorites'

    favorite_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'), nullable=False)
    zpid = db.Column(db.Integer, db.ForeignKey('listings.zpid'), nullable=False)
    # Timestamp of when the user saved the listing; defaults to now (UTC).
    date_saved = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)

    # Association object between User and Listing with extra data
    # (date_saved); backrefs expose user.favorites / listing.favorites.
    users = db.relationship('User', backref='favorites')
    listings = db.relationship('Listing', backref='favorites')

    def __repr__(self):
        """ Shows the user's favorite homes. """
        return '<Favorite id=%s user_id=%s zpid=%s date_saved=%s>' % (self.favorite_id, self.user_id, self.zpid, self.date_saved)
##############################################################################
# Sample data
def sample_data():
    """ Create sample data for the test database.

    Safe to run repeatedly: existing rows are removed first, deleting
    child tables before their parents so foreign-key constraints are
    not violated.
    """

    # In case this is run more than once, empty out existing data.
    # Order matters: Favorite references User and Listing; Listing and
    # Rental reference UnitDetails — delete children first.
    Favorite.query.delete()
    Listing.query.delete()
    Rental.query.delete()
    UnitDetails.query.delete()
    User.query.delete()

    # Add sample users (passwords are pre-hashed bcrypt strings)
    fiyero = User(firstname='fiyero', lastname='tigelaar', email='fiyerotigelaar@example.com', password='$2b$12$zDfkTi4MZeyuxkzHF6XbhOlfTJ4uy31cRVq6IZyPQ950Qb6KLp1AC')
    wizard = User(firstname='wizard', lastname='ofoz', email='wizardofoz@example.com', password='$2b$12$dQd.R13zS/PZKRBY/IH6guPusbAfNStx1pk.yQZ3FSyBpwD6bLoHK')
    elphaba = User(firstname='elphaba', lastname='thropp', email='elphabathropp@example.com', password='$2b$12$7HoZkpd6xInvo0YO2td6VOL/E138mvzlXeA9Xan8daapqRoxfmScC')
    glinda = User(firstname='glinda', lastname='good', email='glindagood@example.com', password='$2b$12$FPHmgRWWGFF7B/OOoTipbeLnqLY0A2nU/6Kbtcn012ZEFr6XRvC8C')

    # Add sample unit details
    detail1 = UnitDetails(neighborhood='inner sunset / UCSF', bedrooms=2, bathrooms=2.5, sqft=2500, latitude=37.7651614, longitude=-122.4601482, latlng='POINT(37.7651614 -122.4601482)')
    detail2 = UnitDetails(neighborhood='tenderloin', bedrooms=0, bathrooms=1, sqft=390, latitude=37.78526, longitude=-122.411953, latlng='POINT(37.78526 -122.411953)')
    detail3 = UnitDetails(neighborhood='pacific heights', bedrooms=1, bathrooms=1, sqft=650, latitude=37.7958617, longitude=-122.3945241, latlng='POINT(37.7958617 -122.3945241)')
    detail4 = UnitDetails(neighborhood='noe valley', bedrooms=4, bathrooms=3, sqft=1740, latitude=37.7503705, longitude=-122.436254, latlng='POINT(37.7503705 -122.436254)')
    detail5 = UnitDetails(neighborhood='lower nob hill', bedrooms=2, bathrooms=1.5, sqft=1190, latitude=37.7872375, longitude=-122.4139991, latlng='POINT(37.7872375 -122.4139991)')
    detail6 = UnitDetails(neighborhood='russian hill', bedrooms=2, bathrooms=1, sqft=1400, latitude=37.7960949, longitude=-122.4133919, latlng='POINT(37.7960949 -122.4133919)')
    detail7 = UnitDetails(neighborhood='pacific heights', bedrooms=1, bathrooms=1, sqft=760, latitude=37.789962, longitude=-122.4256378, latlng='POINT(37.789962 -122.4256378)')
    detail8 = UnitDetails(neighborhood='inner sunset / UCSF', bedrooms=4, bathrooms=1.5, sqft=1940, latitude=37.7639145, longitude=-122.4695433, latlng='POINT(37.7639145 -122.4695433)')
    detail9 = UnitDetails(neighborhood='lower nob hill', bedrooms=5, bathrooms=3, sqft=2180, latitude=37.7912167, longitude=-122.4157727, latlng='POINT(37.7912167 -122.4157727)')
    detail10 = UnitDetails(neighborhood='downtown / civic / van ness', bedrooms=2, bathrooms=2, sqft=2500, latitude=37.7815058, longitude=-122.4204841, latlng='POINT(37.7815058 -122.4204841)')

    # Add sample listings
    listing1 = Listing(zpid=19273591, price=1105500, street='123 Yellow Brick Rd', city='Emerald City', state='EA', zipcode=10078, unitdetails=detail1)
    listing2 = Listing(zpid=98759925, price=550900, street='99 Yellow Brick Rd', city='Emerald City', state='WE', zipcode=35929, unitdetails=detail2)
    listing3 = Listing(zpid=98723598, price=664000, hoa=500, street='85 Yellow Brick Rd', city='Emerald City', state='NO', zipcode=20585, unitdetails=detail3)
    listing4 = Listing(zpid=28938876, price=2540800, hoa=275, street='26 Yellow Brick Rd', city='Emerald City', state='SO', zipcode=49852, unitdetails=detail4)
    listing5 = Listing(zpid=38478998, price=980430, hoa=350, street='34 Yellow Brick Rd', city='Emerald City', state='EA', zipcode=35990, unitdetails=detail5)

    # Add sample rentals. cl_id is a String(150) column, so the ids are
    # passed as text (previously ints were passed).
    rental1 = Rental(cl_id='6007117641', price=2890, date_posted='2017-02-17 07:27:17+00', unitdetails=detail6)
    rental2 = Rental(cl_id='6007117642', price=1885, date_posted='2017-02-17 07:03:38+00', unitdetails=detail7)
    rental3 = Rental(cl_id='6007117643', price=5460, date_posted='2017-02-14 22:30:45+00', unitdetails=detail8)
    rental4 = Rental(cl_id='6007117644', price=6700, date_posted='2017-02-15 04:58:04+00', unitdetails=detail9)
    rental5 = Rental(cl_id='6007117645', price=3155, date_posted='2017-02-18 00:19:55+00', unitdetails=detail10)

    # Add sample favorites. Assign through the relationships (users=/
    # listings=) rather than the integer FK columns, so SQLAlchemy fills
    # user_id/zpid from the flushed parent objects.
    favorite1 = Favorite(users=fiyero, listings=listing3, date_saved='2017-01-18 21:11:35.537000')
    favorite2 = Favorite(users=elphaba, listings=listing3, date_saved='2017-02-01 17:51:43.235000')
    favorite3 = Favorite(users=elphaba, listings=listing1, date_saved='2017-02-10 11:08:51.067000')
    favorite4 = Favorite(users=elphaba, listings=listing5, date_saved='2017-02-13 12:36:12.473000')
    favorite5 = Favorite(users=glinda, listings=listing3, date_saved='2017-02-16 14:27:36.182000')

    # Add and commit to test database
    db.session.add_all([
        fiyero, wizard, elphaba, glinda,
        detail1, detail2, detail3, detail4, detail5,
        detail6, detail7, detail8, detail9, detail10,
        listing1, listing2, listing3, listing4, listing5,
        rental1, rental2, rental3, rental4, rental5,
        favorite1, favorite2, favorite3, favorite4, favorite5,
    ])
    db.session.commit()
##############################################################################
# Helper functions
def connect_to_db_scrapy():
    """ Connects the database to Scrapy via a session.

    Returns a new SQLAlchemy Session bound to the local 'investable'
    PostgreSQL database.
    """
    # 'postgresql' is the canonical dialect name; the bare 'postgres'
    # alias was removed in SQLAlchemy 1.4.
    engine = create_engine('postgresql:///investable', echo=False, encoding='utf8')
    Session = sessionmaker(bind=engine)
    session = Session()
    return session
def connect_to_db_flask(app, db_uri=None):
    """Connect the database to Flask app.

    app    -- the Flask application instance.
    db_uri -- optional database URI; defaults to the local 'investable'
              PostgreSQL database.
    """
    # Configure to use PostgreSQL database.  'postgresql' is the
    # canonical dialect name ('postgres' was removed in SQLAlchemy 1.4).
    app.config['SQLALCHEMY_DATABASE_URI'] = db_uri or 'postgresql:///investable'
    app.config['SQLALCHEMY_ECHO'] = False
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.app = app
    db.init_app(app)
if __name__ == "__main__":
    # Import app from server if opening from this file
    from server import app

    connect_to_db_flask(app)
    # Parenthesised call works on both Python 2 and Python 3.
    print("Connected to DB.")

    # In case tables haven't been created, create them
    db.create_all()
| |
'''
The MIT License (MIT)
Copyright (c) 2013 Dave P.
'''
import sys
VER = sys.version_info[0]
if VER >= 3:
import socketserver
from http.server import BaseHTTPRequestHandler
from io import StringIO, BytesIO
else:
import SocketServer
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
import hashlib
import base64
import socket
import struct
import ssl
import errno
import codecs
from collections import deque
from select import select
__all__ = ['WebSocket',
'SimpleWebSocketServer',
'SimpleSSLWebSocketServer']
def _check_unicode(val):
    """Return True if *val* is a text (unicode) string on this Python."""
    text_type = str if VER >= 3 else unicode
    return isinstance(val, text_type)
class HTTPRequest(BaseHTTPRequestHandler):
    """Parse an in-memory HTTP request (no socket involved).

    After construction, headers are available via self.headers and any
    parse failure via self.error_code / self.error_message.
    """

    def __init__(self, request_text):
        buffer_cls = BytesIO if VER >= 3 else StringIO
        self.rfile = buffer_cls(request_text)
        self.raw_requestline = self.rfile.readline()
        self.error_code = self.error_message = None
        self.parse_request()
# Close status codes a peer may legitimately send (RFC 6455 section 7.4).
_VALID_STATUS_CODES = [1000, 1001, 1002, 1003, 1007, 1008,
                       1009, 1010, 1011, 3000, 3999, 4000, 4999]

# Server side of the RFC 6455 opening-handshake response; %(acceptstr)s is
# filled with the computed Sec-WebSocket-Accept value.
HANDSHAKE_STR = (
    "HTTP/1.1 101 Switching Protocols\r\n"
    "Upgrade: WebSocket\r\n"
    "Connection: Upgrade\r\n"
    "Sec-WebSocket-Accept: %(acceptstr)s\r\n\r\n"
)

# Fixed GUID appended to the client's key when computing the accept hash
# (RFC 6455 section 1.3).
GUID_STR = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'

# Frame opcodes (RFC 6455 section 5.2).
STREAM = 0x0    # continuation frame
TEXT = 0x1
BINARY = 0x2
CLOSE = 0x8
PING = 0x9
PONG = 0xA

# States for the byte-at-a-time parser in WebSocket._parseMessage.
HEADERB1 = 1
HEADERB2 = 3
LENGTHSHORT = 4
LENGTHLONG = 5
MASK = 6
PAYLOAD = 7

# Safety limits on accumulated header bytes and frame payload bytes.
MAXHEADER = 65536
MAXPAYLOAD = 33554432
class WebSocket(object):
    """One client connection managed by SimpleWebSocketServer.

    Implements the RFC 6455 frame parser as a byte-at-a-time state
    machine (_parseMessage).  Subclass and override handleMessage,
    handleConnected and handleClose for application behaviour.
    """

    def __init__(self, server, sock, address):
        self.server = server
        self.client = sock
        self.address = address

        self.handshaked = False
        self.headerbuffer = bytearray()
        self.headertoread = 2048

        self.fin = 0
        self.data = bytearray()
        self.opcode = 0
        self.hasmask = 0
        self.maskarray = None
        self.length = 0
        self.lengtharray = None
        self.index = 0
        self.request = None
        self.usingssl = False

        self.frag_start = False
        self.frag_type = BINARY
        self.frag_buffer = None
        # Incremental decoder so a multi-byte UTF-8 sequence may be split
        # across fragment boundaries.
        self.frag_decoder = codecs.getincrementaldecoder('utf-8')(errors='strict')
        self.closed = False
        self.sendq = deque()

        self.state = HEADERB1

        # restrict the size of header and payload for security reasons
        self.maxheader = MAXHEADER
        self.maxpayload = MAXPAYLOAD

    def handleMessage(self):
        """
        Called when websocket frame is received.
        To access the frame data call self.data.

        If the frame is Text then self.data is a unicode object.
        If the frame is Binary then self.data is a bytearray object.
        """
        pass

    def handleConnected(self):
        """
        Called when a websocket client connects to the server.
        """
        pass

    def handleClose(self):
        """
        Called when a websocket server gets a Close frame from a client.
        """
        pass

    def _handlePacket(self):
        """Dispatch one completed frame held in self.opcode / self.data."""
        # Validate the opcode first; control frames are limited to 125 bytes.
        if self.opcode == CLOSE:
            pass
        elif self.opcode == STREAM:
            pass
        elif self.opcode == TEXT:
            pass
        elif self.opcode == BINARY:
            pass
        elif self.opcode == PONG or self.opcode == PING:
            if len(self.data) > 125:
                raise Exception('control frame length can not be > 125')
        else:
            # unknown or reserved opcode so just close
            raise Exception('unknown opcode')

        if self.opcode == CLOSE:
            status = 1000
            reason = u''
            length = len(self.data)

            if length == 0:
                pass
            elif length >= 2:
                # First two bytes are the status code, the rest the reason.
                status = struct.unpack_from('!H', self.data[:2])[0]
                reason = self.data[2:]

                if status not in _VALID_STATUS_CODES:
                    status = 1002

                if len(reason) > 0:
                    try:
                        reason = reason.decode('utf8', errors='strict')
                    except Exception:
                        status = 1002
            else:
                # A one-byte close payload is a protocol error.
                status = 1002

            self.close(status, reason)
            return

        elif self.fin == 0:
            # Fragmented message: first frame or continuation frame.
            if self.opcode != STREAM:
                if self.opcode == PING or self.opcode == PONG:
                    raise Exception('control messages can not be fragmented')

                self.frag_type = self.opcode
                self.frag_start = True
                self.frag_decoder.reset()

                if self.frag_type == TEXT:
                    self.frag_buffer = []
                    utf_str = self.frag_decoder.decode(self.data, final=False)
                    if utf_str:
                        self.frag_buffer.append(utf_str)
                else:
                    self.frag_buffer = bytearray()
                    self.frag_buffer.extend(self.data)
            else:
                if self.frag_start is False:
                    raise Exception('fragmentation protocol error')

                if self.frag_type == TEXT:
                    utf_str = self.frag_decoder.decode(self.data, final=False)
                    if utf_str:
                        self.frag_buffer.append(utf_str)
                else:
                    self.frag_buffer.extend(self.data)

        else:
            if self.opcode == STREAM:
                # Final frame of a fragmented message: assemble and deliver.
                if self.frag_start is False:
                    raise Exception('fragmentation protocol error')

                if self.frag_type == TEXT:
                    utf_str = self.frag_decoder.decode(self.data, final=True)
                    self.frag_buffer.append(utf_str)
                    self.data = u''.join(self.frag_buffer)
                else:
                    self.frag_buffer.extend(self.data)
                    self.data = self.frag_buffer

                self.handleMessage()

                self.frag_decoder.reset()
                self.frag_type = BINARY
                self.frag_start = False
                self.frag_buffer = None

            elif self.opcode == PING:
                # Answer a ping with a pong carrying the same payload.
                self._sendMessage(False, PONG, self.data)

            elif self.opcode == PONG:
                pass

            else:
                # Unfragmented data frame.
                if self.frag_start is True:
                    raise Exception('fragmentation protocol error')

                if self.opcode == TEXT:
                    try:
                        self.data = self.data.decode('utf8', errors='strict')
                    except Exception:
                        raise Exception('invalid utf-8 payload')

                self.handleMessage()

    def _handleData(self):
        """Read from the socket: finish the handshake, then feed the parser."""
        # do the HTTP header and handshake
        if self.handshaked is False:
            data = self.client.recv(self.headertoread)
            if not data:
                raise Exception('remote socket closed')

            # accumulate
            self.headerbuffer.extend(data)

            if len(self.headerbuffer) >= self.maxheader:
                raise Exception('header exceeded allowable size')

            # indicates end of HTTP header
            if b'\r\n\r\n' in self.headerbuffer:
                self.request = HTTPRequest(self.headerbuffer)

                # handshake rfc 6455
                try:
                    key = self.request.headers['Sec-WebSocket-Key']
                    k = key.encode('ascii') + GUID_STR.encode('ascii')
                    k_s = base64.b64encode(hashlib.sha1(k).digest()).decode('ascii')
                    hStr = HANDSHAKE_STR % {'acceptstr': k_s}
                    self.sendq.append((BINARY, hStr.encode('ascii')))
                    self.handshaked = True
                    self.handleConnected()
                except Exception as e:
                    # BUGFIX: was Exception('handshake failed: %s', str(e)),
                    # which never interpolates the message.
                    raise Exception('handshake failed: %s' % str(e))

        # else do normal data
        else:
            data = self.client.recv(16384)
            if not data:
                raise Exception("remote socket closed")

            if VER >= 3:
                # Iterating py3 bytes already yields ints.
                for d in data:
                    self._parseMessage(d)
            else:
                for d in data:
                    self._parseMessage(ord(d))

    def close(self, status=1000, reason=u''):
        """
        Send Close frame to the client. The underlying socket is only closed
        when the client acknowledges the Close frame.

        status is the closing identifier.
        reason is the reason for the close.
        """
        try:
            if self.closed is False:
                close_msg = bytearray()
                close_msg.extend(struct.pack("!H", status))
                if _check_unicode(reason):
                    close_msg.extend(reason.encode('utf-8'))
                else:
                    close_msg.extend(reason)

                self._sendMessage(False, CLOSE, close_msg)
        finally:
            self.closed = True

    def _sendBuffer(self, buff, send_all=False):
        """Send as much of *buff* as the (non-blocking) socket accepts.

        Returns the unsent remainder on EAGAIN/EWOULDBLOCK (unless
        send_all is True, in which case it retries), or None when the
        whole buffer was sent.
        """
        size = len(buff)
        tosend = size
        already_sent = 0

        while tosend > 0:
            try:
                # i should be able to send a bytearray
                sent = self.client.send(buff[already_sent:])
                if sent == 0:
                    raise RuntimeError('socket connection broken')
                already_sent += sent
                tosend -= sent
            except socket.error as e:
                # if we have full buffers then wait for them to drain and try again
                if e.errno in [errno.EAGAIN, errno.EWOULDBLOCK]:
                    if send_all:
                        continue
                    return buff[already_sent:]
                else:
                    raise e

        return None

    def sendFragmentStart(self, data):
        """
        Send the start of a data fragment stream to a websocket client.
        Subsequent data should be sent using sendFragment().
        A fragment stream is completed when sendFragmentEnd() is called.

        If data is a unicode object then the frame is sent as Text.
        If the data is a bytearray object then the frame is sent as Binary.
        """
        opcode = BINARY
        if _check_unicode(data):
            opcode = TEXT
        self._sendMessage(True, opcode, data)

    def sendFragment(self, data):
        """
        see sendFragmentStart()

        If data is a unicode object then the frame is sent as Text.
        If the data is a bytearray object then the frame is sent as Binary.
        """
        self._sendMessage(True, STREAM, data)

    def sendFragmentEnd(self, data):
        """
        see sendFragmentEnd()

        If data is a unicode object then the frame is sent as Text.
        If the data is a bytearray object then the frame is sent as Binary.
        """
        self._sendMessage(False, STREAM, data)

    def sendMessage(self, data):
        """
        Send websocket data frame to the client.

        If data is a unicode object then the frame is sent as Text.
        If the data is a bytearray object then the frame is sent as Binary.
        """
        opcode = BINARY
        if _check_unicode(data):
            opcode = TEXT
        self._sendMessage(False, opcode, data)

    def _sendMessage(self, fin, opcode, data):
        """Frame *data* and queue it; fin=True means more fragments follow."""
        payload = bytearray()

        b1 = 0
        b2 = 0
        if fin is False:
            # FIN bit: set when this frame completes the message.
            b1 |= 0x80
        b1 |= opcode

        if _check_unicode(data):
            data = data.encode('utf-8')

        length = len(data)
        payload.append(b1)

        if length <= 125:
            b2 |= length
            payload.append(b2)
        elif length >= 126 and length <= 65535:
            # 16-bit extended length.
            b2 |= 126
            payload.append(b2)
            payload.extend(struct.pack("!H", length))
        else:
            # 64-bit extended length.
            b2 |= 127
            payload.append(b2)
            payload.extend(struct.pack("!Q", length))

        if length > 0:
            payload.extend(data)

        self.sendq.append((opcode, payload))

    def _parseMessage(self, byte):
        """Feed one received byte into the frame-parsing state machine."""
        # read in the header
        if self.state == HEADERB1:
            self.fin = byte & 0x80
            self.opcode = byte & 0x0F
            self.state = HEADERB2

            self.index = 0
            self.length = 0
            self.lengtharray = bytearray()
            self.data = bytearray()

            rsv = byte & 0x70
            if rsv != 0:
                raise Exception('RSV bit must be 0')

        elif self.state == HEADERB2:
            mask = byte & 0x80
            length = byte & 0x7F

            if self.opcode == PING and length > 125:
                raise Exception('ping packet is too large')

            if mask == 128:
                self.hasmask = True
            else:
                self.hasmask = False

            if length <= 125:
                self.length = length

                # if we have a mask we must read it
                if self.hasmask is True:
                    self.maskarray = bytearray()
                    self.state = MASK
                else:
                    # if there is no mask and no payload we are done
                    if self.length <= 0:
                        try:
                            self._handlePacket()
                        finally:
                            # BUGFIX: was 'self.state = self.HEADERB1';
                            # HEADERB1 is a module-level constant, not an
                            # instance attribute, so that raised
                            # AttributeError on empty unmasked frames.
                            self.state = HEADERB1
                            self.data = bytearray()
                    # we have no mask and some payload
                    else:
                        self.data = bytearray()
                        self.state = PAYLOAD

            elif length == 126:
                self.lengtharray = bytearray()
                self.state = LENGTHSHORT

            elif length == 127:
                self.lengtharray = bytearray()
                self.state = LENGTHLONG

        elif self.state == LENGTHSHORT:
            self.lengtharray.append(byte)

            if len(self.lengtharray) > 2:
                raise Exception('short length exceeded allowable size')

            if len(self.lengtharray) == 2:
                self.length = struct.unpack_from('!H', self.lengtharray)[0]

                if self.hasmask is True:
                    self.maskarray = bytearray()
                    self.state = MASK
                else:
                    # if there is no mask and no payload we are done
                    if self.length <= 0:
                        try:
                            self._handlePacket()
                        finally:
                            self.state = HEADERB1
                            self.data = bytearray()
                    # we have no mask and some payload
                    else:
                        self.data = bytearray()
                        self.state = PAYLOAD

        elif self.state == LENGTHLONG:
            self.lengtharray.append(byte)

            if len(self.lengtharray) > 8:
                raise Exception('long length exceeded allowable size')

            if len(self.lengtharray) == 8:
                self.length = struct.unpack_from('!Q', self.lengtharray)[0]

                if self.hasmask is True:
                    self.maskarray = bytearray()
                    self.state = MASK
                else:
                    # if there is no mask and no payload we are done
                    if self.length <= 0:
                        try:
                            self._handlePacket()
                        finally:
                            self.state = HEADERB1
                            self.data = bytearray()
                    # we have no mask and some payload
                    else:
                        self.data = bytearray()
                        self.state = PAYLOAD

        # MASK STATE
        elif self.state == MASK:
            self.maskarray.append(byte)

            if len(self.maskarray) > 4:
                raise Exception('mask exceeded allowable size')

            if len(self.maskarray) == 4:
                # if there is no mask and no payload we are done
                if self.length <= 0:
                    try:
                        self._handlePacket()
                    finally:
                        self.state = HEADERB1
                        self.data = bytearray()
                # we have no mask and some payload
                else:
                    self.data = bytearray()
                    self.state = PAYLOAD

        # PAYLOAD STATE
        elif self.state == PAYLOAD:
            if self.hasmask is True:
                # Unmask each payload byte with the rotating 4-byte key.
                self.data.append(byte ^ self.maskarray[self.index % 4])
            else:
                self.data.append(byte)

            # if length exceeds allowable size then we except and remove the connection
            if len(self.data) >= self.maxpayload:
                raise Exception('payload exceeded allowable size')

            # check if we have processed length bytes; if so we are done
            if (self.index + 1) == self.length:
                try:
                    self._handlePacket()
                finally:
                    self.state = HEADERB1
                    self.data = bytearray()
            else:
                self.index += 1
class SimpleWebSocketServer(object):
    """A select()-based server multiplexing many WebSocket clients.

    websocketclass is instantiated once per accepted connection; it is
    expected to follow the WebSocket interface above.
    """

    def __init__(self, host, port, websocketclass, selectInterval=0.1):
        self.websocketclass = websocketclass
        self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.serversocket.bind((host, port))
        self.serversocket.listen(5)
        self.selectInterval = selectInterval
        self.connections = {}            # fileno -> WebSocket instance
        self.listeners = [self.serversocket]
        self.keep_running = True

    def _decorateSocket(self, sock):
        # Hook for subclasses (e.g. SSL wrapping).
        return sock

    def _constructWebSocket(self, sock, address):
        return self.websocketclass(self, sock, address)

    def close(self):
        """Close the listening socket and every client connection."""
        self.serversocket.close()

        for conn in self.connections.values():
            conn.close()
            conn.handleClose()

    def stop(self):
        """Ask serveforever() to exit after the current select round."""
        self.keep_running = False

    def serveforever(self):
        """Run the select() loop until stop() is called."""
        while self.keep_running:
            # Only poll for writability on clients with queued frames.
            writers = []
            for fileno in self.listeners:
                if fileno == self.serversocket:
                    continue
                client = self.connections[fileno]
                if client.sendq:
                    writers.append(fileno)

            if self.selectInterval:
                rList, wList, xList = select(
                    self.listeners, writers, self.listeners,
                    self.selectInterval)
            else:
                rList, wList, xList = select(
                    self.listeners, writers, self.listeners)

            for ready in wList:
                client = self.connections[ready]
                try:
                    while client.sendq:
                        opcode, payload = client.sendq.popleft()
                        remaining = client._sendBuffer(payload)
                        if remaining is not None:
                            # Socket buffer full: requeue and retry later.
                            client.sendq.appendleft((opcode, remaining))
                            break
                        else:
                            if opcode == CLOSE:
                                raise Exception('received client close')
                except Exception:
                    client.client.close()
                    client.handleClose()
                    del self.connections[ready]
                    self.listeners.remove(ready)

            for ready in rList:
                if ready == self.serversocket:
                    # BUGFIX: define sock before the try block; previously a
                    # failure inside accept() itself made the 'sock is not
                    # None' check raise NameError, masking the real error.
                    sock = None
                    try:
                        sock, address = self.serversocket.accept()
                        newsock = self._decorateSocket(sock)
                        newsock.setblocking(0)
                        fileno = newsock.fileno()
                        self.connections[fileno] = self._constructWebSocket(
                            newsock, address)
                        self.listeners.append(fileno)
                    except Exception:
                        if sock is not None:
                            sock.close()
                else:
                    if ready not in self.connections:
                        continue
                    client = self.connections[ready]
                    try:
                        client._handleData()
                    except Exception:
                        client.client.close()
                        client.handleClose()
                        del self.connections[ready]
                        self.listeners.remove(ready)

            for failed in xList:
                if failed == self.serversocket:
                    self.close()
                    raise Exception('server socket failed')
                else:
                    if failed not in self.connections:
                        continue
                    client = self.connections[failed]
                    client.client.close()
                    client.handleClose()
                    del self.connections[failed]
                    self.listeners.remove(failed)
class SimpleSSLWebSocketServer(SimpleWebSocketServer):
    """SimpleWebSocketServer variant that wraps accepted sockets in TLS."""

    def __init__(self, host, port, websocketclass, certfile,
                 keyfile, version = ssl.PROTOCOL_TLSv1, selectInterval = 0.1):
        SimpleWebSocketServer.__init__(self, host, port,
                                       websocketclass, selectInterval)

        # One SSLContext for the server's lifetime, loaded with the
        # certificate/key pair given at construction.
        self.context = ssl.SSLContext(version)
        self.context.load_cert_chain(certfile, keyfile)

    def close(self):
        # No SSL-specific cleanup; defer to the base implementation.
        super(SimpleSSLWebSocketServer, self).close()

    def _decorateSocket(self, sock):
        # Perform the server-side TLS handshake on each accepted socket.
        sslsock = self.context.wrap_socket(sock, server_side=True)
        return sslsock

    def _constructWebSocket(self, sock, address):
        # Same as the base class, but flag the connection as TLS.
        ws = self.websocketclass(self, sock, address)
        ws.usingssl = True
        return ws

    def serveforever(self):
        super(SimpleSSLWebSocketServer, self).serveforever()
| |
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides functions for iterating and finding windows/elements"""
from __future__ import unicode_literals
import re
import ctypes
import six
from . import win32functions
from . import win32structures
from . import findbestmatch
from . import controls
from .backend import registry
# TODO: we should filter out invalid elements before returning
#=========================================================================
class WindowNotFoundError(Exception):

    """Raised when no window matching the criteria could be found"""
#=========================================================================
class WindowAmbiguousError(Exception):
    """There was more than one window that matched"""
    pass
#=========================================================================
class ElementNotFoundError(Exception):

    """Raised when no element matching the criteria could be found"""
#=========================================================================
class ElementAmbiguousError(Exception):
    """There was more than one element that matched"""
    pass
#=========================================================================
def find_element(**kwargs):
    """
    Return the single element matching the criteria.

    Delegates to :py:func:`find_elements` with exactly the same keyword
    arguments (see that function for the full parameters description) and
    enforces that exactly one element was found.

    Raises ElementNotFoundError when nothing matches and
    ElementAmbiguousError when more than one element matches.
    """
    matches = find_elements(**kwargs)
    if len(matches) == 0:
        raise ElementNotFoundError(kwargs)
    if len(matches) == 1:
        return matches[0]
    # More than one match: attach the full list to the exception so the
    # caller can inspect the candidates.
    err = ElementAmbiguousError(
        "There are {0} elements that match the criteria {1}".format(
            len(matches),
            six.text_type(kwargs),
        )
    )
    err.elements = matches
    raise err
#=========================================================================
def find_window(**kwargs):
    """
    Return the handle of the single window matching the criteria.

    Forces the 'win32' backend and delegates to :py:func:`find_element`;
    see :py:func:`find_elements` for the full parameters description.
    Element-level lookup errors are translated into the window-level
    exception types.
    """
    kwargs['backend'] = 'win32'
    try:
        return find_element(**kwargs).handle
    except ElementNotFoundError:
        raise WindowNotFoundError
    except ElementAmbiguousError:
        raise WindowAmbiguousError
#=========================================================================
def find_elements(class_name=None,
                  class_name_re=None,
                  parent=None,
                  process=None,
                  title=None,
                  title_re=None,
                  top_level_only=True,
                  visible_only=True,
                  enabled_only=False,
                  best_match=None,
                  handle=None,
                  ctrl_index=None,
                  found_index=None,
                  predicate_func=None,
                  active_only=False,
                  control_id=None,
                  control_type=None,
                  auto_id=None,
                  framework_id=None,
                  backend=None,
                  depth=None
                  ):
    """
    Find elements based on criteria passed in

    WARNING! Direct usage of this function is not recommended! It's a very low level API.
    Better use Application and WindowSpecification objects described in the
    Getting Started Guide.

    Possible values are:

    * **class_name**  Elements with this window class
    * **class_name_re**  Elements whose class matches this regular expression
    * **parent**    Elements that are children of this
    * **process**   Elements running in this process
    * **title**     Elements with this text
    * **title_re**  Elements whose text matches this regular expression
    * **top_level_only** Top level elements only (default=**True**)
    * **visible_only**   Visible elements only (default=**True**)
    * **enabled_only**   Enabled elements only (default=False)
    * **best_match**  Elements with a title similar to this
    * **handle**      The handle of the element to return
    * **ctrl_index**  The index of the child element to return
    * **found_index** The index of the filtered out child element to return
    * **predicate_func** A user provided hook for a custom element validation
    * **active_only**  Active elements only (default=False)
    * **control_id**   Elements with this control id
    * **control_type** Elements with this control type (string; for UIAutomation elements)
    * **auto_id**      Elements with this automation id (for UIAutomation elements)
    * **framework_id** Elements with this framework id (for UIAutomation elements)
    * **backend**      Back-end name to use while searching (default=None means current active backend)
    * **depth**        Depth limit passed through to the element tree walk
      (only used when **top_level_only** is False)

    The filters are applied in a fixed order; ``found_index`` is always
    applied last, after every other criterion has narrowed the list.
    """
    if backend is None:
        backend = registry.active_backend.name
    backend_obj = registry.backends[backend]

    # allow a handle to be passed in
    # if it is present - just return it (no other filter is applied)
    if handle is not None:
        return [backend_obj.element_info_class(handle), ]

    # Normalize ``parent`` to an ElementInfo: unwrap a wrapper object, or
    # treat a bare integer as a native window handle.
    if isinstance(parent, backend_obj.generic_wrapper_class):
        parent = parent.element_info
    elif isinstance(parent, six.integer_types):
        # check if parent is a handle of element (in case of searching native controls)
        parent = backend_obj.element_info_class(parent)

    if top_level_only:
        # find the top level elements (children of the desktop root)
        element = backend_obj.element_info_class()
        # vryabov: we don't use title=title below, because it fixes issue 779:
        # https://github.com/pywinauto/pywinauto/issues/779
        elements = element.children(process=process,
                                    class_name=class_name,
                                    control_type=control_type,
                                    cache_enable=True)

        # if we have been given a parent, keep only its direct children
        if parent:
            elements = [elem for elem in elements if elem.parent == parent]

    # looking for child elements
    else:
        # if not given a parent look for all children of the desktop
        if not parent:
            parent = backend_obj.element_info_class()

        # look for ALL children of that parent
        # vryabov: we don't use title=title below, because it fixes issue 779:
        # https://github.com/pywinauto/pywinauto/issues/779
        elements = parent.descendants(class_name=class_name,
                                      control_type=control_type,
                                      cache_enable=True,
                                      depth=depth)

    # if the ctrl_index has been specified then just return
    # that control (bypasses all remaining filters)
    if ctrl_index is not None:
        return [elements[ctrl_index], ]

    # early stop: nothing matched the initial enumeration
    if not elements:
        if found_index is not None:
            if found_index > 0:
                raise ElementNotFoundError("found_index is specified as {0}, but no windows found".format(
                    found_index))
        return elements

    # Attribute-equality filters; each narrows the candidate list in turn.
    if framework_id is not None and elements:
        elements = [elem for elem in elements if elem.framework_id == framework_id]

    if control_id is not None and elements:
        elements = [elem for elem in elements if elem.control_id == control_id]

    if active_only:
        # TODO: re-write to use ElementInfo interface
        gui_info = win32structures.GUITHREADINFO()
        gui_info.cbSize = ctypes.sizeof(gui_info)

        # get all the active elements (not just the specified process)
        ret = win32functions.GetGUIThreadInfo(0, ctypes.byref(gui_info))
        if not ret:
            raise ctypes.WinError()

        # keep at most the single element whose handle is the active window
        found_active = False
        for elem in elements:
            if elem.handle == gui_info.hwndActive:
                found_active = True
                elements = [elem, ]
                break
        if not found_active:
            elements = []

    # class_name was already passed to children()/descendants() above;
    # this re-check keeps behavior consistent across backends.
    if class_name is not None:
        elements = [elem for elem in elements if elem.class_name == class_name]

    if class_name_re is not None:
        class_name_regex = re.compile(class_name_re)
        elements = [elem for elem in elements if class_name_regex.match(elem.class_name)]

    if process is not None:
        elements = [elem for elem in elements if elem.process_id == process]

    if auto_id is not None and elements:
        elements = [elem for elem in elements if elem.automation_id == auto_id]

    if title is not None:
        # TODO: some magic is happening here
        # NOTE(review): the bare attribute access below appears to warm up
        # the rich_text property/cache before filtering -- confirm intent.
        if elements:
            elements[0].rich_text
        elements = [elem for elem in elements if elem.rich_text == title]
    elif title_re is not None:
        title_regex = re.compile(title_re)

        def _title_match(w):
            """Match a window title to the regexp"""
            t = w.rich_text
            if t is not None:
                return title_regex.match(t)
            return False
        elements = [elem for elem in elements if _title_match(elem)]

    if visible_only:
        elements = [elem for elem in elements if elem.visible]

    if enabled_only:
        elements = [elem for elem in elements if elem.enabled]

    if best_match is not None:
        # Build a list of wrapped controls.
        # Speed up the loop by setting up local pointers
        wrapped_elems = []
        add_to_wrp_elems = wrapped_elems.append
        wrp_cls = backend_obj.generic_wrapper_class
        for elem in elements:
            try:
                add_to_wrp_elems(wrp_cls(elem))
            except (controls.InvalidWindowHandle,
                    controls.InvalidElement):
                # skip invalid handles - they have disappeared
                # since the list of elements was retrieved
                continue
        elements = findbestmatch.find_best_control_matches(best_match, wrapped_elems)

        # convert found elements back to ElementInfo
        backup_elements = elements[:]
        elements = []
        for elem in backup_elements:
            if hasattr(elem, "element_info"):
                elem.element_info.set_cache_strategy(cached=False)
                elements.append(elem.element_info)
            else:
                elements.append(backend_obj.element_info_class(elem.handle))
    else:
        # disable caching for the survivors before returning them
        for elem in elements:
            elem.set_cache_strategy(cached=False)

    if predicate_func is not None:
        elements = [elem for elem in elements if predicate_func(elem)]

    # found_index is the last criterion to filter results
    if found_index is not None:
        if found_index < len(elements):
            elements = elements[found_index:found_index + 1]
        else:
            raise ElementNotFoundError("found_index is specified as {0}, but {1} window/s found".format(
                found_index, len(elements)))

    return elements
#=========================================================================
def find_windows(**kwargs):
    """
    Return the handles of all elements matching the criteria.

    Forces the 'win32' backend and delegates to :py:func:`find_elements`
    with exactly the same arguments; see :py:func:`find_elements` for the
    full parameters description.
    """
    kwargs['backend'] = 'win32'
    try:
        return [elem.handle for elem in find_elements(**kwargs)]
    except ElementNotFoundError:
        raise WindowNotFoundError
#=========================================================================
def enum_windows():
    """Return a list of handles of all the top level windows"""
    handles = []

    def _collect(hwnd, lparam):
        """EnumWindows callback - remember each handle and keep going"""
        handles.append(hwnd)
        return True

    # ctypes prototype for the EnumWindows callback
    callback_type = ctypes.WINFUNCTYPE(
        ctypes.c_int, ctypes.c_long, ctypes.c_long)

    # enumerate every top level window; the callback fires once per HWND
    win32functions.EnumWindows(callback_type(_collect), 0)

    return handles
| |
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from collections import OrderedDict
from functools import partial
import empyrical as ep
import numpy as np
import pandas as pd
import scipy as sp
import scipy.stats as stats
from sklearn import linear_model
from .deprecate import deprecated
from .interesting_periods import PERIODS
from .txn import get_turnover
from .utils import APPROX_BDAYS_PER_MONTH, APPROX_BDAYS_PER_YEAR
from .utils import DAILY
# Shared message used by the @deprecated wrappers below; the canonical
# implementations live in the standalone ``empyrical`` package.
DEPRECATION_WARNING = ("Risk functions in pyfolio.timeseries are deprecated "
                       "and will be removed in a future release. Please "
                       "install the empyrical package instead.")
def var_cov_var_normal(P, c, mu=0, sigma=1):
    """
    Variance-covariance calculation of daily Value-at-Risk in a
    portfolio.

    Parameters
    ----------
    P : float
        Portfolio value.
    c : float
        Confidence level.
    mu : float, optional
        Mean of the assumed normal daily-return distribution.
    sigma : float, optional
        Standard deviation of the assumed normal daily-return distribution.

    Returns
    -------
    float
        Variance-covariance.
    """
    # Normal quantile at the (1 - c) tail of the return distribution.
    alpha = sp.stats.norm.ppf(1 - c, mu, sigma)
    return P - P * (alpha + 1)
@deprecated(msg=DEPRECATION_WARNING)
def max_drawdown(returns):
    """
    Determines the maximum drawdown of a strategy.

    Deprecated: thin pass-through to :func:`empyrical.max_drawdown`.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.

    Returns
    -------
    float
        Maximum drawdown.

    Note
    -----
    See https://en.wikipedia.org/wiki/Drawdown_(economics) for more details.
    """
    return ep.max_drawdown(returns)
@deprecated(msg=DEPRECATION_WARNING)
def annual_return(returns, period=DAILY):
    """
    Determines the mean annual growth rate of returns.

    Deprecated: thin pass-through to :func:`empyrical.annual_return`.

    Parameters
    ----------
    returns : pd.Series
        Periodic returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    period : str, optional
        Periodicity of 'returns' for annualization purposes; one of
        'monthly', 'weekly', or 'daily'. Defaults to 'daily'.

    Returns
    -------
    float
        Annual Return as CAGR (Compounded Annual Growth Rate).
    """
    return ep.annual_return(returns, period=period)
@deprecated(msg=DEPRECATION_WARNING)
def annual_volatility(returns, period=DAILY):
    """
    Determines the annual volatility of a strategy.

    Deprecated: thin pass-through to :func:`empyrical.annual_volatility`.

    Parameters
    ----------
    returns : pd.Series
        Periodic returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    period : str, optional
        Periodicity of 'returns' for annualization purposes; one of
        'monthly', 'weekly', or 'daily'. Defaults to 'daily'.

    Returns
    -------
    float
        Annual volatility.
    """
    return ep.annual_volatility(returns, period=period)
@deprecated(msg=DEPRECATION_WARNING)
def calmar_ratio(returns, period=DAILY):
    """
    Determines the Calmar ratio, or drawdown ratio, of a strategy.

    Deprecated: thin pass-through to :func:`empyrical.calmar_ratio`.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    period : str, optional
        Periodicity of 'returns' for annualization purposes; one of
        'monthly', 'weekly', or 'daily'. Defaults to 'daily'.

    Returns
    -------
    float
        Calmar ratio (drawdown ratio); np.nan when no ratio exists.

    Note
    -----
    See https://en.wikipedia.org/wiki/Calmar_ratio for more details.
    """
    return ep.calmar_ratio(returns, period=period)
@deprecated(msg=DEPRECATION_WARNING)
def omega_ratio(returns, annual_return_threshhold=0.0):
    """
    Determines the Omega ratio of a strategy.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    annual_return_threshhold : float, optional
        Minimum acceptable return of the investor. Annual threshold over which
        returns are considered positive or negative. It is converted to a
        value appropriate for the period of the returns for this ratio.
        E.g. An annual minimum acceptable return of 100 translates to a daily
        minimum acceptable return of 0.01848.
            (1 + 100) ** (1. / 252) - 1 = 0.01848
        Daily returns must exceed this value to be considered positive. The
        daily return yields the desired annual return when compounded over
        the average number of business days in a year.
            (1 + 0.01848) ** 252 - 1 = 99.93
        - Defaults to 0.0
        NOTE: the parameter name carries a historical spelling typo
        ("threshhold"); it is kept as-is for backward compatibility with
        keyword callers.

    Returns
    -------
    float
        Omega ratio.

    Note
    -----
    See https://en.wikipedia.org/wiki/Omega_ratio for more details.
    """
    return ep.omega_ratio(returns,
                          required_return=annual_return_threshhold)
@deprecated(msg=DEPRECATION_WARNING)
def sortino_ratio(returns, required_return=0, period=DAILY):
    """
    Determines the Sortino ratio of a strategy.

    Deprecated: thin pass-through to :func:`empyrical.sortino_ratio`.

    Parameters
    ----------
    returns : pd.Series or pd.DataFrame
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    required_return: float / series
        minimum acceptable return
    period : str, optional
        Periodicity of 'returns' for annualization purposes; one of
        'monthly', 'weekly', or 'daily'. Defaults to 'daily'.

    Returns
    -------
    depends on input type
    series ==> float
    DataFrame ==> np.array

        Annualized Sortino ratio.
    """
    # BUG FIX: ``period`` was accepted but never forwarded, so the
    # annualization period was silently ignored (compare downside_risk,
    # which does forward it).
    return ep.sortino_ratio(returns,
                            required_return=required_return,
                            period=period)
@deprecated(msg=DEPRECATION_WARNING)
def downside_risk(returns, required_return=0, period=DAILY):
    """
    Determines the downside deviation below a threshold.

    Deprecated: thin pass-through to :func:`empyrical.downside_risk`.

    Parameters
    ----------
    returns : pd.Series or pd.DataFrame
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    required_return: float / series
        minimum acceptable return
    period : str, optional
        Periodicity of 'returns' for annualization purposes; one of
        'monthly', 'weekly', or 'daily'. Defaults to 'daily'.

    Returns
    -------
    depends on input type
    series ==> float
    DataFrame ==> np.array

        Annualized downside deviation
    """
    return ep.downside_risk(returns,
                            required_return=required_return,
                            period=period)
@deprecated(msg=DEPRECATION_WARNING)
def sharpe_ratio(returns, risk_free=0, period=DAILY):
    """
    Determines the Sharpe ratio of a strategy.

    Deprecated: thin pass-through to :func:`empyrical.sharpe_ratio`.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    risk_free : int, float
        Constant risk-free return throughout the period.
    period : str, optional
        Periodicity of 'returns' for annualization purposes; one of
        'monthly', 'weekly', or 'daily'. Defaults to 'daily'.

    Returns
    -------
    float
        Sharpe ratio.
    np.nan
        If insufficient length of returns or if adjusted returns are 0.

    Note
    -----
    See https://en.wikipedia.org/wiki/Sharpe_ratio for more details.
    """
    return ep.sharpe_ratio(returns, risk_free=risk_free, period=period)
@deprecated(msg=DEPRECATION_WARNING)
def alpha_beta(returns, factor_returns):
    """
    Calculates both alpha and beta.

    Deprecated: thin pass-through to :func:`empyrical.alpha_beta`.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    factor_returns : pd.Series
        Daily noncumulative returns of the benchmark factor to which betas are
        computed. Usually a benchmark such as market returns.
        - This is in the same style as returns.

    Returns
    -------
    float
        Alpha.
    float
        Beta.
    """
    return ep.alpha_beta(returns, factor_returns=factor_returns)
@deprecated(msg=DEPRECATION_WARNING)
def alpha(returns, factor_returns):
    """
    Calculates annualized alpha.

    Deprecated: thin pass-through to :func:`empyrical.alpha`.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    factor_returns : pd.Series
        Daily noncumulative returns of the benchmark factor to which betas are
        computed. Usually a benchmark such as market returns.
        - This is in the same style as returns.

    Returns
    -------
    float
        Alpha.
    """
    return ep.alpha(returns, factor_returns=factor_returns)
@deprecated(msg=DEPRECATION_WARNING)
def beta(returns, factor_returns):
    """
    Calculates beta.

    Deprecated: thin pass-through to :func:`empyrical.beta`.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    factor_returns : pd.Series
        Daily noncumulative returns of the benchmark factor to which betas are
        computed. Usually a benchmark such as market returns.
        - This is in the same style as returns.

    Returns
    -------
    float
        Beta.
    """
    return ep.beta(returns, factor_returns)
@deprecated(msg=DEPRECATION_WARNING)
def stability_of_timeseries(returns):
    """
    Determines R-squared of a linear fit to the cumulative
    log returns. Computes an ordinary least squares linear fit,
    and returns R-squared.

    Deprecated: thin pass-through to
    :func:`empyrical.stability_of_timeseries`.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.

    Returns
    -------
    float
        R-squared.
    """
    return ep.stability_of_timeseries(returns)
@deprecated(msg=DEPRECATION_WARNING)
def tail_ratio(returns):
    """
    Determines the ratio between the right (95%) and left tail (5%).

    For example, a ratio of 0.25 means that losses are four times
    as bad as profits.

    Deprecated: thin pass-through to :func:`empyrical.tail_ratio`.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.

    Returns
    -------
    float
        tail ratio
    """
    return ep.tail_ratio(returns)
def common_sense_ratio(returns):
    """
    Common sense ratio is the multiplication of the tail ratio and the
    Gain-to-Pain-Ratio -- sum(profits) / sum(losses).

    See http://bit.ly/1ORzGBk for more information on motivation of
    this metric.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.

    Returns
    -------
    float
        common sense ratio
    """
    tail = ep.tail_ratio(returns)
    growth = 1 + ep.annual_return(returns)
    return tail * growth
def normalize(returns, starting_value=1):
    """
    Normalizes a returns timeseries based on the first value.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    starting_value : float, optional
        The starting returns (default 1).

    Returns
    -------
    pd.Series
        Normalized returns.
    """
    # Scale the whole series so the first observation maps to starting_value.
    first = returns.iloc[0]
    return starting_value * (returns / first)
@deprecated(msg=DEPRECATION_WARNING)
def cum_returns(returns, starting_value=0):
    """
    Compute cumulative returns from simple returns.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    starting_value : float, optional
        The starting returns (default 0).

    Returns
    -------
    pandas.Series
        Series of cumulative returns.

    Notes
    -----
    For increased numerical accuracy, convert input to log returns
    where it is possible to sum instead of multiplying.
    """
    return ep.cum_returns(returns, starting_value=starting_value)
@deprecated(msg=DEPRECATION_WARNING)
def aggregate_returns(returns, convert_to):
    """
    Aggregates returns by week, month, or year.

    Deprecated: thin pass-through to :func:`empyrical.aggregate_returns`.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    convert_to : str
        Can be 'weekly', 'monthly', or 'yearly'.

    Returns
    -------
    pd.Series
        Aggregated returns.
    """
    return ep.aggregate_returns(returns, convert_to=convert_to)
def rolling_beta(returns, factor_returns,
                 rolling_window=APPROX_BDAYS_PER_MONTH * 6):
    """
    Determines the rolling beta of a strategy.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    factor_returns : pd.Series or pd.DataFrame
        Daily noncumulative returns of the benchmark factor to which betas are
        computed. Usually a benchmark such as market returns.
        - If DataFrame is passed, computes rolling beta for each column.
        - This is in the same style as returns.
    rolling_window : int, optional
        The size of the rolling window, in days, over which to compute
        beta (default 6 months).

    Returns
    -------
    pd.Series
        Rolling beta.

    Note
    -----
    See https://en.wikipedia.org/wiki/Beta_(finance) for more details.
    """
    if factor_returns.ndim > 1:
        # Apply column-wise: recurse once per factor column; the extra
        # rolling_window kwarg is forwarded to each recursive call by apply.
        return factor_returns.apply(partial(rolling_beta, returns),
                                    rolling_window=rolling_window)
    else:
        # NOTE(review): an all-NaN Series aligned to returns; entries before
        # the first full window remain NaN.
        out = pd.Series(index=returns.index)
        # Slide a window of `rolling_window` observations; label each beta
        # with the window's end date.
        for beg, end in zip(returns.index[0:-rolling_window],
                            returns.index[rolling_window:]):
            out.loc[end] = ep.beta(
                returns.loc[beg:end],
                factor_returns.loc[beg:end])

        return out
def rolling_regression(returns, factor_returns,
                       rolling_window=APPROX_BDAYS_PER_MONTH * 6,
                       nan_threshold=0.1):
    """
    Computes rolling factor betas using a multivariate linear regression
    (separate linear regressions is problematic because the factors may be
    confounded).

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    factor_returns : pd.DataFrame
        Daily noncumulative returns of the benchmark factor to which betas are
        computed. Usually a benchmark such as market returns.
        - Computes rolling beta for each column.
        - This is in the same style as returns.
    rolling_window : int, optional
        The days window over which to compute the beta. Defaults to 6 months.
    nan_threshold : float, optional
        If there are more than this fraction of NaNs, the rolling regression
        for the given date will be skipped.

    Returns
    -------
    pandas.DataFrame
        DataFrame containing rolling beta coefficients to SMB, HML and UMD
    """
    # We need to drop NaNs to regress
    ret_no_na = returns.dropna()

    columns = ['alpha'] + factor_returns.columns.tolist()
    rolling_risk = pd.DataFrame(columns=columns,
                                index=ret_no_na.index)

    rolling_risk.index.name = 'dt'

    for beg, end in zip(ret_no_na.index[:-rolling_window],
                        ret_no_na.index[rolling_window:]):
        returns_period = ret_no_na[beg:end]
        factor_returns_period = factor_returns.loc[returns_period.index]

        # BUG FIX: the original expression ``np.all(...) < nan_threshold``
        # compared a boolean to the threshold (operator precedence error),
        # inverting the intended check.  The intent, per the docstring, is:
        # run the regression only when every factor's NaN fraction is below
        # the threshold.
        if np.all(factor_returns_period.isnull().mean() < nan_threshold):
            factor_returns_period_dnan = factor_returns_period.dropna()
            reg = linear_model.LinearRegression(fit_intercept=True).fit(
                factor_returns_period_dnan,
                returns_period.loc[factor_returns_period_dnan.index])
            rolling_risk.loc[end, factor_returns.columns] = reg.coef_
            rolling_risk.loc[end, 'alpha'] = reg.intercept_

    return rolling_risk
def gross_lev(positions):
    """
    Calculates the gross leverage of a strategy.

    Gross leverage is the sum of absolute non-cash exposures divided by
    the total portfolio value (all positions including cash).

    Parameters
    ----------
    positions : pd.DataFrame
        Daily net position values.
        - See full explanation in tears.create_full_tear_sheet.

    Returns
    -------
    pd.Series
        Gross leverage.
    """
    non_cash = positions.drop('cash', axis=1)
    exposure = non_cash.abs().sum(axis=1)
    return exposure / positions.sum(axis=1)
def value_at_risk(returns, period=None, sigma=2.0):
    """
    Get value at risk (VaR).

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    period : str, optional
        Period over which to calculate VaR. Set to 'weekly',
        'monthly', or 'yearly', otherwise defaults to period of
        returns (typically daily).
    sigma : float, optional
        Standard deviations of VaR, default 2.
    """
    # Optionally resample to the requested period before measuring.
    if period is None:
        agg_returns = returns.copy()
    else:
        agg_returns = ep.aggregate_returns(returns, period)

    # VaR as mean minus sigma standard deviations.
    return agg_returns.mean() - sigma * agg_returns.std()
# Single-series metrics applied to ``returns`` alone by perf_stats /
# perf_stats_bootstrap.
SIMPLE_STAT_FUNCS = [
    ep.annual_return,
    ep.cum_returns_final,
    ep.annual_volatility,
    ep.sharpe_ratio,
    ep.calmar_ratio,
    ep.stability_of_timeseries,
    ep.max_drawdown,
    ep.omega_ratio,
    ep.sortino_ratio,
    stats.skew,
    stats.kurtosis,
    ep.tail_ratio,
    value_at_risk
]

# Metrics that additionally require benchmark/factor returns.
FACTOR_STAT_FUNCS = [
    ep.alpha,
    ep.beta,
]

# Maps each stat function's __name__ to the human-readable row label used
# in the perf-stats tables.
STAT_FUNC_NAMES = {
    'annual_return': 'Annual return',
    'cum_returns_final': 'Cumulative returns',
    'annual_volatility': 'Annual volatility',
    'sharpe_ratio': 'Sharpe ratio',
    'calmar_ratio': 'Calmar ratio',
    'stability_of_timeseries': 'Stability',
    'max_drawdown': 'Max drawdown',
    'omega_ratio': 'Omega ratio',
    'sortino_ratio': 'Sortino ratio',
    'skew': 'Skew',
    'kurtosis': 'Kurtosis',
    'tail_ratio': 'Tail ratio',
    'common_sense_ratio': 'Common sense ratio',
    'value_at_risk': 'Daily value at risk',
    'alpha': 'Alpha',
    'beta': 'Beta',
}
def perf_stats(returns, factor_returns=None, positions=None,
               transactions=None, turnover_denom='AGB'):
    """
    Calculates various performance metrics of a strategy, for use in
    plotting.show_perf_stats.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    factor_returns : pd.Series, optional
        Daily noncumulative returns of the benchmark factor to which betas are
        computed. Usually a benchmark such as market returns.
        - This is in the same style as returns.
        - If None, do not compute alpha, beta, and information ratio.
    positions : pd.DataFrame
        Daily net position values.
        - See full explanation in tears.create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in tears.create_full_tear_sheet.
    turnover_denom : str
        Either AGB or portfolio_value, default AGB.
        - See full explanation in txn.get_turnover.

    Returns
    -------
    pd.Series
        Performance metrics.
    """
    # Local name deliberately distinct from the ``scipy.stats`` module
    # imported at file scope as ``stats``.
    metrics = pd.Series()
    for func in SIMPLE_STAT_FUNCS:
        metrics[STAT_FUNC_NAMES[func.__name__]] = func(returns)

    if positions is not None:
        metrics['Gross leverage'] = gross_lev(positions).mean()

        # Turnover needs the positions frame, hence the nesting.
        if transactions is not None:
            metrics['Daily turnover'] = get_turnover(positions,
                                                     transactions,
                                                     turnover_denom).mean()
    if factor_returns is not None:
        for func in FACTOR_STAT_FUNCS:
            metrics[STAT_FUNC_NAMES[func.__name__]] = func(returns,
                                                           factor_returns)

    return metrics
def perf_stats_bootstrap(returns, factor_returns=None, return_stats=True,
                         **kwargs):
    """Calculates various bootstrapped performance metrics of a strategy.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    factor_returns : pd.Series, optional
        Daily noncumulative returns of the benchmark factor to which betas are
        computed. Usually a benchmark such as market returns.
        - This is in the same style as returns.
        - If None, do not compute alpha, beta, and information ratio.
    return_stats : boolean (optional)
        If True, returns a DataFrame of mean, median, 5 and 95 percentiles
        for each perf metric.
        If False, returns a DataFrame with the bootstrap samples for
        each perf metric.

    Returns
    -------
    pd.DataFrame
        if return_stats is True:
        - Distributional statistics of bootstrapped sampling
        distribution of performance metrics.
        if return_stats is False:
        - Bootstrap samples for each performance metric.
    """
    samples = OrderedDict()
    # Bootstrap the returns-only metrics first.
    for func in SIMPLE_STAT_FUNCS:
        label = STAT_FUNC_NAMES[func.__name__]
        samples[label] = calc_bootstrap(func, returns)

    # Then the metrics that also need the benchmark series.
    if factor_returns is not None:
        for func in FACTOR_STAT_FUNCS:
            label = STAT_FUNC_NAMES[func.__name__]
            samples[label] = calc_bootstrap(
                func,
                returns,
                factor_returns=factor_returns)

    samples = pd.DataFrame(samples)

    if not return_stats:
        return samples

    summary = samples.apply(calc_distribution_stats)
    return summary.T[['mean', 'median', '5%', '95%']]
def calc_bootstrap(func, returns, *args, **kwargs):
    """Performs a bootstrap analysis on a user-defined function returning
    a summary statistic.

    Parameters
    ----------
    func : function
        Function that either takes a single array (commonly returns)
        or two arrays (commonly returns and factor returns) and
        returns a single value (commonly a summary
        statistic). Additional args and kwargs are passed as well.
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    factor_returns : pd.Series, optional
        Daily noncumulative returns of the benchmark factor to which betas are
        computed. Usually a benchmark such as market returns.
        - This is in the same style as returns.
    n_samples : int, optional
        Number of bootstrap samples to draw. Default is 1000.
        Increasing this will lead to more stable / accurate estimates.

    Returns
    -------
    numpy.ndarray
        Bootstrapped sampling distribution of passed in func.
    """
    # Pop our own keyword arguments so the rest flow through to ``func``.
    n_samples = kwargs.pop('n_samples', 1000)
    factor_returns = kwargs.pop('factor_returns', None)

    out = np.empty(n_samples)
    for i in range(n_samples):
        # Resample with replacement; both series share the same draw so
        # paired observations stay paired.
        idx = np.random.randint(len(returns), size=len(returns))
        sample = returns.iloc[idx].reset_index(drop=True)
        if factor_returns is None:
            out[i] = func(sample, *args, **kwargs)
        else:
            factor_sample = factor_returns.iloc[idx].reset_index(drop=True)
            out[i] = func(sample, factor_sample, *args, **kwargs)

    return out
def calc_distribution_stats(x):
    """Calculate various summary statistics of data.

    Parameters
    ----------
    x : numpy.ndarray or pandas.Series
        Array to compute summary statistics for.

    Returns
    -------
    pandas.Series
        Series containing mean, median, std, the 5/25/75/95 percentiles
        and the inter-quartile range (IQR) of the passed-in values.
    """
    # Compute all needed percentiles in one pass.
    p5, p25, p75, p95 = np.percentile(x, [5, 25, 75, 95])
    summary = {
        'mean': np.mean(x),
        'median': np.median(x),
        'std': np.std(x),
        '5%': p5,
        '25%': p25,
        '75%': p75,
        '95%': p95,
        'IQR': p75 - p25,
    }
    return pd.Series(summary)
def get_max_drawdown_underwater(underwater):
    """
    Determines peak, valley, and recovery dates given an 'underwater'
    series (precomputed rolling drawdown, 0 at highs, negative below).

    Parameters
    ----------
    underwater : pd.Series
        Underwater returns (rolling drawdown) of a strategy.

    Returns
    -------
    peak : datetime
        The maximum drawdown's peak.
    valley : datetime
        The maximum drawdown's valley.
    recovery : datetime
        The maximum drawdown's recovery (NaN when not yet recovered).
    """
    # The valley is where the curve is deepest.
    valley = underwater.idxmin()
    before = underwater[:valley]
    after = underwater[valley:]
    # Peak: last time at a high-water mark (value 0) before the valley.
    peak = before[before == 0].index[-1]
    # Recovery: first return to a high-water mark after the valley, if any.
    recovered = after[after == 0]
    if len(recovered) > 0:
        recovery = recovered.index[0]
    else:
        recovery = np.nan  # drawdown not recovered
    return peak, valley, recovery
def get_max_drawdown(returns):
    """
    Determines the peak, valley, and recovery dates of the maximum
    drawdown of a strategy.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.

    Returns
    -------
    peak : datetime
        The maximum drawdown's peak.
    valley : datetime
        The maximum drawdown's valley.
    recovery : datetime
        The maximum drawdown's recovery (NaN when not yet recovered).

    Note
    -----
    See https://en.wikipedia.org/wiki/Drawdown_(economics) for more details.
    """
    # NOTE(review): the previous docstring claimed a single float was
    # returned; this function actually forwards the (peak, valley,
    # recovery) tuple from get_max_drawdown_underwater.
    returns = returns.copy()
    df_cum = cum_returns(returns, 1.0)
    # Running high-water mark of the cumulative return curve.
    running_max = np.maximum.accumulate(df_cum)
    # Fractional distance below the high-water mark (0 at highs).
    underwater = df_cum / running_max - 1
    return get_max_drawdown_underwater(underwater)
def get_top_drawdowns(returns, top=10):
    """
    Finds top drawdowns, sorted by drawdown amount.
    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        The amount of top drawdowns to find (default 10).
    Returns
    -------
    drawdowns : list
        List of drawdown peaks, valleys, and recoveries. See get_max_drawdown.
    """
    returns = returns.copy()
    df_cum = ep.cum_returns(returns, 1.0)
    # Running high-water mark; underwater is the fractional drop below it.
    running_max = np.maximum.accumulate(df_cum)
    underwater = df_cum / running_max - 1
    drawdowns = []
    # Greedy extraction: repeatedly take the deepest remaining drawdown,
    # then remove its span from the underwater curve so the next iteration
    # finds the next-deepest one.
    for _ in range(top):
        peak, valley, recovery = get_max_drawdown_underwater(underwater)
        # Slice out draw-down period
        if not pd.isnull(recovery):
            # Keep the peak/recovery endpoints (index[1:-1]) so adjacent
            # drawdowns remain detectable in later iterations.
            underwater.drop(underwater[peak: recovery].index[1:-1],
                            inplace=True)
        else:
            # drawdown has not ended yet
            underwater = underwater.loc[:peak]
        drawdowns.append((peak, valley, recovery))
        # Stop early once the curve is exhausted or never dips below a high.
        if ((len(returns) == 0)
                or (len(underwater) == 0)
                or (np.min(underwater) == 0)):
            break
    return drawdowns
def gen_drawdown_table(returns, top=10):
    """
    Places top drawdowns in a table.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        The amount of top drawdowns to find (default 10).

    Returns
    -------
    df_drawdowns : pd.DataFrame
        Information about top drawdowns (depth, peak/valley/recovery
        dates and business-day duration).
    """
    df_cum = ep.cum_returns(returns, 1.0)
    periods = get_top_drawdowns(returns, top=top)
    columns = ['Net drawdown in %',
               'Peak date',
               'Valley date',
               'Recovery date',
               'Duration']
    table = pd.DataFrame(index=list(range(top)), columns=columns)
    for row, (peak, valley, recovery) in enumerate(periods):
        # Duration in business days; unknown while still in drawdown.
        if pd.isnull(recovery):
            table.loc[row, 'Duration'] = np.nan
        else:
            table.loc[row, 'Duration'] = len(
                pd.date_range(peak, recovery, freq='B'))
        table.loc[row, 'Peak date'] = (
            peak.to_pydatetime().strftime('%Y-%m-%d'))
        table.loc[row, 'Valley date'] = (
            valley.to_pydatetime().strftime('%Y-%m-%d'))
        # An unrecovered drawdown yields a float NaN instead of a timestamp.
        if isinstance(recovery, float):
            table.loc[row, 'Recovery date'] = recovery
        else:
            table.loc[row, 'Recovery date'] = (
                recovery.to_pydatetime().strftime('%Y-%m-%d'))
        peak_value = df_cum.loc[peak]
        table.loc[row, 'Net drawdown in %'] = (
            (peak_value - df_cum.loc[valley]) / peak_value) * 100
    # Convert the formatted strings back into proper datetime columns.
    for col in ('Peak date', 'Valley date', 'Recovery date'):
        table[col] = pd.to_datetime(table[col])
    return table
def rolling_volatility(returns, rolling_vol_window):
    """
    Determines the rolling volatility of a strategy.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    rolling_vol_window : int
        Length of rolling window, in days, over which to compute.

    Returns
    -------
    pd.Series
        Rolling volatility, annualized by sqrt(business days per year).
    """
    windowed_std = returns.rolling(rolling_vol_window).std()
    return windowed_std * np.sqrt(APPROX_BDAYS_PER_YEAR)
def rolling_sharpe(returns, rolling_sharpe_window):
    """
    Determines the rolling Sharpe ratio of a strategy.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    rolling_sharpe_window : int
        Length of rolling window, in days, over which to compute.

    Returns
    -------
    pd.Series
        Rolling Sharpe ratio, annualized by sqrt(business days per year).

    Note
    -----
    See https://en.wikipedia.org/wiki/Sharpe_ratio for more details.
    """
    window = returns.rolling(rolling_sharpe_window)
    annualization = np.sqrt(APPROX_BDAYS_PER_YEAR)
    return window.mean() / window.std() * annualization
def simulate_paths(is_returns, num_days,
                   starting_value=1, num_samples=1000, random_seed=None):
    """
    Generate alternate paths using available values from in-sample returns.

    Parameters
    ----------
    is_returns : pandas.core.frame.DataFrame
        Non-cumulative in-sample returns.
    num_days : int
        Number of days to project the probability cone forward.
    starting_value : int or float
        Starting value of the out of sample period.
        NOTE: accepted for interface symmetry with summarize_paths but not
        used here -- the starting value is applied when paths are cumulated.
    num_samples : int
        Number of samples to draw from the in-sample daily returns.
        Each sample will be an array with length num_days.
        A higher number of samples will generate a more accurate
        bootstrap cone.
    random_seed : int
        Seed for the pseudorandom number generator used by the pandas
        sample method.

    Returns
    -------
    samples : numpy.ndarray
        Shape (num_samples, num_days); each row is one resampled path of
        daily (non-cumulative) returns.
    """
    # Fixed "Gnerate" typo in the original docstring.
    samples = np.empty((num_samples, num_days))
    # One shared RandomState keeps the whole simulation reproducible.
    seed = np.random.RandomState(seed=random_seed)
    for i in range(num_samples):
        # Draw num_days returns with replacement from the in-sample set.
        samples[i, :] = is_returns.sample(num_days, replace=True,
                                          random_state=seed)
    return samples
def summarize_paths(samples, cone_std=(1., 1.5, 2.), starting_value=1.):
    """
    Generate the upper and lower bounds of an n standard deviation
    cone of forecasted cumulative returns.

    Parameters
    ----------
    samples : numpy.ndarray
        Alternative paths, or series of possible outcomes.
    cone_std : int, float, or list of int/float
        Number of standard deviations to use in the boundaries of
        the cone. If multiple values are passed, cone bounds will
        be generated for each value.
    starting_value : int or float
        Starting value the cumulative paths are anchored at.

    Returns
    -------
    cone_bounds : pandas.core.frame.DataFrame
        One column per +/- cone_std value (float column labels).
    """
    # Fixed "Gnerate" typo in the original docstring.
    cum_samples = ep.cum_returns(samples.T,
                                 starting_value=starting_value).T
    cum_mean = cum_samples.mean(axis=0)
    cum_std = cum_samples.std(axis=0)
    if isinstance(cone_std, (float, int)):
        cone_std = [cone_std]
    # pd.Float64Index was removed in pandas 2.0; a plain float64 Index is
    # equivalent and works across pandas versions.
    cone_bounds = pd.DataFrame(columns=pd.Index([], dtype='float64'))
    for num_std in cone_std:
        cone_bounds.loc[:, float(num_std)] = cum_mean + cum_std * num_std
        cone_bounds.loc[:, float(-num_std)] = cum_mean - cum_std * num_std
    return cone_bounds
def forecast_cone_bootstrap(is_returns, num_days, cone_std=(1., 1.5, 2.),
                            starting_value=1, num_samples=1000,
                            random_seed=None):
    """
    Determines the upper and lower bounds of an n standard deviation
    cone of forecasted cumulative returns. Future cumulative mean and
    standard deviation are estimated by repeatedly resampling the
    in-sample daily returns (bootstrap), so no normality assumption
    is made.

    Parameters
    ----------
    is_returns : pd.Series
        In-sample daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    num_days : int
        Number of days to project the probability cone forward.
    cone_std : int, float, or list of int/float
        Number of standard deviations to use in the boundaries of
        the cone. Multiple values produce multiple bound pairs.
    starting_value : int or float
        Starting value of the out of sample period.
    num_samples : int
        Number of bootstrap samples to draw; more samples give a more
        accurate cone.
    random_seed : int
        Seed for the pseudorandom number generator used by the pandas
        sample method.

    Returns
    -------
    pd.DataFrame
        Upper and lower cone boundaries; column labels are the signed
        numbers of standard deviations from the projected mean.
    """
    # Step 1: resample in-sample returns into alternative future paths.
    paths = simulate_paths(
        is_returns=is_returns,
        num_days=num_days,
        starting_value=starting_value,
        num_samples=num_samples,
        random_seed=random_seed,
    )
    # Step 2: cumulate the paths and reduce them to +/- n-sigma bounds.
    return summarize_paths(
        samples=paths,
        cone_std=cone_std,
        starting_value=starting_value,
    )
def extract_interesting_date_ranges(returns, periods=None):
    """
    Extracts returns based on interesting events. See
    gen_date_range_interesting.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    periods : dict, optional
        Mapping of event name -> (start, end) timestamps. Defaults to the
        module-level PERIODS table.

    Returns
    -------
    ranges : OrderedDict
        Date ranges, with returns, of all valid events.
    """
    if periods is None:
        periods = PERIODS
    returns_dupe = returns.copy()
    returns_dupe.index = returns_dupe.index.map(pd.Timestamp)
    ranges = OrderedDict()
    for name, (start, end) in periods.items():
        try:
            period = returns_dupe.loc[start:end]
        except Exception:
            # Slicing can fail for labels incompatible with the index; skip
            # the event rather than aborting the whole extraction.
            # (Previously caught BaseException, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            continue
        if len(period) == 0:
            # Event window lies outside the available history.
            continue
        ranges[name] = period
    return ranges
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File: alsamixer_webui.py
# Date: 24. 1. 2016
# Author: Jiri Skorpil <jiri.sko@gmail.com>
# Desc.: ALSA Mixer WebUI - main application
#
import sys
import re
import os
import errno
from subprocess import call, Popen, PIPE
import socket
import json
from flask import Flask, Response
import argparse
try:
# Python 2.x
import ConfigParser
except ImportError:
# Python 3.x
import configparser as ConfigParser
CONFIG_FILE = '/etc/amixer-webui.conf'
DEFAULT_HOST = '0.0.0.0'
DEFAULT_PORT = '8080'
class Handler(Flask):
    """Flask application that reads/writes ALSA mixer state.

    All mixer interaction happens by spawning the ``amixer`` CLI and parsing
    its textual output; no native ALSA bindings are used.
    """

    # Currently selected sound card number (None = amixer's default card).
    card = None
    # When True, commands address the ALSA "equal" (equalizer) device.
    equal = False
    # Sentinel card number representing the PulseAudio virtual device.
    PULSE_AUDIO_DEVICE_NUMBER = 99999
    def __init__(self, *args, **kwargs):
        Flask.__init__(self, *args, **kwargs)
    def __get_amixer_command__(self):
        """Build the base ``amixer`` argument list for the current card/device."""
        command = ["amixer"]
        if self.card == self.PULSE_AUDIO_DEVICE_NUMBER:
            command += ["-D", "pulse"]
        elif self.card is not None:
            command += ["-c", "%d" % self.card]
        if self.equal is True:
            # The equalizer plugin is addressed as a device, not a card.
            command += ["-D", "equal"]
        return command
    @staticmethod
    def __get_channel_name__(desc, name, i):
        """Return the i-th channel label for control *name*, or None.

        *desc* is a list of "Simple mixer control ..." text sections
        (from ``amixer`` piped through grep); the matching section's
        channels line is split into individual channel names.
        """
        for control in desc:
            lines = control.split("\n")
            # First line looks like "'Master',0" -> strip quote and ",N".
            control_name = re.sub("',[0-9]+", "", lines[0][1:])
            if control_name not in name:
                continue
            for line in lines[1:]:
                # Match the "Playback"/"Capture" channels line; *name* ends
                # with e.g. "... Playback Volume".
                if name.split(" ")[-2] in line:
                    names = line.split(": ")[1].split(" - ")
                    return names[i]
        return None
    def __get_cards__(self):
        """Return {card_number: card_name}, adding PulseAudio when available."""
        system_cards = []
        try:
            with open("/proc/asound/cards", 'rt') as f:
                for l in f.readlines():
                    if ']:' in l:
                        system_cards.append(l.strip())
        except IOError as e:
            # Missing /proc file simply means "no sound cards"; anything
            # else is unexpected and re-raised.
            if e.errno != errno.ENOENT:
                raise e
        cards = {}
        for i in system_cards:
            card_number = i.split(" [")[0].strip()
            card_detail = Popen(["amixer", "-c", card_number, "info"], stdout=PIPE).communicate()[0]
            cards[card_number] = self.__decode_string(card_detail).split("\n")[1].split(":")[1].replace("'", "").strip()
        # Probe for PulseAudio; exit status 0 means the device exists.
        pulse = Popen(["amixer", "-D", "pulse", "info"], stdout=PIPE)
        pulse.communicate()
        if pulse.wait() == 0:
            cards[self.PULSE_AUDIO_DEVICE_NUMBER] = "PulseAudio"
        return cards
    def __get_controls__(self):
        """Parse ``amixer contents`` into a list of control dicts.

        Each dict carries id/iface/name/type/access plus type-specific keys:
        items+values (ENUMERATED), values (BOOLEAN), or min/max/step/values
        and optional channel names (INTEGER).
        """
        try:
            amixer = Popen(self.__get_amixer_command__(), stdout=PIPE)
            # Channel names come from the plain listing, filtered via grep.
            amixer_channels = Popen(["grep", "-e", "control", "-e", "channels"], stdin=amixer.stdout, stdout=PIPE)
            amixer_chandesc = self.__decode_string(amixer_channels.communicate()[0]).split("Simple mixer control ")[1:]
            amixer_contents = self.__decode_string(
                Popen(self.__get_amixer_command__() + ["contents"], stdout=PIPE).communicate()[0])
        except OSError:
            # amixer (or grep) is not installed / not executable.
            return []
        interfaces = []
        # Each control section in the output begins with "numid=".
        for i in amixer_contents.split("numid=")[1:]:
            lines = i.split("\n")
            interface = {
                "id": int(lines[0].split(",")[0]),
                "iface": lines[0].split(",")[1].replace("iface=", ""),
                "name": lines[0].split(",")[2].replace("name=", "").replace("'", ""),
                "type": lines[1].split(",")[0].replace(" ; type=", ""),
                "access": lines[1].split(",")[1].replace("access=", ""),
            }
            if interface["type"] == "ENUMERATED":
                # Middle lines enumerate the selectable items.
                items = {}
                for line in lines[2:-2]:
                    pcs = line.split(" '")
                    id = pcs[0].replace(" ; Item #", "")
                    name = pcs[1][:-1]
                    items[id] = name
                interface["items"] = items
                interface["values"] = []
                for value in lines[-2].replace(" : values=", "").split(","):
                    interface["values"].append(int(value))
            elif interface["type"] == "BOOLEAN":
                interface["values"] = []
                for value in lines[-2].replace(" : values=", "").split(","):
                    interface["values"].append(True if value == "on" else False)
            elif interface["type"] == "INTEGER":
                interface["min"] = int(lines[1].split(",")[3].replace("min=", ""))
                interface["max"] = int(lines[1].split(",")[4].replace("max=", ""))
                interface["step"] = int(lines[1].split(",")[5].replace("step=", ""))
                # The values line is not always last; search from the end.
                line = ""
                for j in reversed(lines):
                    if " : values=" in j:
                        line = j
                        break
                interface["values"] = []
                interface["channels"] = []
                i = 0
                for value in line.replace(" : values=", "").split(","):
                    interface["values"].append(value)
                    channel_desc = self.__get_channel_name__(amixer_chandesc, interface["name"], i)
                    if channel_desc is not None:
                        interface["channels"].append(channel_desc)
                    i += 1
                # Only expose channel names when every value got one.
                if len(interface["channels"]) != len(interface["values"]):
                    interface.pop("channels", None)
            interfaces.append(interface)
        return interfaces
    def __get_equalizer__(self):
        """Return controls of the "equal" device (temporarily sets self.equal)."""
        self.equal = True
        data = self.__get_controls__()
        self.equal = False
        return data
    def __change_volume__(self, num_id, volumes_path):
        """Apply per-channel volume values to control *num_id* via ``amixer cset``.

        *volumes_path* is a list of strings; empty and non-numeric entries
        are dropped before being joined into amixer's comma-separated form.
        """
        volumes = []
        for volume in volumes_path:
            # is_digit is the module-level helper defined below the class.
            if volume != "" and is_digit(volume):
                volumes.append(volume)
        command = self.__get_amixer_command__() + ["cset", "numid=%s" % num_id, "--", ",".join(volumes)]
        call(command)
    @staticmethod
    def __decode_string(string):
        """Decode subprocess byte output to text (assumes UTF-8 output)."""
        return string.decode("utf-8")
def is_digit(n):
    """Return True when *n* can be parsed as an integer, False otherwise."""
    try:
        int(n)
    except ValueError:
        return False
    return True
# Flask application instance; serves the bundled web UI from ``htdocs``
# at the site root (no static URL prefix).
app = Handler(__name__, static_folder='htdocs', static_url_path='')
@app.route('/')
def index():
    """Serve the web UI entry page (GET /)."""
    page = app.send_static_file('index.html')
    return page
@app.route('/hostname/')
def get_hostname():
    """Serve this server's hostname as plain text."""
    hostname = socket.gethostname()
    return hostname
@app.route('/cards/')
def get_cards():
    """Sends list of sound cards [JSON object - <number:Number>:<name:String>]"""
    payload = json.dumps(app.__get_cards__())
    return Response(response=payload, status=200, mimetype="application/json")
@app.route('/card/')
def get_card():
    """Sends number of selected sound card [JSON - <Number|null>]"""
    payload = json.dumps(app.card)
    return Response(response=payload, status=200, mimetype="application/json")
@app.route('/controls/')
def get_controls():
    """Sends list of controls of selected sound card [JSON - list of objects: {
    --- common keys ---
    access: <String>
    id: <Number>
    iface: <String>
    name: <String>
    type: <ENUMERATED|BOOLEAN|INTEGER:String>
    --- for type ENUMERATED ---
    items: <Object {<number:Number>:<name:String>}>
    values: [<Number> - selected item]
    --- for type BOOLEAN ---
    values: [true|false]
    --- for type INTEGER ---
    channels: <Array of String> - channel names
    min: <Number>
    max: <Number>
    step: <Number>
    values: <Array of Number> - channel values (order corresponds with order in `channels` key)
    }]"""
    payload = json.dumps(app.__get_controls__())
    return Response(response=payload, status=200, mimetype="application/json")
@app.route('/equalizer/')
def get_equalizer():
    """Sends list of equalizer controls [same as /controls/ but contains only controls of INTEGER type]"""
    payload = json.dumps(app.__get_equalizer__())
    return Response(response=payload, status=200, mimetype="application/json")
@app.route('/control/<int:control_id>/<int:status>/', methods=['PUT'])
def put_control(control_id, status):
    """Turns BOOLEAN control on or off"""
    # Reject invalid control ids and anything but the two boolean states.
    if control_id <= 0 or status not in (0, 1):
        return ''
    state = 'on' if status == 1 else 'off'
    call(app.__get_amixer_command__() + ["cset", "numid=%s" % control_id, "--", state])
    # Persist mixer state when running with sufficient privileges.
    if os.geteuid() == 0:
        call(["alsactl", "store"])
    return ''
@app.route('/source/<int:control_id>/<int:item>/', methods=['PUT'])
def put_source(control_id, item):
    """Changes active ENUMERATED item"""
    if control_id <= 0:
        return ''
    command = app.__get_amixer_command__()
    call(command + ["cset", "numid=%s" % control_id, "--", str(item)])
    # Persist mixer state when running with sufficient privileges.
    if os.geteuid() == 0:
        call(["alsactl", "store"])
    return ''
@app.route('/volume/<int:control_id>/<path:volume_path>', methods=['PUT'])
def put_volume(control_id, volume_path):
    """Changes INTEGER channel volumes"""
    # The URL path carries one value per channel, '/'-separated.
    levels = volume_path.split('/')
    app.__change_volume__(control_id, levels)
    # Persist mixer state when running with sufficient privileges.
    if os.geteuid() == 0:
        call(["alsactl", "store"])
    return ''
@app.route('/equalizer/<int:control_id>/<path:level_path>', methods=['PUT'])
def put_equalizer(control_id, level_path):
    """Changes equalizer channel values"""
    # The equalizer is a device, not a card: temporarily clear the card
    # selection while the change is applied, then restore it.
    saved_card = app.card
    app.equal = True
    app.card = None
    app.__change_volume__(control_id, level_path.split('/'))
    app.equal = False
    app.card = saved_card
    # Persist mixer state when running with sufficient privileges.
    if os.geteuid() == 0:
        call(["alsactl", "store"])
    return ''
@app.route('/card/<int:card_id>/', methods=['PUT'])
def put_card(card_id):
    """Changes selected sound card"""
    # Subsequent amixer commands will target this card number.
    app.card = card_id
    return ''
@app.after_request
def set_server_header(response):
    """Stamp every outgoing response with a custom Server header."""
    response.headers["Server"] = "ALSA Mixer webserver"
    return response
def main():
    """Parse CLI arguments / config file and start the web server.

    Host/port precedence: command line > config file > module defaults.
    On a broken-pipe socket error the server is restarted by recursing.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-l", "--host", type=str)
    parser.add_argument("-p", "--port", type=int)
    parser.add_argument("-d", "--debug", action="store_true")
    args = parser.parse_args()
    if os.path.isfile(CONFIG_FILE):
        config = ConfigParser.RawConfigParser()
        config.read(CONFIG_FILE)
        if args.host is None:
            args.host = config.get('amixer-webui', 'host')
        if args.port is None:
            port = config.get('amixer-webui', 'port')
            if is_digit(port):
                args.port = int(port)
    if not args.host:
        # Covers both "no --host given" (None) and an empty config value.
        # Previously a missing config file left host as None, so Flask
        # bound to 127.0.0.1 instead of the intended DEFAULT_HOST.
        args.host = DEFAULT_HOST
    if args.port is None:
        # DEFAULT_PORT is declared as a string; app.run expects an int.
        args.port = int(DEFAULT_PORT)
    try:
        app.run(**vars(args))
    except socket.error as e:
        if e.errno == errno.EPIPE:
            # Broken pipe from a client: restart the server loop.
            main()
        else:
            raise e
# Script entry point: run the server; exit with status 0 once main returns.
if __name__ == "__main__":
    main()
    sys.exit(0)
# end of alsamixer_webui.py
| |
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testable usage examples for Google BigQuery API wrapper
Each example function takes a ``client`` argument (which must be an instance
of :class:`google.cloud.bigquery.client.Client`) and uses it to perform a task
with the API.
To facilitate running the examples as system tests, each example is also passed
a ``to_delete`` list; the function adds to the list any objects created which
need to be deleted during teardown.
"""
import os
import time
import pytest
import six
try:
import fastparquet
except (ImportError, AttributeError):
fastparquet = None
try:
import pandas
except (ImportError, AttributeError):
pandas = None
try:
import pyarrow
except (ImportError, AttributeError):
pyarrow = None
from google.api_core.exceptions import InternalServerError
from google.api_core.exceptions import ServiceUnavailable
from google.api_core.exceptions import TooManyRequests
from google.cloud import bigquery
from google.cloud import storage
from test_utils.retry import RetryErrors
# Friendly-name / description fixtures used by the update_* examples.
ORIGINAL_FRIENDLY_NAME = "Original friendly name"
ORIGINAL_DESCRIPTION = "Original description"
LOCALLY_CHANGED_FRIENDLY_NAME = "Locally-changed friendly name"
LOCALLY_CHANGED_DESCRIPTION = "Locally-changed description"
UPDATED_FRIENDLY_NAME = "Updated friendly name"
UPDATED_DESCRIPTION = "Updated description"
# Minimal two-column schema shared by several table examples.
SCHEMA = [
    bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
    bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
]
# Sample rows matching SCHEMA.
ROWS = [
    ("Phred Phlyntstone", 32),
    ("Bharney Rhubble", 33),
    ("Wylma Phlyntstone", 29),
    ("Bhettye Rhubble", 27),
]
# Public-dataset query used by the query examples.
QUERY = (
    "SELECT name FROM `bigquery-public-data.usa_names.usa_1910_2013` "
    'WHERE state = "TX"'
)
# Retry helpers: BigQuery rate-limits with 429; GCS can additionally
# return transient 5xx errors.
retry_429 = RetryErrors(TooManyRequests)
retry_storage_errors = RetryErrors(
    (TooManyRequests, InternalServerError, ServiceUnavailable)
)
@pytest.fixture(scope="module")
def client():
    """Module-scoped BigQuery client shared by all examples."""
    bq_client = bigquery.Client()
    return bq_client
@pytest.fixture
def to_delete(client):
    """Yield a list; anything appended to it is deleted during teardown."""
    doomed = []
    yield doomed
    # Teardown: delete each collected resource with the matching retry
    # policy for its service.
    for resource in doomed:
        if isinstance(resource, (bigquery.Dataset, bigquery.DatasetReference)):
            retry_429(client.delete_dataset)(resource, delete_contents=True)
        elif isinstance(resource, storage.Bucket):
            retry_storage_errors(resource.delete)()
        else:
            retry_429(resource.delete)()
def _millis():
return int(time.time() * 1000)
class _CloseOnDelete(object):
def __init__(self, wrapped):
self._wrapped = wrapped
def delete(self):
self._wrapped.close()
def test_create_client_default_credentials():
    """Create a BigQuery client with Application Default Credentials"""
    # NOTE: the [START]/[END] region tags below are extracted verbatim into
    # the published Google Cloud documentation; do not rename or move them.
    # [START bigquery_client_default_credentials]
    from google.cloud import bigquery
    # If you don't specify credentials when constructing the client, the
    # client library will look for credentials in the environment.
    client = bigquery.Client()
    # [END bigquery_client_default_credentials]
    assert client is not None
def test_create_table_nested_repeated_schema(client, to_delete):
    """Create a table whose schema contains a repeated RECORD column."""
    # Test scaffolding: a throwaway dataset, cleaned up via to_delete.
    dataset_id = "create_table_nested_repeated_{}".format(_millis())
    dataset_ref = client.dataset(dataset_id)
    dataset = bigquery.Dataset(dataset_ref)
    client.create_dataset(dataset)
    to_delete.append(dataset)
    # The region below is published documentation; keep it intact.
    # [START bigquery_nested_repeated_schema]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_ref = client.dataset('my_dataset')
    schema = [
        bigquery.SchemaField("id", "STRING", mode="NULLABLE"),
        bigquery.SchemaField("first_name", "STRING", mode="NULLABLE"),
        bigquery.SchemaField("last_name", "STRING", mode="NULLABLE"),
        bigquery.SchemaField("dob", "DATE", mode="NULLABLE"),
        bigquery.SchemaField(
            "addresses",
            "RECORD",
            mode="REPEATED",
            fields=[
                bigquery.SchemaField("status", "STRING", mode="NULLABLE"),
                bigquery.SchemaField("address", "STRING", mode="NULLABLE"),
                bigquery.SchemaField("city", "STRING", mode="NULLABLE"),
                bigquery.SchemaField("state", "STRING", mode="NULLABLE"),
                bigquery.SchemaField("zip", "STRING", mode="NULLABLE"),
                bigquery.SchemaField("numberOfYears", "STRING", mode="NULLABLE"),
            ],
        ),
    ]
    table_ref = dataset_ref.table("my_table")
    table = bigquery.Table(table_ref, schema=schema)
    table = client.create_table(table)  # API request
    print("Created table {}".format(table.full_table_id))
    # [END bigquery_nested_repeated_schema]
def test_create_table_cmek(client, to_delete):
    """Create a table encrypted with a customer-managed encryption key."""
    # Test scaffolding: a throwaway dataset, cleaned up via to_delete.
    dataset_id = "create_table_cmek_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    client.create_dataset(dataset)
    to_delete.append(dataset)
    # The region below is published documentation; keep it intact.
    # [START bigquery_create_table_cmek]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_id = 'my_dataset'
    table_ref = client.dataset(dataset_id).table("my_table")
    table = bigquery.Table(table_ref)
    # Set the encryption key to use for the table.
    # TODO: Replace this key with a key you have created in Cloud KMS.
    kms_key_name = "projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}".format(
        "cloud-samples-tests", "us", "test", "test"
    )
    table.encryption_configuration = bigquery.EncryptionConfiguration(
        kms_key_name=kms_key_name
    )
    table = client.create_table(table)  # API request
    assert table.encryption_configuration.kms_key_name == kms_key_name
    # [END bigquery_create_table_cmek]
def test_create_partitioned_table(client, to_delete):
    """Create a table day-partitioned on a DATE column with expiration."""
    # Test scaffolding: a throwaway dataset, cleaned up via to_delete.
    dataset_id = "create_table_partitioned_{}".format(_millis())
    dataset_ref = bigquery.Dataset(client.dataset(dataset_id))
    dataset = client.create_dataset(dataset_ref)
    to_delete.append(dataset)
    # The region below is published documentation; keep it intact.
    # [START bigquery_create_table_partitioned]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_ref = client.dataset('my_dataset')
    table_ref = dataset_ref.table("my_partitioned_table")
    schema = [
        bigquery.SchemaField("name", "STRING"),
        bigquery.SchemaField("post_abbr", "STRING"),
        bigquery.SchemaField("date", "DATE"),
    ]
    table = bigquery.Table(table_ref, schema=schema)
    table.time_partitioning = bigquery.TimePartitioning(
        type_=bigquery.TimePartitioningType.DAY,
        field="date",  # name of column to use for partitioning
        expiration_ms=7776000000,
    )  # 90 days
    table = client.create_table(table)
    print(
        "Created table {}, partitioned on column {}".format(
            table.table_id, table.time_partitioning.field
        )
    )
    # [END bigquery_create_table_partitioned]
    # Verify the partitioning settings round-tripped through the API.
    assert table.time_partitioning.type_ == "DAY"
    assert table.time_partitioning.field == "date"
    assert table.time_partitioning.expiration_ms == 7776000000
@pytest.mark.skip(
    reason=(
        "update_table() is flaky "
        "https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589"
    )
)
def test_manage_table_labels(client, to_delete):
    """Add, read, and delete labels on a table."""
    # Test scaffolding: a throwaway dataset + table, cleaned up via to_delete.
    dataset_id = "label_table_dataset_{}".format(_millis())
    table_id = "label_table_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    client.create_dataset(dataset)
    to_delete.append(dataset)
    table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
    table = client.create_table(table)
    # The regions below are published documentation; keep them intact.
    # [START bigquery_label_table]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # table_ref = client.dataset('my_dataset').table('my_table')
    # table = client.get_table(table_ref)  # API request
    assert table.labels == {}
    labels = {"color": "green"}
    table.labels = labels
    table = client.update_table(table, ["labels"])  # API request
    assert table.labels == labels
    # [END bigquery_label_table]
    # [START bigquery_get_table_labels]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_id = 'my_dataset'
    # table_id = 'my_table'
    dataset_ref = client.dataset(dataset_id)
    table_ref = dataset_ref.table(table_id)
    table = client.get_table(table_ref)  # API Request
    # View table labels
    print("Table ID: {}".format(table_id))
    print("Labels:")
    if table.labels:
        for label, value in table.labels.items():
            print("\t{}: {}".format(label, value))
    else:
        print("\tTable has no labels defined.")
    # [END bigquery_get_table_labels]
    assert table.labels == labels
    # [START bigquery_delete_label_table]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # table_ref = client.dataset('my_dataset').table('my_table')
    # table = client.get_table(table_ref)  # API request
    # This example table starts with one label
    assert table.labels == {"color": "green"}
    # To delete a label from a table, set its value to None
    table.labels["color"] = None
    table = client.update_table(table, ["labels"])  # API request
    assert table.labels == {}
    # [END bigquery_delete_label_table]
@pytest.mark.skip(
    reason=(
        "update_table() is flaky "
        "https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589"
    )
)
def test_update_table_description(client, to_delete):
    """Update a table's description."""
    # Test scaffolding: a throwaway dataset + table, cleaned up via to_delete.
    dataset_id = "update_table_description_dataset_{}".format(_millis())
    table_id = "update_table_description_table_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    client.create_dataset(dataset)
    to_delete.append(dataset)
    table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
    table.description = "Original description."
    table = client.create_table(table)
    # The region below is published documentation; keep it intact.
    # [START bigquery_update_table_description]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # table_ref = client.dataset('my_dataset').table('my_table')
    # table = client.get_table(table_ref)  # API request
    assert table.description == "Original description."
    table.description = "Updated description."
    table = client.update_table(table, ["description"])  # API request
    assert table.description == "Updated description."
    # [END bigquery_update_table_description]
@pytest.mark.skip(
    reason=(
        "update_table() is flaky "
        "https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589"
    )
)
def test_update_table_expiration(client, to_delete):
    """Update a table's expiration time."""
    # Test scaffolding: a throwaway dataset + table, cleaned up via to_delete.
    dataset_id = "update_table_expiration_dataset_{}".format(_millis())
    table_id = "update_table_expiration_table_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    client.create_dataset(dataset)
    to_delete.append(dataset)
    table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
    table = client.create_table(table)
    # The region below is published documentation; keep it intact.
    # [START bigquery_update_table_expiration]
    import datetime
    import pytz
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # table_ref = client.dataset('my_dataset').table('my_table')
    # table = client.get_table(table_ref)  # API request
    assert table.expires is None
    # set table to expire 5 days from now
    expiration = datetime.datetime.now(pytz.utc) + datetime.timedelta(days=5)
    table.expires = expiration
    table = client.update_table(table, ["expires"])  # API request
    # expiration is stored in milliseconds
    margin = datetime.timedelta(microseconds=1000)
    assert expiration - margin <= table.expires <= expiration + margin
    # [END bigquery_update_table_expiration]
@pytest.mark.skip(
    reason=(
        "update_table() is flaky "
        "https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589"
    )
)
def test_relax_column(client, to_delete):
    """Updates a schema field from required to nullable."""
    # Test scaffolding: a throwaway dataset, cleaned up via to_delete.
    dataset_id = "relax_column_dataset_{}".format(_millis())
    table_id = "relax_column_table_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    dataset = client.create_dataset(dataset)
    to_delete.append(dataset)
    # The region below is published documentation; keep it intact.
    # [START bigquery_relax_column]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_id = 'my_dataset'
    # table_id = 'my_table'
    original_schema = [
        bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
        bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
    ]
    table_ref = client.dataset(dataset_id).table(table_id)
    table = bigquery.Table(table_ref, schema=original_schema)
    table = client.create_table(table)
    assert all(field.mode == "REQUIRED" for field in table.schema)
    # SchemaField properties cannot be edited after initialization.
    # To make changes, construct new SchemaField objects.
    relaxed_schema = [
        bigquery.SchemaField("full_name", "STRING", mode="NULLABLE"),
        bigquery.SchemaField("age", "INTEGER", mode="NULLABLE"),
    ]
    table.schema = relaxed_schema
    table = client.update_table(table, ["schema"])
    assert all(field.mode == "NULLABLE" for field in table.schema)
    # [END bigquery_relax_column]
@pytest.mark.skip(
    reason=(
        "update_table() is flaky "
        "https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589"
    )
)
def test_update_table_cmek(client, to_delete):
    """Patch a table's metadata: rotate its customer-managed encryption key."""
    # Test setup: create a table encrypted with an initial KMS key.
    dataset_id = "update_table_cmek_{}".format(_millis())
    table_id = "update_table_cmek_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    client.create_dataset(dataset)
    to_delete.append(dataset)

    table = bigquery.Table(dataset.table(table_id))
    original_kms_key_name = "projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}".format(
        "cloud-samples-tests", "us", "test", "test"
    )
    table.encryption_configuration = bigquery.EncryptionConfiguration(
        kms_key_name=original_kms_key_name
    )
    table = client.create_table(table)

    # [START bigquery_update_table_cmek]
    # from google.cloud import bigquery
    # client = bigquery.Client()

    assert table.encryption_configuration.kms_key_name == original_kms_key_name

    # Set a new encryption key to use for the destination.
    # TODO: Replace this key with a key you have created in KMS.
    updated_kms_key_name = (
        "projects/cloud-samples-tests/locations/us/keyRings/test/cryptoKeys/otherkey"
    )
    table.encryption_configuration = bigquery.EncryptionConfiguration(
        kms_key_name=updated_kms_key_name
    )

    table = client.update_table(table, ["encryption_configuration"])  # API request

    assert table.encryption_configuration.kms_key_name == updated_kms_key_name
    assert original_kms_key_name != updated_kms_key_name
    # [END bigquery_update_table_cmek]
@pytest.mark.skip(
    reason=(
        "update_table() is flaky "
        "https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589"
    )
)
def test_manage_views(client, to_delete):
    """Create, update, fetch, and authorize a view over a source table.

    Exercises four published snippets in sequence: create a view, update its
    query, get its metadata, and grant the view access to its source dataset.
    """
    # Test setup: load a small public CSV of US states into a source dataset,
    # and create a second ("shared") dataset to hold the view.
    project = client.project
    source_dataset_id = "source_dataset_{}".format(_millis())
    source_dataset_ref = client.dataset(source_dataset_id)
    source_dataset = bigquery.Dataset(source_dataset_ref)
    source_dataset = client.create_dataset(source_dataset)
    to_delete.append(source_dataset)

    job_config = bigquery.LoadJobConfig()
    job_config.schema = [
        bigquery.SchemaField("name", "STRING"),
        bigquery.SchemaField("post_abbr", "STRING"),
    ]
    job_config.skip_leading_rows = 1
    uri = "gs://cloud-samples-data/bigquery/us-states/us-states.csv"
    source_table_id = "us_states"
    load_job = client.load_table_from_uri(
        uri, source_dataset.table(source_table_id), job_config=job_config
    )
    load_job.result()

    shared_dataset_id = "shared_dataset_{}".format(_millis())
    shared_dataset_ref = client.dataset(shared_dataset_id)
    shared_dataset = bigquery.Dataset(shared_dataset_ref)
    shared_dataset = client.create_dataset(shared_dataset)
    to_delete.append(shared_dataset)

    # [START bigquery_create_view]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # project = 'my-project'
    # source_dataset_id = 'my_source_dataset'
    # source_table_id = 'us_states'
    # shared_dataset_ref = client.dataset('my_shared_dataset')

    # This example shows how to create a shared view of a source table of
    # US States. The source table contains all 50 states, while the view will
    # contain only states with names starting with 'W'.
    view_ref = shared_dataset_ref.table("my_shared_view")
    view = bigquery.Table(view_ref)
    sql_template = 'SELECT name, post_abbr FROM `{}.{}.{}` WHERE name LIKE "W%"'
    view.view_query = sql_template.format(project, source_dataset_id, source_table_id)
    view = client.create_table(view)  # API request

    print("Successfully created view at {}".format(view.full_table_id))
    # [END bigquery_create_view]

    # [START bigquery_update_view_query]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # project = 'my-project'
    # source_dataset_id = 'my_source_dataset'
    # source_table_id = 'us_states'
    # shared_dataset_ref = client.dataset('my_shared_dataset')

    # This example shows how to update a shared view of a source table of
    # US States. The view's query will be updated to contain only states with
    # names starting with 'M'.
    view_ref = shared_dataset_ref.table("my_shared_view")
    view = bigquery.Table(view_ref)
    sql_template = 'SELECT name, post_abbr FROM `{}.{}.{}` WHERE name LIKE "M%"'
    view.view_query = sql_template.format(project, source_dataset_id, source_table_id)
    view = client.update_table(view, ["view_query"])  # API request
    # [END bigquery_update_view_query]

    # [START bigquery_get_view]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # shared_dataset_id = 'my_shared_dataset'

    view_ref = client.dataset(shared_dataset_id).table("my_shared_view")
    view = client.get_table(view_ref)  # API Request

    # Display view properties
    print("View at {}".format(view.full_table_id))
    print("View Query:\n{}".format(view.view_query))
    # [END bigquery_get_view]
    assert view.view_query is not None

    analyst_group_email = "example-analyst-group@google.com"
    # [START bigquery_grant_view_access]
    # from google.cloud import bigquery
    # client = bigquery.Client()

    # Assign access controls to the dataset containing the view
    # shared_dataset_id = 'my_shared_dataset'
    # analyst_group_email = 'data_analysts@example.com'
    shared_dataset = client.get_dataset(
        client.dataset(shared_dataset_id)
    )  # API request
    access_entries = shared_dataset.access_entries
    access_entries.append(
        bigquery.AccessEntry("READER", "groupByEmail", analyst_group_email)
    )
    shared_dataset.access_entries = access_entries
    shared_dataset = client.update_dataset(
        shared_dataset, ["access_entries"]
    )  # API request

    # Authorize the view to access the source dataset
    # project = 'my-project'
    # source_dataset_id = 'my_source_dataset'
    source_dataset = client.get_dataset(
        client.dataset(source_dataset_id)
    )  # API request
    view_reference = {
        "projectId": project,
        "datasetId": shared_dataset_id,
        "tableId": "my_shared_view",
    }
    access_entries = source_dataset.access_entries
    # An AccessEntry with a ``view`` entity grants the view read access to
    # the source dataset (role must be None for view entries).
    access_entries.append(bigquery.AccessEntry(None, "view", view_reference))
    source_dataset.access_entries = access_entries
    source_dataset = client.update_dataset(
        source_dataset, ["access_entries"]
    )  # API request
    # [END bigquery_grant_view_access]
def test_load_table_from_file(client, to_delete):
    """Upload table data from a CSV file."""
    # Test setup: throwaway dataset plus the path to a small CSV fixture
    # checked into the repository (../../bigquery/tests/data/people.csv).
    dataset_id = "load_table_from_file_dataset_{}".format(_millis())
    table_id = "load_table_from_file_table_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    dataset.location = "US"
    client.create_dataset(dataset)
    to_delete.append(dataset)
    snippets_dir = os.path.abspath(os.path.dirname(__file__))
    filename = os.path.join(
        snippets_dir, "..", "..", "bigquery", "tests", "data", "people.csv"
    )

    # [START bigquery_load_from_file]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # filename = '/path/to/file.csv'
    # dataset_id = 'my_dataset'
    # table_id = 'my_table'

    dataset_ref = client.dataset(dataset_id)
    table_ref = dataset_ref.table(table_id)
    job_config = bigquery.LoadJobConfig()
    job_config.source_format = bigquery.SourceFormat.CSV
    job_config.skip_leading_rows = 1
    job_config.autodetect = True

    with open(filename, "rb") as source_file:
        job = client.load_table_from_file(source_file, table_ref, job_config=job_config)

    job.result()  # Waits for table load to complete.

    print("Loaded {} rows into {}:{}.".format(job.output_rows, dataset_id, table_id))
    # [END bigquery_load_from_file]

    table = client.get_table(table_ref)
    rows = list(client.list_rows(table))  # API request

    assert len(rows) == 2
    # Order is not preserved, so compare individually
    row1 = bigquery.Row(("Wylma Phlyntstone", 29), {"full_name": 0, "age": 1})
    assert row1 in rows
    row2 = bigquery.Row(("Phred Phlyntstone", 32), {"full_name": 0, "age": 1})
    assert row2 in rows
def test_load_table_from_uri_avro(client, to_delete, capsys):
    """Load an Avro file from GCS into a new table."""
    dataset_id = "load_table_from_uri_avro_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    client.create_dataset(dataset)
    to_delete.append(dataset)

    # [START bigquery_load_table_gcs_avro]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_id = 'my_dataset'

    dataset_ref = client.dataset(dataset_id)
    job_config = bigquery.LoadJobConfig()
    job_config.source_format = bigquery.SourceFormat.AVRO
    uri = "gs://cloud-samples-data/bigquery/us-states/us-states.avro"

    load_job = client.load_table_from_uri(
        uri, dataset_ref.table("us_states"), job_config=job_config
    )  # API request
    print("Starting job {}".format(load_job.job_id))

    load_job.result()  # Waits for table load to complete.
    print("Job finished.")

    destination_table = client.get_table(dataset_ref.table("us_states"))
    print("Loaded {} rows.".format(destination_table.num_rows))
    # [END bigquery_load_table_gcs_avro]

    # The sample file contains the 50 US states, one row each.
    out, _ = capsys.readouterr()
    assert "Loaded 50 rows." in out
def test_load_table_from_uri_csv(client, to_delete, capsys):
    """Load a CSV file from GCS into a new table with an explicit schema."""
    dataset_id = "load_table_from_uri_csv_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    client.create_dataset(dataset)
    to_delete.append(dataset)

    # [START bigquery_load_table_gcs_csv]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_id = 'my_dataset'

    dataset_ref = client.dataset(dataset_id)
    job_config = bigquery.LoadJobConfig()
    job_config.schema = [
        bigquery.SchemaField("name", "STRING"),
        bigquery.SchemaField("post_abbr", "STRING"),
    ]
    job_config.skip_leading_rows = 1
    # The source format defaults to CSV, so the line below is optional.
    job_config.source_format = bigquery.SourceFormat.CSV
    uri = "gs://cloud-samples-data/bigquery/us-states/us-states.csv"

    load_job = client.load_table_from_uri(
        uri, dataset_ref.table("us_states"), job_config=job_config
    )  # API request
    print("Starting job {}".format(load_job.job_id))

    load_job.result()  # Waits for table load to complete.
    print("Job finished.")

    destination_table = client.get_table(dataset_ref.table("us_states"))
    print("Loaded {} rows.".format(destination_table.num_rows))
    # [END bigquery_load_table_gcs_csv]

    out, _ = capsys.readouterr()
    assert "Loaded 50 rows." in out
def test_load_table_from_uri_json(client, to_delete, capsys):
    """Load a newline-delimited JSON file from GCS into a new table."""
    dataset_id = "load_table_from_uri_json_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    dataset.location = "US"
    client.create_dataset(dataset)
    to_delete.append(dataset)

    # [START bigquery_load_table_gcs_json]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_id = 'my_dataset'

    dataset_ref = client.dataset(dataset_id)
    job_config = bigquery.LoadJobConfig()
    job_config.schema = [
        bigquery.SchemaField("name", "STRING"),
        bigquery.SchemaField("post_abbr", "STRING"),
    ]
    job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
    uri = "gs://cloud-samples-data/bigquery/us-states/us-states.json"

    load_job = client.load_table_from_uri(
        uri,
        dataset_ref.table("us_states"),
        location="US",  # Location must match that of the destination dataset.
        job_config=job_config,
    )  # API request
    print("Starting job {}".format(load_job.job_id))

    load_job.result()  # Waits for table load to complete.
    print("Job finished.")

    destination_table = client.get_table(dataset_ref.table("us_states"))
    print("Loaded {} rows.".format(destination_table.num_rows))
    # [END bigquery_load_table_gcs_json]

    out, _ = capsys.readouterr()
    assert "Loaded 50 rows." in out
def test_load_table_from_uri_cmek(client, to_delete):
    """Load JSON from GCS into a table encrypted with a customer-managed key."""
    dataset_id = "load_table_from_uri_cmek_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    dataset.location = "US"
    client.create_dataset(dataset)
    to_delete.append(dataset)

    # [START bigquery_load_table_gcs_json_cmek]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_id = 'my_dataset'

    dataset_ref = client.dataset(dataset_id)
    job_config = bigquery.LoadJobConfig()
    job_config.autodetect = True
    job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON

    # Set the encryption key to use for the destination.
    # TODO: Replace this key with a key you have created in KMS.
    kms_key_name = "projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}".format(
        "cloud-samples-tests", "us", "test", "test"
    )
    encryption_config = bigquery.EncryptionConfiguration(kms_key_name=kms_key_name)
    job_config.destination_encryption_configuration = encryption_config
    uri = "gs://cloud-samples-data/bigquery/us-states/us-states.json"

    load_job = client.load_table_from_uri(
        uri,
        dataset_ref.table("us_states"),
        location="US",  # Location must match that of the destination dataset.
        job_config=job_config,
    )  # API request

    assert load_job.job_type == "load"

    load_job.result()  # Waits for table load to complete.

    assert load_job.state == "DONE"
    table = client.get_table(dataset_ref.table("us_states"))
    assert table.encryption_configuration.kms_key_name == kms_key_name
    # [END bigquery_load_table_gcs_json_cmek]
def test_load_table_from_uri_parquet(client, to_delete, capsys):
    """Load a Parquet file from GCS into a new table."""
    dataset_id = "load_table_from_uri_parquet_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    client.create_dataset(dataset)
    to_delete.append(dataset)

    # [START bigquery_load_table_gcs_parquet]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_id = 'my_dataset'

    dataset_ref = client.dataset(dataset_id)
    job_config = bigquery.LoadJobConfig()
    job_config.source_format = bigquery.SourceFormat.PARQUET
    uri = "gs://cloud-samples-data/bigquery/us-states/us-states.parquet"

    load_job = client.load_table_from_uri(
        uri, dataset_ref.table("us_states"), job_config=job_config
    )  # API request
    print("Starting job {}".format(load_job.job_id))

    load_job.result()  # Waits for table load to complete.
    print("Job finished.")

    destination_table = client.get_table(dataset_ref.table("us_states"))
    print("Loaded {} rows.".format(destination_table.num_rows))
    # [END bigquery_load_table_gcs_parquet]

    out, _ = capsys.readouterr()
    assert "Loaded 50 rows." in out
def test_load_table_from_uri_orc(client, to_delete, capsys):
    """Load an ORC file from GCS into a new table."""
    dataset_id = "load_table_from_uri_orc_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    client.create_dataset(dataset)
    to_delete.append(dataset)

    # [START bigquery_load_table_gcs_orc]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_id = 'my_dataset'

    dataset_ref = client.dataset(dataset_id)
    job_config = bigquery.LoadJobConfig()
    job_config.source_format = bigquery.SourceFormat.ORC
    uri = "gs://cloud-samples-data/bigquery/us-states/us-states.orc"

    load_job = client.load_table_from_uri(
        uri, dataset_ref.table("us_states"), job_config=job_config
    )  # API request
    print("Starting job {}".format(load_job.job_id))

    load_job.result()  # Waits for table load to complete.
    print("Job finished.")

    destination_table = client.get_table(dataset_ref.table("us_states"))
    print("Loaded {} rows.".format(destination_table.num_rows))
    # [END bigquery_load_table_gcs_orc]

    out, _ = capsys.readouterr()
    assert "Loaded 50 rows." in out
def test_load_table_from_uri_autodetect(client, to_delete, capsys):
    """Load table from a GCS URI using various formats and auto-detected schema

    Each file format has its own tested load from URI sample. Because most of
    the code is common for autodetect, append, and truncate, this sample
    includes snippets for all supported formats but only calls a single load
    job.

    This code snippet is made up of shared code, then format-specific code,
    followed by more shared code. Note that only the last format in the
    format-specific code section will be tested in this test.
    """
    dataset_id = "load_table_from_uri_auto_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    client.create_dataset(dataset)
    to_delete.append(dataset)

    # Shared code
    # [START bigquery_load_table_gcs_csv_autodetect]
    # [START bigquery_load_table_gcs_json_autodetect]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_id = 'my_dataset'

    dataset_ref = client.dataset(dataset_id)
    job_config = bigquery.LoadJobConfig()
    job_config.autodetect = True
    # [END bigquery_load_table_gcs_csv_autodetect]
    # [END bigquery_load_table_gcs_json_autodetect]

    # Format-specific code
    # [START bigquery_load_table_gcs_csv_autodetect]
    job_config.skip_leading_rows = 1
    # The source format defaults to CSV, so the line below is optional.
    job_config.source_format = bigquery.SourceFormat.CSV
    uri = "gs://cloud-samples-data/bigquery/us-states/us-states.csv"
    # [END bigquery_load_table_gcs_csv_autodetect]

    # unset csv-specific attribute
    # NOTE(review): reaches into the private ``_properties`` dict because
    # there appears to be no public way to unset skip_leading_rows — confirm.
    del job_config._properties["load"]["skipLeadingRows"]

    # [START bigquery_load_table_gcs_json_autodetect]
    job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
    uri = "gs://cloud-samples-data/bigquery/us-states/us-states.json"
    # [END bigquery_load_table_gcs_json_autodetect]

    # Shared code
    # [START bigquery_load_table_gcs_csv_autodetect]
    # [START bigquery_load_table_gcs_json_autodetect]
    load_job = client.load_table_from_uri(
        uri, dataset_ref.table("us_states"), job_config=job_config
    )  # API request
    print("Starting job {}".format(load_job.job_id))

    load_job.result()  # Waits for table load to complete.
    print("Job finished.")

    destination_table = client.get_table(dataset_ref.table("us_states"))
    print("Loaded {} rows.".format(destination_table.num_rows))
    # [END bigquery_load_table_gcs_csv_autodetect]
    # [END bigquery_load_table_gcs_json_autodetect]

    out, _ = capsys.readouterr()
    assert "Loaded 50 rows." in out
def test_load_table_from_uri_truncate(client, to_delete, capsys):
    """Replaces table data with data from a GCS URI using various formats

    Each file format has its own tested load from URI sample. Because most of
    the code is common for autodetect, append, and truncate, this sample
    includes snippets for all supported formats but only calls a single load
    job.

    This code snippet is made up of shared code, then format-specific code,
    followed by more shared code. Note that only the last format in the
    format-specific code section will be tested in this test.
    """
    dataset_id = "load_table_from_uri_trunc_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    client.create_dataset(dataset)
    to_delete.append(dataset)

    # Seed the table with a single row so the WRITE_TRUNCATE behavior
    # (replacing existing rows) is observable below.
    job_config = bigquery.LoadJobConfig()
    job_config.schema = [
        bigquery.SchemaField("name", "STRING"),
        bigquery.SchemaField("post_abbr", "STRING"),
    ]
    table_ref = dataset.table("us_states")
    body = six.BytesIO(b"Washington,WA")
    client.load_table_from_file(body, table_ref, job_config=job_config).result()
    previous_rows = client.get_table(table_ref).num_rows
    assert previous_rows > 0

    # Shared code
    # [START bigquery_load_table_gcs_avro_truncate]
    # [START bigquery_load_table_gcs_csv_truncate]
    # [START bigquery_load_table_gcs_json_truncate]
    # [START bigquery_load_table_gcs_parquet_truncate]
    # [START bigquery_load_table_gcs_orc_truncate]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # table_ref = client.dataset('my_dataset').table('existing_table')

    job_config = bigquery.LoadJobConfig()
    job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE
    # [END bigquery_load_table_gcs_avro_truncate]
    # [END bigquery_load_table_gcs_csv_truncate]
    # [END bigquery_load_table_gcs_json_truncate]
    # [END bigquery_load_table_gcs_parquet_truncate]
    # [END bigquery_load_table_gcs_orc_truncate]

    # Format-specific code
    # [START bigquery_load_table_gcs_avro_truncate]
    job_config.source_format = bigquery.SourceFormat.AVRO
    uri = "gs://cloud-samples-data/bigquery/us-states/us-states.avro"
    # [END bigquery_load_table_gcs_avro_truncate]

    # [START bigquery_load_table_gcs_csv_truncate]
    job_config.skip_leading_rows = 1
    # The source format defaults to CSV, so the line below is optional.
    job_config.source_format = bigquery.SourceFormat.CSV
    uri = "gs://cloud-samples-data/bigquery/us-states/us-states.csv"
    # [END bigquery_load_table_gcs_csv_truncate]

    # unset csv-specific attribute
    # NOTE(review): reaches into the private ``_properties`` dict because
    # there appears to be no public way to unset skip_leading_rows — confirm.
    del job_config._properties["load"]["skipLeadingRows"]

    # [START bigquery_load_table_gcs_json_truncate]
    job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
    uri = "gs://cloud-samples-data/bigquery/us-states/us-states.json"
    # [END bigquery_load_table_gcs_json_truncate]

    # [START bigquery_load_table_gcs_parquet_truncate]
    job_config.source_format = bigquery.SourceFormat.PARQUET
    uri = "gs://cloud-samples-data/bigquery/us-states/us-states.parquet"
    # [END bigquery_load_table_gcs_parquet_truncate]

    # [START bigquery_load_table_gcs_orc_truncate]
    job_config.source_format = bigquery.SourceFormat.ORC
    uri = "gs://cloud-samples-data/bigquery/us-states/us-states.orc"
    # [END bigquery_load_table_gcs_orc_truncate]

    # Shared code
    # [START bigquery_load_table_gcs_avro_truncate]
    # [START bigquery_load_table_gcs_csv_truncate]
    # [START bigquery_load_table_gcs_json_truncate]
    # [START bigquery_load_table_gcs_parquet_truncate]
    # [START bigquery_load_table_gcs_orc_truncate]
    load_job = client.load_table_from_uri(
        uri, table_ref, job_config=job_config
    )  # API request
    print("Starting job {}".format(load_job.job_id))

    load_job.result()  # Waits for table load to complete.
    print("Job finished.")

    destination_table = client.get_table(table_ref)
    print("Loaded {} rows.".format(destination_table.num_rows))
    # [END bigquery_load_table_gcs_avro_truncate]
    # [END bigquery_load_table_gcs_csv_truncate]
    # [END bigquery_load_table_gcs_json_truncate]
    # [END bigquery_load_table_gcs_parquet_truncate]
    # [END bigquery_load_table_gcs_orc_truncate]

    out, _ = capsys.readouterr()
    assert "Loaded 50 rows." in out
def test_load_table_add_column(client, to_delete):
    """Append a load job that adds a new NULLABLE column to an existing table."""
    # Test setup: a table with a single REQUIRED column, plus the path to a
    # two-column CSV fixture checked into the repository.
    dataset_id = "load_table_add_column_{}".format(_millis())
    dataset_ref = client.dataset(dataset_id)
    dataset = bigquery.Dataset(dataset_ref)
    dataset.location = "US"
    dataset = client.create_dataset(dataset)
    to_delete.append(dataset)

    snippets_dir = os.path.abspath(os.path.dirname(__file__))
    filepath = os.path.join(
        snippets_dir, "..", "..", "bigquery", "tests", "data", "people.csv"
    )
    table_ref = dataset_ref.table("my_table")
    old_schema = [bigquery.SchemaField("full_name", "STRING", mode="REQUIRED")]
    table = client.create_table(bigquery.Table(table_ref, schema=old_schema))

    # [START bigquery_add_column_load_append]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_ref = client.dataset('my_dataset')
    # filepath = 'path/to/your_file.csv'

    # Retrieves the destination table and checks the length of the schema
    table_id = "my_table"
    table_ref = dataset_ref.table(table_id)
    table = client.get_table(table_ref)
    print("Table {} contains {} columns.".format(table_id, len(table.schema)))

    # Configures the load job to append the data to the destination table,
    # allowing field addition
    job_config = bigquery.LoadJobConfig()
    job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
    job_config.schema_update_options = [
        bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION
    ]
    # In this example, the existing table contains only the 'full_name' column.
    # 'REQUIRED' fields cannot be added to an existing schema, so the
    # additional column must be 'NULLABLE'.
    job_config.schema = [
        bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
        bigquery.SchemaField("age", "INTEGER", mode="NULLABLE"),
    ]
    job_config.source_format = bigquery.SourceFormat.CSV
    job_config.skip_leading_rows = 1

    with open(filepath, "rb") as source_file:
        job = client.load_table_from_file(
            source_file,
            table_ref,
            location="US",  # Must match the destination dataset location.
            job_config=job_config,
        )  # API request

    job.result()  # Waits for table load to complete.
    print(
        "Loaded {} rows into {}:{}.".format(
            job.output_rows, dataset_id, table_ref.table_id
        )
    )

    # Checks the updated length of the schema
    table = client.get_table(table)
    print("Table {} now contains {} columns.".format(table_id, len(table.schema)))
    # [END bigquery_add_column_load_append]
    assert len(table.schema) == 2
    assert table.num_rows > 0
def test_load_table_relax_column(client, to_delete):
    """Append a load job that relaxes a REQUIRED column to NULLABLE."""
    # Test setup: a table with three REQUIRED columns, plus the path to a
    # two-column CSV fixture (the missing third column triggers relaxation).
    dataset_id = "load_table_relax_column_{}".format(_millis())
    dataset_ref = client.dataset(dataset_id)
    dataset = bigquery.Dataset(dataset_ref)
    dataset.location = "US"
    dataset = client.create_dataset(dataset)
    to_delete.append(dataset)

    snippets_dir = os.path.abspath(os.path.dirname(__file__))
    filepath = os.path.join(
        snippets_dir, "..", "..", "bigquery", "tests", "data", "people.csv"
    )
    table_ref = dataset_ref.table("my_table")
    old_schema = [
        bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
        bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
        bigquery.SchemaField("favorite_color", "STRING", mode="REQUIRED"),
    ]
    table = client.create_table(bigquery.Table(table_ref, schema=old_schema))

    # [START bigquery_relax_column_load_append]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_ref = client.dataset('my_dataset')
    # filepath = 'path/to/your_file.csv'

    # Retrieves the destination table and checks the number of required fields
    table_id = "my_table"
    table_ref = dataset_ref.table(table_id)
    table = client.get_table(table_ref)
    original_required_fields = sum(field.mode == "REQUIRED" for field in table.schema)
    # In this example, the existing table has 3 required fields.
    print("{} fields in the schema are required.".format(original_required_fields))

    # Configures the load job to append the data to a destination table,
    # allowing field relaxation
    job_config = bigquery.LoadJobConfig()
    job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
    job_config.schema_update_options = [
        bigquery.SchemaUpdateOption.ALLOW_FIELD_RELAXATION
    ]
    # In this example, the existing table contains three required fields
    # ('full_name', 'age', and 'favorite_color'), while the data to load
    # contains only the first two fields.
    job_config.schema = [
        bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
        bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
    ]
    job_config.source_format = bigquery.SourceFormat.CSV
    job_config.skip_leading_rows = 1

    with open(filepath, "rb") as source_file:
        job = client.load_table_from_file(
            source_file,
            table_ref,
            location="US",  # Must match the destination dataset location.
            job_config=job_config,
        )  # API request

    job.result()  # Waits for table load to complete.
    print(
        "Loaded {} rows into {}:{}.".format(
            job.output_rows, dataset_id, table_ref.table_id
        )
    )

    # Checks the updated number of required fields
    table = client.get_table(table)
    current_required_fields = sum(field.mode == "REQUIRED" for field in table.schema)
    print("{} fields in the schema are now required.".format(current_required_fields))
    # [END bigquery_relax_column_load_append]
    assert original_required_fields - current_required_fields == 1
    assert len(table.schema) == 3
    assert table.schema[2].mode == "NULLABLE"
    assert table.num_rows > 0
def test_extract_table(client, to_delete):
    """Export a public table to a GCS bucket as CSV and verify the blob.

    Bug fix: ``assert blob.exists`` asserted the bound method object (always
    truthy); ``Blob.exists()`` must be called to query the server.
    """
    # Test setup: a throwaway bucket, cleaned up via the to_delete fixture.
    bucket_name = "extract_shakespeare_{}".format(_millis())
    storage_client = storage.Client()
    bucket = retry_storage_errors(storage_client.create_bucket)(bucket_name)
    to_delete.append(bucket)

    # [START bigquery_extract_table]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # bucket_name = 'my-bucket'
    project = "bigquery-public-data"
    dataset_id = "samples"
    table_id = "shakespeare"

    destination_uri = "gs://{}/{}".format(bucket_name, "shakespeare.csv")
    dataset_ref = client.dataset(dataset_id, project=project)
    table_ref = dataset_ref.table(table_id)

    extract_job = client.extract_table(
        table_ref,
        destination_uri,
        # Location must match that of the source table.
        location="US",
    )  # API request
    extract_job.result()  # Waits for job to complete.

    print(
        "Exported {}:{}.{} to {}".format(project, dataset_id, table_id, destination_uri)
    )
    # [END bigquery_extract_table]
    blob = retry_storage_errors(bucket.get_blob)("shakespeare.csv")
    assert blob.exists()  # Blob.exists is a method, not a property.
    assert blob.size > 0
    to_delete.insert(0, blob)
def test_extract_table_json(client, to_delete):
    """Export a public table to GCS as newline-delimited JSON.

    Bug fix: ``assert blob.exists`` asserted the bound method object (always
    truthy); ``Blob.exists()`` must be called to query the server.
    """
    bucket_name = "extract_shakespeare_json_{}".format(_millis())
    storage_client = storage.Client()
    bucket = retry_storage_errors(storage_client.create_bucket)(bucket_name)
    to_delete.append(bucket)

    # [START bigquery_extract_table_json]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # bucket_name = 'my-bucket'

    destination_uri = "gs://{}/{}".format(bucket_name, "shakespeare.json")
    dataset_ref = client.dataset("samples", project="bigquery-public-data")
    table_ref = dataset_ref.table("shakespeare")
    job_config = bigquery.job.ExtractJobConfig()
    job_config.destination_format = bigquery.DestinationFormat.NEWLINE_DELIMITED_JSON

    extract_job = client.extract_table(
        table_ref,
        destination_uri,
        job_config=job_config,
        # Location must match that of the source table.
        location="US",
    )  # API request
    extract_job.result()  # Waits for job to complete.
    # [END bigquery_extract_table_json]
    blob = retry_storage_errors(bucket.get_blob)("shakespeare.json")
    assert blob.exists()  # Blob.exists is a method, not a property.
    assert blob.size > 0
    to_delete.insert(0, blob)
def test_extract_table_compressed(client, to_delete):
    """Export a public table to GCS as gzip-compressed CSV.

    Bug fix: ``assert blob.exists`` asserted the bound method object (always
    truthy); ``Blob.exists()`` must be called to query the server.
    """
    bucket_name = "extract_shakespeare_compress_{}".format(_millis())
    storage_client = storage.Client()
    bucket = retry_storage_errors(storage_client.create_bucket)(bucket_name)
    to_delete.append(bucket)

    # [START bigquery_extract_table_compressed]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # bucket_name = 'my-bucket'

    destination_uri = "gs://{}/{}".format(bucket_name, "shakespeare.csv.gz")
    dataset_ref = client.dataset("samples", project="bigquery-public-data")
    table_ref = dataset_ref.table("shakespeare")
    job_config = bigquery.job.ExtractJobConfig()
    job_config.compression = bigquery.Compression.GZIP

    extract_job = client.extract_table(
        table_ref,
        destination_uri,
        # Location must match that of the source table.
        location="US",
        job_config=job_config,
    )  # API request
    extract_job.result()  # Waits for job to complete.
    # [END bigquery_extract_table_compressed]
    blob = retry_storage_errors(bucket.get_blob)("shakespeare.csv.gz")
    assert blob.exists()  # Blob.exists is a method, not a property.
    assert blob.size > 0
    to_delete.insert(0, blob)
def test_client_query_total_rows(client, capsys):
    """Run a query and just check for how many rows."""
    # [START bigquery_query_total_rows]
    # from google.cloud import bigquery
    # client = bigquery.Client()

    query = (
        "SELECT name FROM `bigquery-public-data.usa_names.usa_1910_2013` "
        'WHERE state = "TX" '
        "LIMIT 100"
    )
    query_job = client.query(
        query,
        # Location must match that of the dataset(s) referenced in the query.
        location="US",
    )  # API request - starts the query

    results = query_job.result()  # Wait for query to complete.
    print("Got {} rows.".format(results.total_rows))
    # [END bigquery_query_total_rows]

    # The LIMIT 100 clause caps total_rows at exactly 100.
    out, _ = capsys.readouterr()
    assert "Got 100 rows." in out
def test_manage_job(client):
    """Cancel a running query job, then fetch its metadata by job ID."""
    # Test setup: start a real query so there is a job ID to manage.
    sql = """
        SELECT corpus
        FROM `bigquery-public-data.samples.shakespeare`
        GROUP BY corpus;
    """
    location = "us"
    job = client.query(sql, location=location)
    job_id = job.job_id

    # [START bigquery_cancel_job]
    # TODO(developer): Uncomment the lines below and replace with your values.
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # job_id = 'bq-job-123x456-123y123z123c'  # replace with your job ID
    # location = 'us'                         # replace with your location

    job = client.cancel_job(job_id, location=location)
    # [END bigquery_cancel_job]

    # [START bigquery_get_job]
    # TODO(developer): Uncomment the lines below and replace with your values.
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # job_id = 'bq-job-123x456-123y123z123c'  # replace with your job ID
    # location = 'us'                         # replace with your location

    job = client.get_job(job_id, location=location)  # API request

    # Print selected job properties
    print("Details for job {} running in {}:".format(job_id, location))
    print(
        "\tType: {}\n\tState: {}\n\tCreated: {}".format(
            job.job_type, job.state, job.created
        )
    )
    # [END bigquery_get_job]
def test_query_external_gcs_permanent_table(client, to_delete):
    """Query a permanent table backed by an external GCS CSV file."""
    dataset_id = "query_external_gcs_{}".format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    client.create_dataset(dataset)
    to_delete.append(dataset)

    # [START bigquery_query_external_gcs_perm]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    # dataset_id = 'my_dataset'

    # Configure the external data source
    dataset_ref = client.dataset(dataset_id)
    table_id = "us_states"
    schema = [
        bigquery.SchemaField("name", "STRING"),
        bigquery.SchemaField("post_abbr", "STRING"),
    ]
    table = bigquery.Table(dataset_ref.table(table_id), schema=schema)
    external_config = bigquery.ExternalConfig("CSV")
    external_config.source_uris = [
        "gs://cloud-samples-data/bigquery/us-states/us-states.csv"
    ]
    external_config.options.skip_leading_rows = 1  # optionally skip header row
    table.external_data_configuration = external_config

    # Create a permanent table linked to the GCS file
    table = client.create_table(table)  # API request

    # Example query to find states starting with 'W'
    sql = 'SELECT * FROM `{}.{}` WHERE name LIKE "W%"'.format(dataset_id, table_id)

    query_job = client.query(sql)  # API request

    w_states = list(query_job)  # Waits for query to finish
    print("There are {} states with names starting with W.".format(len(w_states)))
    # [END bigquery_query_external_gcs_perm]
    # Washington, West Virginia, Wisconsin, Wyoming.
    assert len(w_states) == 4
def test_ddl_create_view(client, to_delete, capsys):
    """Create a view via a DDL query.

    The [START]/[END] region is a published documentation sample and is kept
    verbatim; assertions after the region verify the sample's side effects.
    """
    project = client.project
    dataset_id = "ddl_view_{}".format(_millis())
    table_id = "new_view"
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    client.create_dataset(dataset)
    to_delete.append(dataset)  # fixture cleans the dataset up after the test
    # [START bigquery_ddl_create_view]
    # from google.cloud import bigquery
    # project = 'my-project'
    # dataset_id = 'my_dataset'
    # table_id = 'new_view'
    # client = bigquery.Client(project=project)
    sql = """
    CREATE VIEW `{}.{}.{}`
    OPTIONS(
        expiration_timestamp=TIMESTAMP_ADD(
            CURRENT_TIMESTAMP(), INTERVAL 48 HOUR),
        friendly_name="new_view",
        description="a view that expires in 2 days",
        labels=[("org_unit", "development")]
    )
    AS SELECT name, state, year, number
        FROM `bigquery-public-data.usa_names.usa_1910_current`
        WHERE state LIKE 'W%'
    """.format(
        project, dataset_id, table_id
    )
    job = client.query(sql)  # API request.
    job.result()  # Waits for the query to finish.
    print(
        'Created new view "{}.{}.{}".'.format(
            job.destination.project,
            job.destination.dataset_id,
            job.destination.table_id,
        )
    )
    # [END bigquery_ddl_create_view]
    out, _ = capsys.readouterr()
    assert 'Created new view "{}.{}.{}".'.format(project, dataset_id, table_id) in out
    # Test that listing query result rows succeeds so that generic query
    # processing tools work with DDL statements.
    rows = list(job)
    assert len(rows) == 0
    if pandas is not None:
        df = job.to_dataframe()
        assert len(df) == 0
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_query_results_as_dataframe(client):
    """Download query results directly into a pandas DataFrame.

    The [START]/[END] region is a published documentation sample and is kept
    verbatim.
    """
    # [START bigquery_query_results_dataframe]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    sql = """
        SELECT name, SUM(number) as count
        FROM `bigquery-public-data.usa_names.usa_1910_current`
        GROUP BY name
        ORDER BY count DESC
        LIMIT 10
    """
    df = client.query(sql).to_dataframe()
    # [END bigquery_query_results_dataframe]
    assert isinstance(df, pandas.DataFrame)
    assert len(list(df)) == 2  # verify the number of columns
    assert len(df) == 10  # verify the number of rows
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_list_rows_as_dataframe(client):
    """Read a whole table into a pandas DataFrame via list_rows.

    The [START]/[END] region is a published documentation sample and is kept
    verbatim.
    """
    # [START bigquery_list_rows_dataframe]
    # from google.cloud import bigquery
    # client = bigquery.Client()
    dataset_ref = client.dataset("samples", project="bigquery-public-data")
    table_ref = dataset_ref.table("shakespeare")
    table = client.get_table(table_ref)
    df = client.list_rows(table).to_dataframe()
    # [END bigquery_list_rows_dataframe]
    assert isinstance(df, pandas.DataFrame)
    assert len(list(df)) == len(table.schema)  # verify the number of columns
    assert len(df) == table.num_rows  # verify the number of rows
if __name__ == "__main__":
    # Allow running this sample module directly without invoking the pytest CLI.
    pytest.main()
| |
# pythonequations is a collection of equations expressed as Python classes
# Copyright (C) 2008 James R. Phillips
# 2548 Vera Cruz Drive
# Birmingham, AL 35235 USA
# email: zunzun@zunzun.com
#
# License: BSD-style (see LICENSE.txt in main source directory)
# Version info: $Id: Power.py 274 2010-09-29 13:16:14Z zunzun.com $
import pythonequations, pythonequations.EquationBaseClasses, pythonequations.ExtraCodeForEquationBaseClasses
import numpy
numpy.seterr(all = 'raise') # numpy raises warnings, convert to exceptions to trap them
class PowerRoot2D(pythonequations.EquationBaseClasses.Equation2D):
    """Root equation: y = a^(1/x)."""

    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = True
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True

    _name = "Root"
    _HTML = "y = a<SUP>(1.0/x)</SUP>"
    # NOTE(review): ("a") is a plain string, not a 1-tuple; behavior is the
    # same for single-character designators, but ("a",) may have been intended.
    coefficientDesignatorTuple = ("a")
    CannotAcceptDataWithZeroX = True
    function_cpp_code = 'temp = pow(coeff[0], _id[_cwo[0]+i]);'

    def CreateCacheGenerationList(self):
        # Cache x^-1 so the evaluator computes a^(1/x) from the cached data.
        extra = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [extra.CG_PowX(NameOrValueFlag=1, args=[-1.0]), [-1.0]],
        ]

    def SpecificCodeCPP(self):
        # C++ source emitted for generated-code output.
        return "\ttemp = pow(a, (1.0/x_in));\n"
class Geometric_Modified2D(pythonequations.EquationBaseClasses.Equation2D):
    """Modified geometric equation: y = a * x^(b/x)."""

    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = True
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True

    _name = "Geometric Modified"
    _HTML = "y = a * x<SUP>(b/x)</SUP>"
    coefficientDesignatorTuple = ("a", "b")
    CannotAcceptDataWithZeroX = True
    function_cpp_code = 'temp = coeff[0] * pow(_id[_cwo[0]+i], coeff[1] * _id[_cwo[1]+i]);'

    def CreateCacheGenerationList(self):
        # Cache x (base) and x^-1 (so b/x is b * cached value).
        extra = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [extra.CG_X(NameOrValueFlag=1), []],
            [extra.CG_PowX(NameOrValueFlag=1, args=[-1.0]), [-1.0]],
        ]

    def SpecificCodeCPP(self):
        return "\ttemp = a * pow(x_in, (b/x_in));\n"
class StandardGeometric2D(pythonequations.EquationBaseClasses.Equation2D):
    """Standard geometric equation: y = a * x^(b*x)."""

    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = True
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True

    _name = "Standard Geometric"
    _HTML = "y = a * x<SUP>bx</SUP>"
    coefficientDesignatorTuple = ("a", "b")
    function_cpp_code = 'temp = coeff[0] * pow(_id[_cwo[0]+i], coeff[1] * _id[_cwo[0]+i]);'

    def CreateCacheGenerationList(self):
        # Only x itself is needed; it serves as both base and exponent factor.
        extra = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [extra.CG_X(NameOrValueFlag=1), []],
        ]

    def SpecificCodeCPP(self):
        return "\ttemp = a * pow(x_in, (b*x_in));\n"
class XShiftedPower2D(pythonequations.EquationBaseClasses.Equation2D):
    """X-shifted power equation: y = a * (x-b)^c."""

    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = True
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True

    _name = "X Shifted Power"
    _HTML = "y = a * (x-b)<SUP>c</SUP>"
    coefficientDesignatorTuple = ("a", "b", "c")
    function_cpp_code = 'temp = coeff[0] * pow(_id[_cwo[0]+i] - coeff[1], coeff[2]);'

    def CreateCacheGenerationList(self):
        # x plus two all-ones columns (placeholders for the b and c terms).
        extra = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [extra.CG_X(NameOrValueFlag=1), []],
            [extra.CG_Ones(NameOrValueFlag=1), []],
            [extra.CG_Ones(NameOrValueFlag=1), []],
        ]

    def SpecificCodeCPP(self):
        return "\ttemp = a * pow((x_in-b), c);\n"
class SimplePower2D(pythonequations.EquationBaseClasses.Equation2D):
    """Simple power equation: y = x^a."""

    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = True
    RequiresAutoGeneratedReciprocalForm = False
    RequiresAutoGeneratedInverseForms = False

    _name = "Simple Power"
    _HTML = "y = x<SUP>a</SUP>"
    # NOTE(review): ("a") is a plain string, not a 1-tuple; behavior is the
    # same for single-character designators, but ("a",) may have been intended.
    coefficientDesignatorTuple = ("a")
    function_cpp_code = 'temp = pow(_id[_cwo[0]+i], coeff[0]);'

    def CreateCacheGenerationList(self):
        extra = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [extra.CG_X(NameOrValueFlag=1), []],
        ]

    def SpecificCodeCPP(self):
        return "\ttemp = pow(x_in, a);\n"
class StandardPower2D(pythonequations.EquationBaseClasses.Equation2D):
    """Standard power equation: y = a * x^b."""

    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = True
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True

    _name = "Standard Power"
    _HTML = "y = a * x<SUP>b</SUP>"
    coefficientDesignatorTuple = ("a", "b")
    function_cpp_code = 'temp = coeff[0] * pow(_id[_cwo[0]+i], coeff[1]);'

    def CreateCacheGenerationList(self):
        extra = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [extra.CG_X(NameOrValueFlag=1), []],
            [extra.CG_Ones(NameOrValueFlag=1), []],
        ]

    def SpecificCodeCPP(self):
        return "\ttemp = a * pow(x_in, b);\n"
class PowerA_Modified2D(pythonequations.EquationBaseClasses.Equation2D):
    """Modified power equation A: y = a * b^x."""

    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = True
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True

    _name = "Power A Modified"
    _HTML = "y = a * b<SUP>x</SUP>"
    coefficientDesignatorTuple = ("a", "b")
    function_cpp_code = 'temp = coeff[0] * pow(coeff[1], _id[_cwo[0]+i]);'

    def CreateCacheGenerationList(self):
        extra = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [extra.CG_X(NameOrValueFlag=1), []],
            [extra.CG_Ones(NameOrValueFlag=1), []],
        ]

    def SpecificCodeCPP(self):
        return "\ttemp = a * pow(b, x_in);\n"
class PowerA_Modified_Transform2D(pythonequations.EquationBaseClasses.Equation2D):
    """Modified power equation A with linear transform: y = a * b^(c*x + d)."""

    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = True
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True

    _name = "Power A Modified Transform"
    _HTML = "y = a * b<SUP>cx + d</SUP>"
    coefficientDesignatorTuple = ("a", "b", "c", "d")
    function_cpp_code = 'temp = coeff[0] * pow(coeff[1], coeff[2] * _id[_cwo[0]+i] + coeff[3]);'

    def CreateCacheGenerationList(self):
        extra = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [extra.CG_X(NameOrValueFlag=1), []],
        ]

    def SpecificCodeCPP(self):
        return "\ttemp = a * pow(b, c * x_in + d);\n"
class PowerB_Modified2D(pythonequations.EquationBaseClasses.Equation2D):
    """Modified power equation B: y = a^ln(x)."""

    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = True
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True

    _name = "Power B Modified"
    _HTML = "y = a<SUP>ln(x)</SUP>"
    # NOTE(review): ("a") is a plain string, not a 1-tuple; behavior is the
    # same for single-character designators, but ("a",) may have been intended.
    coefficientDesignatorTuple = ("a")
    CannotAcceptDataWithZeroX = True
    CannotAcceptDataWithNegativeX = True
    function_cpp_code = 'temp = pow(coeff[0], _id[_cwo[0]+i]);'

    def CreateCacheGenerationList(self):
        # Cache ln(x); the generated code then computes a^cached == a^ln(x).
        extra = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [extra.CG_LogX(NameOrValueFlag=1), []],
        ]

    def SpecificCodeCPP(self):
        return "\ttemp = pow(a, log(x_in));\n"
class PowerB_Modified_Transform2D(pythonequations.EquationBaseClasses.Equation2D):
    """Modified power equation B with linear transform: y = a^ln(b*x + c)."""

    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = True
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True

    _name = "Power B Modified Transform"
    _HTML = "y = a<SUP>ln(bx + c)</SUP>"
    coefficientDesignatorTuple = ("a", "b", "c")
    function_cpp_code = 'temp = pow(coeff[0], log(coeff[1] * _id[_cwo[0]+i] + coeff[2]));'

    def CreateCacheGenerationList(self):
        extra = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [extra.CG_X(NameOrValueFlag=1), []],
        ]

    def SpecificCodeCPP(self):
        return "\ttemp = pow(a, log(b * x_in + c));\n"
class PowerC_Modified2D(pythonequations.EquationBaseClasses.Equation2D):
    """Modified power equation C: y = (a + x)^b."""

    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = True
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True

    _name = "Power C Modified"
    _HTML = "y = (a + x)<SUP>b</SUP>"
    coefficientDesignatorTuple = ("a", "b")
    function_cpp_code = 'temp = pow(coeff[0] + _id[_cwo[0]+i], coeff[1]);'

    def CreateCacheGenerationList(self):
        extra = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [extra.CG_X(NameOrValueFlag=1), []],
        ]

    def SpecificCodeCPP(self):
        return "\ttemp = pow(a + x_in, b);\n"
class PowerC_Modified_Transform2D(pythonequations.EquationBaseClasses.Equation2D):
    """Modified power equation C with linear transform: y = (a + b*x)^c."""

    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = True
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True

    _name = "Power C Modified Transform"
    _HTML = "y = (a + bx)<SUP>c</SUP>"
    coefficientDesignatorTuple = ("a", "b", "c")
    function_cpp_code = 'temp = pow(coeff[0] + coeff[1] * _id[_cwo[0]+i], coeff[2]);'

    def CreateCacheGenerationList(self):
        extra = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [extra.CG_X(NameOrValueFlag=1), []],
        ]

    def SpecificCodeCPP(self):
        return "\ttemp = pow(a + b * x_in, c);\n"
class PowerLawExponentialCutoff2D(pythonequations.EquationBaseClasses.Equation2D):
    """Power law with exponential cutoff: p(k) = C * k^(-T) * exp(-k/K)."""

    RequiresAutoGeneratedGrowthAndDecayForms = True
    RequiresAutoGeneratedOffsetForm = True
    RequiresAutoGeneratedReciprocalForm = True
    RequiresAutoGeneratedInverseForms = True

    _name = "Power Law With Exponential Cutoff"
    _HTML = "p(k) = C * k<SUP>(-T)</SUP> * exp(-k/K)"
    coefficientDesignatorTuple = ("C", "T", "K")
    # x lives in cache slot 2 (after the two all-ones columns), hence _cwo[2].
    function_cpp_code = 'temp = coeff[0] * pow(_id[_cwo[2]+i], -1.0 * coeff[1]) * exp(-1.0 * _id[_cwo[2]+i] / coeff[2]);'

    def CreateCacheGenerationList(self):
        extra = pythonequations.ExtraCodeForEquationBaseClasses
        self.CacheGenerationList = [
            [extra.CG_Ones(NameOrValueFlag=1), []],
            [extra.CG_Ones(NameOrValueFlag=1), []],
            [extra.CG_X(NameOrValueFlag=1), []],
        ]

    def SpecificCodeCPP(self):
        return "\ttemp = C * pow(x_in, -1.0 * T) * exp(-1.0 * x_in / K);\n"
| |
from plugin import LINUX, WINDOWS, plugin, require
from colorama import Fore
import subprocess
@require(platform=LINUX)
@plugin("wifi")
class WifiPasswordGetterLINUX:
    """
    A Jarvis plugin for Linux, that will find and display all the profiles of the
    wifis that you have connected to and then display the password if selected.
    """

    def __call__(self, jarvis, s):
        profiles = self.get_wifi_profiles()
        choice = self.show_options(jarvis, profiles)
        if choice == "exit":
            return
        password = self.display_password(profiles[choice - 1])
        # FIX: grep may produce no "psk=" line (e.g. an open network); guard
        # against the IndexError that split(...)[1] raised in that case.
        if "=" not in password:
            jarvis.say("No password stored for this wifi!", Fore.YELLOW)
            return
        strip_password = password.split("=", 1)[1]
        jarvis.say("Wifi Name: " + profiles[choice - 1] +
                   '\nPassword: ' + strip_password)

    def get_wifi_profiles(self):
        """
        Returns the names of the connected wifis.
        """
        out = subprocess.Popen(["ls",
                                "/etc/NetworkManager/system-connections/"],
                               universal_newlines=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
        (res, stderr) = out.communicate()
        # Trailing '' after the final newline is dropped by show_options
        # (it iterates len(arr) - 1 entries).
        data = res.split('\n')
        return data

    def show_options(self, jarvis, arr):
        """
        Displays the names of the connected wifis and returns the number of the selected.

        Parameters
        ----------
        jarvis: JarvisAPI
            An instance of the JarvisAPI class.
        arr: list
            The list with the wifi names.
        """
        count = 1
        for x in range(len(arr) - 1):
            option = arr[x]
            jarvis.say(str(count) + ": " + option)
            count = count + 1
        jarvis.say(str(count) + ": Exit")
        choice = self.get_choice("Please select a number or Exit: ",
                                 count, count, jarvis)
        if choice == -1:
            return "exit"
        else:
            return choice

    def get_choice(self, input_text, max_valid_value, terminator, jarvis):
        """
        Returns the number of the selected wifi, or -1 for Exit.

        Parameters
        ----------
        input_text: str
            The text to be printed when asking for input.
        max_valid_value: int
            The max valid value for the choices.
        terminator: int
            The value to terminate the procedure.
        jarvis: JarvisAPI
            An instance of the JarvisAPI class.
        """
        while True:
            try:
                inserted_value = int(jarvis.input(input_text, Fore.GREEN))
                if inserted_value == terminator:
                    return -1
                # FIX: require >= 1 -- zero and negative numbers were accepted
                # before, indexing the profile list from the wrong end.
                elif 1 <= inserted_value <= max_valid_value:
                    return inserted_value
                else:
                    jarvis.say(
                        "Invalid input! Enter a number from the"
                        " choices provided.", Fore.YELLOW)
            except ValueError:
                jarvis.say(
                    "Invalid input! Enter a number from the choices provided.",
                    Fore.YELLOW)
        jarvis.say("")

    def display_password(self, ssid):
        """
        Returns the raw grep output containing the password of the selected wifi.

        Parameters
        ----------
        ssid: str
            The name of the selected wifi.
        """
        path = "/etc/NetworkManager/system-connections/"
        # SECURITY FIX: pass argv as a list with no shell so a wifi name
        # containing shell metacharacters cannot inject commands (the previous
        # version interpolated ssid into a shell=True command string).
        display = subprocess.Popen(["sudo", "grep", "-r", "^psk=", path + ssid],
                                   universal_newlines=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        (new_res, stderr) = display.communicate()
        return new_res
@require(platform=WINDOWS)
@plugin("wifi")
class WifiPasswordGetterWINDOWS:
    """
    A Jarvis plugin for Windows, that will find and display all the profiles of the
    wifis that you have connected to and then display the password, if selected or
    display instantly the password of the requested wifi, e.g. wifi or wifi wifi_name.
    """

    def __call__(self, jarvis, s):
        # Direct lookup when a wifi name was passed on the command line.
        if s:
            self.display_password(jarvis, s)
            return
        profiles = self.get_wifi_profiles()
        if not profiles:
            jarvis.say("No connected wifi found!", Fore.YELLOW)
            return
        choice = self.show_options(jarvis, profiles)
        if choice != -1:
            self.display_password(jarvis, profiles[choice - 1])

    def get_wifi_profiles(self):
        """
        Returns the names of the connected wifis.
        """
        raw = subprocess.check_output(['netsh', 'wlan', 'show', 'profiles'])
        lines = raw.decode('utf-8', errors="backslashreplace").split('\n')
        # Each profile line looks like "    All User Profile     : <name>\r";
        # take the value part and trim the leading space / trailing '\r'.
        return [line.split(":")[1][1:-1]
                for line in lines if "All User Profile" in line]

    def show_options(self, jarvis, profiles):
        """
        Displays the names of the connected wifis and returns the number of the selected.

        Parameters
        ----------
        jarvis: JarvisAPI
            An instance of the JarvisAPI class.
        profiles: list
            The list with the wifi names.
        """
        for index, profile in enumerate(profiles, start=1):
            jarvis.say(str(index) + ": " + profile)
        exit_option = len(profiles) + 1
        jarvis.say(str(exit_option) + ": Exit")
        return self.get_choice(jarvis, "Please select a number or Exit: ",
                               exit_option)

    def get_choice(self, jarvis, input_text, max_valid_value):
        """
        Returns the number of the selected wifi, or -1 for Exit.

        Parameters
        ----------
        jarvis: JarvisAPI
            An instance of the JarvisAPI class.
        input_text: str
            The text to be printed when asking for input.
        max_valid_value: int
            The max valid value for the choices.
        """
        while True:
            try:
                selection = int(jarvis.input(input_text, Fore.GREEN))
            except ValueError:
                jarvis.say(
                    "Invalid input! Enter a number from the choices provided.",
                    Fore.YELLOW)
                continue
            if selection == max_valid_value:
                return -1
            if 1 <= selection < max_valid_value:
                return selection
            jarvis.say(
                "Invalid input! Enter a number from the"
                " choices provided.", Fore.YELLOW)

    def display_password(self, jarvis, profile):
        """
        Displays the name and the password of the selected wifi.

        Parameters
        ----------
        profile: str
            The name of the selected wifi.
        jarvis: JarvisAPI
            An instance of the JarvisAPI class.
        """
        try:
            raw = subprocess.check_output(
                ['netsh', 'wlan', 'show', 'profile', profile, 'key=clear'])
            lines = raw.decode('utf-8', errors="backslashreplace").split('\n')
            keys = [line.split(":")[1][1:-1]
                    for line in lines if "Key Content" in line]
            if keys:
                jarvis.say("Wifi Name: " + profile +
                           '\nPassword: ' + keys[0])
            else:
                jarvis.say("Wifi Name: " + profile +
                           '\nPassword: ' + "UNKNOWN")
        except subprocess.CalledProcessError:
            jarvis.say(
                "Unable to get the password for this wifi. Make sure you enter the correct wifi name!",
                Fore.YELLOW)
| |
#!/usr/bin/env python
import codecs
from datetime import datetime
import json
import time
import urllib
import subprocess
from flask import Markup, g, render_template, request
from slimit import minify
from smartypants import smartypants
from jinja2 import contextfunction, Template
import app_config
import copytext
class BetterJSONEncoder(json.JSONEncoder):
    """
    A JSON encoder that intelligently handles datetimes.
    """
    def default(self, obj):
        # Serialize datetimes as ISO-8601 strings; defer everything else to
        # the base encoder (which raises TypeError for unsupported types).
        if isinstance(obj, datetime):
            return obj.isoformat()
        return json.JSONEncoder.default(self, obj)
class Includer(object):
"""
Base class for Javascript and CSS psuedo-template-tags.
See `make_context` for an explanation of `asset_depth`.
"""
def __init__(self, asset_depth=0, root_path='www'):
self.includes = []
self.tag_string = None
self.asset_depth = asset_depth
self.root_path = root_path
def push(self, path):
self.includes.append(path)
return ''
def _compress(self):
raise NotImplementedError()
def _relativize_path(self, path):
relative_path = path
depth = len(request.path.split('/')) - (2 + self.asset_depth)
while depth > 0:
relative_path = '../%s' % relative_path
depth -= 1
return relative_path
def render(self, path):
if getattr(g, 'compile_includes', False):
if path in g.compiled_includes:
timestamp_path = g.compiled_includes[path]
else:
# Add a querystring to the rendered filename to prevent caching
timestamp_path = '%s?%i' % (path, int(time.time()))
out_path = '%s/%s' % (self.root_path, path)
if path not in g.compiled_includes:
print 'Rendering %s' % out_path
with codecs.open(out_path, 'w', encoding='utf-8') as f:
f.write(self._compress())
# See "fab render"
g.compiled_includes[path] = timestamp_path
markup = Markup(self.tag_string % self._relativize_path(timestamp_path))
else:
response = ','.join(self.includes)
response = '\n'.join([
self.tag_string % self._relativize_path(src) for src in self.includes
])
markup = Markup(response)
del self.includes[:]
return markup
class JavascriptIncluder(Includer):
"""
Psuedo-template tag that handles collecting Javascript and serving appropriate clean or compressed versions.
"""
def __init__(self, *args, **kwargs):
Includer.__init__(self, *args, **kwargs)
self.tag_string = '<script type="text/javascript" src="%s"></script>'
def _compress(self):
output = []
src_paths = []
for src in self.includes:
src_paths.append('%s/%s' % (self.root_path, src))
with codecs.open('%s/%s' % (self.root_path, src), encoding='utf-8') as f:
if not src.endswith('.min.js'):
print '- compressing %s' % src
output.append(minify(f.read()))
else:
print '- appending already compressed %s' % src
output.append(f.read())
context = make_context()
context['paths'] = src_paths
return '\n'.join(output)
class CSSIncluder(Includer):
"""
Psuedo-template tag that handles collecting CSS and serving appropriate clean or compressed versions.
"""
def __init__(self, *args, **kwargs):
Includer.__init__(self, *args, **kwargs)
self.tag_string = '<link rel="stylesheet" type="text/css" href="%s" />'
def _compress(self):
output = []
src_paths = []
for src in self.includes:
css_path = '%s/%s' % (self.root_path, src)
src_paths.append(css_path)
try:
compressed_src = subprocess.check_output(["node_modules/less/bin/lessc", "-x", css_path])
output.append(compressed_src)
except:
print 'It looks like "lessc" isn\'t installed. Try running: "npm install"'
raise
context = make_context()
context['paths'] = src_paths
return '\n'.join(output)
def flatten_app_config():
    """
    Returns a copy of app_config containing only
    configuration variables.
    """
    # Only all-caps [constant] vars get included
    return dict(
        (name, value)
        for name, value in app_config.__dict__.items()
        if name.upper() == name
    )
def make_context(asset_depth=0, root_path='www'):
    """
    Create a base-context for rendering views.

    Includes app_config and JS/CSS includers.

    `asset_depth` indicates how far into the url hierarchy
    the assets are hosted. If 0, then they are at the root.
    If 1 then at /foo/, etc.
    """
    context = flatten_app_config()

    # Register one includer of each kind under its template name.
    for key, includer_cls in (('JS', JavascriptIncluder), ('CSS', CSSIncluder)):
        context[key] = includer_cls(
            asset_depth=asset_depth,
            root_path=root_path
        )

    return context
def urlencode_filter(s):
    """
    Filter to urlencode strings.
    """
    # FIX: "type(s) == 'Markup'" compared a type object to a string and was
    # always False, so Markup values were never unescaped; use isinstance.
    if isinstance(s, Markup):
        s = s.unescape()

    # Evaluate COPY elements
    if type(s) is not unicode:
        s = unicode(s)

    s = s.encode('utf8')
    s = urllib.quote_plus(s)

    return Markup(s)
def smarty_filter(s):
"""
Filter to smartypants strings.
"""
if type(s) == 'Markup':
s = s.unescape()
# Evaulate COPY elements
if type(s) is not unicode:
s = unicode(s)
s = s.encode('utf-8')
s = smartypants(s)
try:
return Markup(s)
except:
print 'This string failed to encode: %s' % s
return Markup(s)
@contextfunction
def render_with_context(context, text):
    """
    Render a template within a template!
    """
    # Compile the inner text and evaluate it with the outer template's context.
    return Template(text.__unicode__()).render(**context)
| |
from lampost.db.dbo import DBOAspect
from lampost.db.dbofield import DBOField
from lampost.di.app import on_app_start
from lampost.di.config import on_config_change, config_value
from lampost.di.resource import Injected, module_inject
from lampost.gameops.action import action_handler, ActionError
from lampost.meta.auto import AutoField
from lampmud.lpmud import attributes
from lampmud.lpmud.combat.core import calc_consider
from lampmud.lpmud.combat.fight import Fight
from lampmud.model.entity import Entity
from lampmud.mud.tools import combat_log
# Framework services injected when the module is wired up at startup.
log = Injected('log')
ev = Injected('dispatcher')
acs = Injected('action_system')
module_inject(__name__)
@on_app_start
@on_config_change
def _config():
    """Cache pool refresh settings; re-read whenever configuration changes."""
    global refresh_interval, refresh_rates
    refresh_interval = config_value('refresh_interval')
    refresh_rates = config_value('refresh_rates')
class EntityLP(Entity):
    """LP-mud style entity: pools (health/stamina/mental/action), timed
    actions with preparation delays, equipment slots, and combat hooks."""
    # Current pool values; maxima appear to be the base_* attributes paired
    # via attributes.pool_keys (see _refresh/display_status).
    health = 0
    stamina = 0
    mental = 0
    action = 0
    auto_fight = True
    weapon = None
    last_opponent = None
    # Transient per-instance state (AutoField: not persisted).
    _refreshing = AutoField(False)
    _current_action = AutoField()
    _next_command = AutoField()
    _action_target = AutoField()

    def _on_attach(self):
        # Rebuild volatile combat/equipment state when the entity is attached.
        self.effects = set()
        self.defenses = set()
        self.equip_slots = {}
        for article in self.inven:
            if getattr(article, 'current_slot', None):
                self._do_equip(article, article.current_slot)
        self.fight.update_skills()

    def _on_detach(self):
        # Drop pending actions and volatile state on detach.
        self._cancel_actions()
        del self.effects
        del self.defenses
        del self.equip_slots

    def check_costs(self, costs):
        # costs: mapping of pool attribute name -> required amount.
        for pool, cost in costs.items():
            if getattr(self, pool, 0) < cost:
                raise ActionError("Your condition prevents you from doing that.")

    def apply_costs(self, costs):
        # Validate first so costs are applied all-or-nothing.
        self.check_costs(costs)
        for pool, cost in costs.items():
            setattr(self, pool, getattr(self, pool) - cost)

    def filter_actions(self, matches):
        # While an action is in progress, only instantaneous actions
        # (no prep_time) remain available.
        if not self._current_action:
            return matches
        return [match for match in matches if not getattr(match.action, 'prep_time', None)]

    @action_handler
    def start_action(self, action, act_args):
        self._current_action = action, act_args
        if hasattr(action, 'prepare_action'):
            try:
                if self.dead:
                    raise ActionError("Ah, would that you could. Was it so long ago that you had such freedom of movement?")
                action.prepare_action(**act_args)
            except ActionError as act_err:
                # Preparation failed: clear the pending action before re-raising.
                self._current_action = None
                raise act_err
        # Leaders act before their followers (lower priority value runs first
        # -- TODO confirm against the action system's ordering).
        priority = -len(self.followers)
        prep_time = getattr(action, 'prep_time', None)
        self._action_target = act_args.get('target', None)
        acs.add_action(self, self._current_action, prep_time, self.finish_action, priority)
        self.check_follow(action, act_args)

    def handle_parse_error(self, error, command):
        # While busy, queue exactly one follow-up command; reject further input.
        if self._current_action:
            if self._next_command:
                self.display_line("You can only do so much at once!")
            else:
                self._next_command = command
        else:
            super().handle_parse_error(error, command)

    @action_handler
    def finish_action(self, system_action, affected):
        # Ignore stale callbacks from actions that were cancelled/replaced.
        if system_action != self._current_action:
            if self._current_action:
                log.warn("Action mismatch")
            return
        action, action_args = self._current_action
        affected.add(self._action_target)
        self._current_action = self._action_target = None
        super().process_action(action, action_args)
        # Run the command queued while this action was in progress, if any.
        if self._next_command:
            self.parse(self._next_command)
            self._next_command = None

    def resolve_actions(self):
        self.check_status()
        self.check_fight()

    def entity_leave_env(self, entity, exit_action):
        super().entity_leave_env(entity, exit_action)
        # Cancel a pending action whose target just left the room.
        if self._current_action and self._action_target == entity:
            self._cancel_actions()

    def _refresh(self, *_):
        # Regenerate each pool by its configured rate, capped at its maximum.
        for pool_id, base_pool_id in attributes.pool_keys:
            new_value = getattr(self, pool_id) + refresh_rates[pool_id]
            setattr(self, pool_id, min(new_value, getattr(self, base_pool_id)))
        self._refreshing = False

    @property
    def weapon_type(self):
        # Returns None when unarmed.
        if self.weapon:
            return self.weapon.weapon_type

    def attacked(self, source, attack):
        # Each defense may reduce the attack; if it is fully neutralized,
        # report the defense (or the attack's failure) and stop.
        for defense in self.defenses:
            defense.apply(self, attack)
            if attack.adj_damage <= 0 or attack.adj_accuracy <= 0:
                if defense.success_map:
                    self.broadcast(target=source, **defense.success_map)
                else:
                    source.broadcast(verb=attack.verb, target=self, **attack.fail_map)
                return
        # The attack landed: broadcast and deduct from the targeted pool.
        source.broadcast(verb=attack.verb, target=self, **attack.success_map)
        current_pool = getattr(self, attack.damage_pool)
        setattr(self, attack.damage_pool, current_pool - attack.adj_damage)
        combat_log(source,
                   lambda: ''.join(['{N} result -- ', attack.damage_pool, ' old: ',
                                    str(current_pool), ' new: ', str(current_pool - attack.adj_damage)]),
                   self)

    def start_combat(self, source):
        self.last_opponent = source
        self.fight.add(source)

    def check_fight(self):
        # Only auto-select a combat action when idle.
        if self.auto_fight and not self._current_action and not self._next_command:
            self.fight.select_action()

    def end_combat(self, source, victory):
        self.fight.end(source, victory)
        if self.last_opponent == source:
            del self.last_opponent
        self.status_change()

    def equip_article(self, article):
        # Equipping a new weapon implicitly unequips the current one.
        if article.art_type == 'weapon' and self.weapon:
            self.remove_article(self.weapon)
        equip_slot = self._find_slot(article.equip_slot)
        if self._slot_filled(equip_slot):
            self._remove_by_slot(equip_slot)
        if self._slot_filled(equip_slot):
            raise ActionError('You have no place to put that.')
        self._do_equip(article, equip_slot)

    def remove_article(self, article):
        # Two-handed articles occupy both hand slots.
        if article.equip_slot == 'two_hand':
            del self.equip_slots['r_hand']
            del self.equip_slots['l_hand']
        else:
            del self.equip_slots[article.current_slot]
        article.current_slot = None
        if article.art_type == 'weapon':
            self.weapon = None
        article.on_removed(self)

    def considered(self, **_):
        return calc_consider(self)

    def check_drop(self, article, quantity=None):
        # Equipped items must be removed before they can be dropped.
        if getattr(article, 'current_slot', None):
            raise ActionError("You must unequip the item before dropping it.")

    def _do_equip(self, article, equip_slot):
        self.equip_slots[equip_slot] = article
        article.current_slot = equip_slot
        article.on_equipped(self)
        if article.art_type == 'weapon':
            self.weapon = article

    def _find_slot(self, equip_slot):
        # Map a generic slot name ('finger', 'wrist', 'one-hand') to a
        # concrete left/right slot.
        if equip_slot == 'finger':
            # NOTE(review): returns the occupied 'r_finger' (forcing a swap)
            # rather than falling over to the free one -- confirm intent.
            if self._slot_filled('r_finger'):
                return 'r_finger'
            return 'l_finger'
        elif equip_slot == 'wrist':
            if self._slot_filled('r_wrist'):
                return 'r_wrist'
            return 'l_wrist'
        elif equip_slot == 'one-hand':
            # NOTE(review): _find_slot('r_hand') always returns the truthy
            # string 'r_hand', so 'l_hand' is unreachable here;
            # _slot_filled('r_hand') was probably intended.
            if self._find_slot('r_hand'):
                return 'r_hand'
            return 'l_hand'
        return equip_slot

    def _slot_filled(self, equip_slot):
        # NOTE(review): checks 'two-hand' (hyphen) while the rest of the class
        # uses 'two_hand' (underscore) -- confirm which spelling article data
        # actually uses.
        if equip_slot == 'two-hand':
            if self.equip_slots.get('r_hand') or self.equip_slots.get('l_hand'):
                return None
        return self.equip_slots.get(equip_slot)

    def _remove_by_slot(self, equip_slot):
        if equip_slot == 'two_hand':
            self._remove_by_slot('r_hand')
            self._remove_by_slot('l_hand')
            return
        article = self.equip_slots.get(equip_slot)
        if article:
            self.remove_article(article)

    def check_status(self):
        if self.status == 'dead':
            pass
        elif self.health <= 0:
            # Out of health: stop everything and die.
            self._cancel_actions()
            self.fight.end_all()
            self.die()
        elif not self._refreshing and attributes.need_refresh(self):
            # Schedule a single pending pool-refresh tick.
            acs.add_action(self, None, refresh_interval, self._refresh, -1000)
            self._refreshing = True
        self.status_change()

    def _cancel_actions(self):
        if self._current_action:
            del self._current_action
            # The action may not have had a target.
            try:
                del self._action_target
            except AttributeError:
                pass
        if self._next_command:
            del self._next_command

    def die(self):
        self._death_effects()
        super().die()

    def _death_effects(self):
        # Zero all pools and mark the entity dead.
        self.status = 'dead'
        self.action = 0
        self.health = 0
        self.stamina = 0
        self.mental = 0

    def status_change(self):
        # Stamp the pulse so clients know status data changed.
        self.pulse_stamp = ev.current_pulse

    @property
    def display_status(self):
        # Extend the base status with current and maximum pool values.
        display_status = super().display_status
        for pool_id, base_pool_id in attributes.pool_keys:
            display_status[pool_id] = getattr(self, pool_id)
            display_status[base_pool_id] = getattr(self, base_pool_id)
        return display_status

    def combat_status(self):
        # One-line pool summary for the combat log.
        return ''.join(['{N} STATUS--', ''.join(["{0}: {1} ".format(pool_id, getattr(self, pool_id))
                                                 for pool_id, _ in attributes.pool_keys])])
class Skilled(DBOAspect):
    """Persistence aspect giving an entity a set of invocable skills."""
    # Mapping of skill template_key -> skill instance, persisted untyped.
    skills = DBOField({}, 'untyped')

    def _on_attach(self):
        self.fight = Fight(self)

    def add_skill(self, skill):
        # Each skill template may be held at most once.
        if skill.template_key in self.skills:
            raise ActionError("Skill already exists.")
        self.skills[skill.template_key] = skill
        self._apply_skill(skill)

    def _apply_skill(self, skill):
        # Auto-start skills take effect immediately; others become available
        # actions on the entity's "soul".
        if skill.auto_start:
            skill.invoke(self)
        else:
            self.enhance_soul(skill)
        # self.fight may not exist yet for partially attached entities.
        try:
            self.fight.update_skills()
        except AttributeError:
            pass

    def remove_skill(self, skill_id):
        try:
            skill = self.skills.pop(skill_id)
            if skill.auto_start:
                skill.revoke(self)
            else:
                self.diminish_soul(skill)
            self.fight.update_skills()
        except KeyError:
            # Raised by the pop when the skill is not held.
            raise ActionError('{} does not have that skill'.format(self.name))
| |
# mssql/pyodbc.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
Connecting to PyODBC
--------------------
The URL here is to be translated to PyODBC connection strings, as
detailed in `ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_.
DSN Connections
^^^^^^^^^^^^^^^
A DSN-based connection is **preferred** overall when using ODBC. A
basic DSN-based connection looks like::
engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
Which above, will pass the following connection string to PyODBC::
dsn=mydsn;UID=user;PWD=pass
If the username and password are omitted, the DSN form will also add
the ``Trusted_Connection=yes`` directive to the ODBC string.
Hostname Connections
^^^^^^^^^^^^^^^^^^^^
Hostname-based connections are **not preferred**, however are supported.
The ODBC driver name must be explicitly specified::
engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0")
.. versionchanged:: 1.0.0 Hostname-based PyODBC connections now require the
SQL Server driver name specified explicitly. SQLAlchemy cannot
choose an optimal default here as it varies based on platform
and installed drivers.
Other keywords interpreted by the Pyodbc dialect to be passed to
``pyodbc.connect()`` in both the DSN and hostname cases include:
``odbc_autotranslate``, ``ansi``, ``unicode_results``, ``autocommit``.
Pass through exact Pyodbc string
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A PyODBC connection string can also be sent exactly as specified in
`ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_
into the driver using the parameter ``odbc_connect``. The delimiters must be URL escaped, however,
as illustrated below using ``urllib.quote_plus``::
import urllib
params = urllib.quote_plus("DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password")
engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
Unicode Binds
-------------
The current state of PyODBC on a unix backend with FreeTDS and/or
EasySoft is poor regarding unicode; different OS platforms and versions of
UnixODBC versus IODBC versus FreeTDS/EasySoft versus PyODBC itself
dramatically alter how strings are received. The PyODBC dialect attempts to
use all the information it knows to determine whether or not a Python unicode
literal can be passed directly to the PyODBC driver or not; while SQLAlchemy
can encode these to bytestrings first, some users have reported that PyODBC
mis-handles bytestrings for certain encodings and requires a Python unicode
object, while the author has observed widespread cases where a Python unicode
is completely misinterpreted by PyODBC, particularly when dealing with
the information schema tables used in table reflection, and the value
must first be encoded to a bytestring.
It is for this reason that whether or not unicode literals for bound
parameters be sent to PyODBC can be controlled using the
``supports_unicode_binds`` parameter to ``create_engine()``. When
left at its default of ``None``, the PyODBC dialect will use its
best guess as to whether or not the driver deals with unicode literals
well. When ``False``, unicode literals will be encoded first, and when
``True`` unicode literals will be passed straight through. This is an interim
flag that hopefully should not be needed when the unicode situation stabilizes
for unix + PyODBC.
.. versionadded:: 0.7.7
``supports_unicode_binds`` parameter to ``create_engine()``\ .
Rowcount Support
----------------
Pyodbc only has partial support for rowcount. See the notes at
:ref:`mssql_rowcount_versioning` for important notes when using ORM
versioning.
"""
from .base import MSExecutionContext, MSDialect, VARBINARY
from ...connectors.pyodbc import PyODBCConnector
from ... import types as sqltypes, util, exc
import decimal
import re
class _ms_numeric_pyodbc(object):

    """Turns Decimals with adjusted() < 0 or > 7 into strings.

    The routines here are needed for older pyodbc versions
    as well as current mxODBC versions.
    """

    def bind_processor(self, dialect):
        """Wrap the parent bind processor so that out-of-range Decimals are
        stringified before they reach the driver."""

        super_process = super(_ms_numeric_pyodbc, self).\
            bind_processor(dialect)

        # Only pyodbc < 2.1.8 needs the workaround (see _need_decimal_fix).
        if not dialect._need_decimal_fix:
            return super_process

        def process(value):
            if self.asdecimal and \
                    isinstance(value, decimal.Decimal):
                adjusted = value.adjusted()
                if adjusted < 0:
                    # magnitude < 0.1: leading zeros after the decimal point
                    return self._small_dec_to_string(value)
                elif adjusted > 7:
                    # large magnitude: expand to plain (non-scientific) digits
                    return self._large_dec_to_string(value)

            if super_process:
                return super_process(value)
            else:
                return value
        return process

    # these routines needed for older versions of pyodbc.
    # as of 2.1.8 this logic is integrated.

    def _small_dec_to_string(self, value):
        # sign + "0." + pad zeros + the raw digit tuple from as_tuple().
        return "%s0.%s%s" % (
            (value < 0 and '-' or ''),
            '0' * (abs(value.adjusted()) - 1),
            "".join([str(nint) for nint in value.as_tuple()[1]]))

    def _large_dec_to_string(self, value):
        _int = value.as_tuple()[1]
        if 'E' in str(value):
            # str() rendered scientific notation: digits plus trailing zeros.
            result = "%s%s%s" % (
                (value < 0 and '-' or ''),
                "".join([str(s) for s in _int]),
                "0" * (value.adjusted() - (len(_int) - 1)))
        else:
            if (len(_int) - 1) > value.adjusted():
                # Digits extend past the decimal point -- split there.
                result = "%s%s.%s" % (
                    (value < 0 and '-' or ''),
                    "".join(
                        [str(s) for s in _int][0:value.adjusted() + 1]),
                    "".join(
                        [str(s) for s in _int][value.adjusted() + 1:]))
            else:
                # Integral value -- just join the digits.
                result = "%s%s" % (
                    (value < 0 and '-' or ''),
                    "".join(
                        [str(s) for s in _int][0:value.adjusted() + 1]))
        return result
class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric):
    # Numeric with the old-pyodbc Decimal-to-string workaround mixed in.
    pass
class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
    # Float with the old-pyodbc Decimal-to-string workaround mixed in.
    pass
class _VARBINARY_pyodbc(VARBINARY):
    """VARBINARY that binds None as pyodbc's BinaryNull marker."""

    def bind_processor(self, dialect):
        if dialect.dbapi is None:
            return None

        to_binary = dialect.dbapi.Binary

        def process(value):
            if value is None:
                # pyodbc-specific null marker for binary columns
                return dialect.dbapi.BinaryNull
            return to_binary(value)

        return process
class MSExecutionContext_pyodbc(MSExecutionContext):
    # True when "; select scope_identity()" was appended to the statement.
    _embedded_scope_identity = False

    def pre_exec(self):
        """where appropriate, issue "select scope_identity()" in the same
        statement.

        Background on why "scope_identity()" is preferable to "@@identity":
        http://msdn.microsoft.com/en-us/library/ms190315.aspx

        Background on why we attempt to embed "scope_identity()" into the same
        statement as the INSERT:
        http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?
        """
        super(MSExecutionContext_pyodbc, self).pre_exec()

        # don't embed the scope_identity select into an
        # "INSERT .. DEFAULT VALUES"
        if self._select_lastrowid and \
                self.dialect.use_scope_identity and \
                len(self.parameters[0]):
            self._embedded_scope_identity = True

            self.statement += "; select scope_identity()"

    def post_exec(self):
        """Harvest the identity value embedded by pre_exec(), if any."""
        if self._embedded_scope_identity:
            # Fetch the last inserted id from the manipulated statement
            # We may have to skip over a number of result sets with
            # no data (due to triggers, etc.)
            # `row` is always bound on exit: the loop only ends via `break`
            # after a successful fetchall().
            while True:
                try:
                    # fetchall() ensures the cursor is consumed
                    # without closing it (FreeTDS particularly)
                    row = self.cursor.fetchall()[0]
                    break
                except self.dialect.dbapi.Error as e:
                    # no way around this - nextset() consumes the previous set
                    # so we need to just keep flipping
                    self.cursor.nextset()

            self._lastrowid = int(row[0])
        else:
            super(MSExecutionContext_pyodbc, self).post_exec()
class MSDialect_pyodbc(PyODBCConnector, MSDialect):
    """SQL Server dialect speaking through the pyodbc DBAPI."""

    execution_ctx_cls = MSExecutionContext_pyodbc

    colspecs = util.update_copy(
        MSDialect.colspecs,
        {
            sqltypes.Numeric: _MSNumeric_pyodbc,
            sqltypes.Float: _MSFloat_pyodbc,
            VARBINARY: _VARBINARY_pyodbc,
            sqltypes.LargeBinary: _VARBINARY_pyodbc,
        }
    )

    def __init__(self, description_encoding=None, **params):
        """
        :param description_encoding: optional encoding applied to cursor
          description values; when omitted, the default from the parent
          classes is kept.
        """
        # BUG FIX: the named parameter always captured the keyword, so the
        # original ``'description_encoding' in params`` test could never be
        # true and an explicitly passed value was silently dropped.  Honor
        # the parameter directly; only override when actually supplied so
        # the inherited default is otherwise preserved.
        if description_encoding is not None:
            self.description_encoding = description_encoding
        super(MSDialect_pyodbc, self).__init__(**params)
        # scope_identity needs cursor.nextset() to skip trigger result sets.
        self.use_scope_identity = self.use_scope_identity and \
            self.dbapi and \
            hasattr(self.dbapi.Cursor, 'nextset')
        # pyodbc < 2.1.8 mangles out-of-range Decimals (see _ms_numeric_pyodbc).
        self._need_decimal_fix = self.dbapi and \
            self._dbapi_version() < (2, 1, 8)

    def _get_server_version_info(self, connection):
        """Return the server version tuple via SERVERPROPERTY, falling back
        to the parent implementation for servers/drivers that lack it."""
        try:
            raw = connection.scalar("SELECT SERVERPROPERTY('ProductVersion')")
        except exc.DBAPIError:
            # SQL Server docs indicate this function isn't present prior to
            # 2008; additionally, unknown combinations of pyodbc aren't
            # able to run this query.
            return super(MSDialect_pyodbc, self).\
                _get_server_version_info(connection)
        else:
            version = []
            # raw string avoids the invalid '\-' escape in a plain literal
            r = re.compile(r'[.\-]')
            for n in r.split(raw):
                try:
                    version.append(int(n))
                except ValueError:
                    version.append(n)
            return tuple(version)

dialect = MSDialect_pyodbc
| |
# pylint: disable=all
import logging
logging.basicConfig(level=logging.INFO)
import unittest
import sys
import webdriverwrapper.exceptions as exceptions
from webdriverwrapper.wrapper import Firefox, Chrome, ChromeOptions
__all__ = (
    'WebdriverTestCase',
    'ONE_INSTANCE_FOR_ALL_TESTS',
    'ONE_INSTANCE_PER_TESTCASE',
    'ONE_INSTANCE_PER_TEST',
)

# Driver-lifecycle policies for WebdriverTestCase.instances_of_driver:
# one shared browser for the whole run, one per TestCase class, or a
# fresh browser for every test method.
ONE_INSTANCE_FOR_ALL_TESTS = 0
ONE_INSTANCE_PER_TESTCASE = 1
ONE_INSTANCE_PER_TEST = 2
class WebdriverTestCase(unittest.TestCase):
    """
    Base ``TestCase`` used for testing with :py:mod:`unittest`.

    Example:

    .. code-block:: python

        class TestCase(webdriverwrapper.unittest.WebdriverTestCase):
            domain = 'www.google.com'
            instances_of_driver = webdriverwrapper.unittest.ONE_INSTANCE_PER_TESTCASE
            screenshot_path = os.path.join('/', 'tmp', 'testreport')

            def _get_driver(self):
                return Chrome()

            def test_doodle(self):
                self.click('gbqfsb')
                self.assertTrue(self.contains_text('Doodles'))

            def test_search(self):
                self.get_elm('gbqf').fill_out_and_submit({
                    'q': 'hello',
                })
                self.wait_for_element(id_='resultStats')
    """

    domain = None
    """
    If you want working relative :py:meth:`go_to <webdriverwrapper.wrapper._WebdriverWrapper.go_to>`
    without having to call for first time
    :py:meth:`get <selenium.webdriver.remote.webdriver.WebDriver.get>` (because
    before that you can't use relative path), set this attribute.
    """

    instances_of_driver = ONE_INSTANCE_FOR_ALL_TESTS
    """
    Specify when you want to create *fresh* driver. By default there is one
    driver for all tests (:py:attr:`.ONE_INSTANCE_FOR_ALL_TESTS`) and you have
    to close it by yourself by calling :py:meth:`.quit_driver`.

    If you need clear cookies, local storage and everything, then consider to use
    new driver for every ``TestCase`` or even every test method.
    """

    wait_after_test = False
    """
    For debug only. When you set to ``True``, it will wait for pressing enter
    after each test before moving to next test. Ideal when you need to check
    out for example Chrome console.

    But maybe better debugging is with :py:meth:`.break_point`.
    """

    screenshot_path = ''
    """
    When you set this path, it will automatically make screenshots of failed
    tests and save them to this path.
    """

    def __init__(self, *args, **kwds):
        super(WebdriverTestCase, self).__init__(*args, **kwds)
        # Counters live on the class so _tear_down can tell when the last
        # test of this TestCase has run (ONE_INSTANCE_PER_TESTCASE policy).
        self.__class__._number_of_test = 0
        self.__class__._count_of_tests = len([m for m in dir(self) if m.startswith('test')])
        self.init()

    def init(self):
        """Hook for subclass initialisation, called at the end of __init__."""
        pass

    def run(self, result=None):
        """Run the test, wiring driver error/info checks and failure
        screenshots into the standard :py:mod:`unittest` result protocol."""
        if result is None:
            result = self.defaultTestResult()
        result.startTest(self)
        test_method = getattr(self, self._testMethodName)
        self._test_method = test_method
        # Tracks whether an at-failure screenshot was already captured so the
        # cleanup below does not overwrite it with a post-tearDown shot.
        screenshot_taken = False
        try:
            ok = False

            self._set_up()
            try:
                self.setUp()
            except KeyboardInterrupt:
                raise
            except:
                result.addError(self, sys.exc_info())
                # setUp can fail because of app in some state returns internal
                #+ server error. It's good to know about it - it can say more
                #+ than that some element couldn't be found.
                try:
                    self.driver.check_expected_errors(test_method)
                    self.driver.check_expected_infos(test_method)
                except:
                    result.addError(self, sys.exc_info())
                return

            try:
                test_method()
                ok = True
            except self.failureException:
                # Capture the page at the moment of failure.
                self.make_screenshot()
                screenshot_taken = True
                result.addFailure(self, sys.exc_info())
            except KeyboardInterrupt:
                raise
            except:
                self.make_screenshot()
                screenshot_taken = True
                result.addError(self, sys.exc_info())

            try:
                self.driver.check_expected_errors(test_method)
                self.driver.check_expected_infos(test_method)
            except:
                ok = False
                result.addError(self, sys.exc_info())

            try:
                self.tearDown()
            except KeyboardInterrupt:
                raise
            except:
                result.addError(self, sys.exc_info())
                ok = False

            if ok:
                result.addSuccess(self)
        finally:
            # BUG FIX: the original unconditionally re-shot here on failure,
            # overwriting the at-failure screenshot with a post-tearDown one.
            # Only shoot now when no screenshot was taken yet (e.g. setUp,
            # expected-error or tearDown failures).
            if not ok and not screenshot_taken:
                self.make_screenshot()
            result.stopTest(self)

            # Is nice to see at break point if test passed or not.
            # So this call have to be after stopTest which print result of test.
            self._tear_down()

    def make_screenshot(self, screenshot_name=None):
        """
        Save screenshot to :py:attr:`.screenshot_path` with given name
        ``screenshot_name``. If name is not given, then the name is name of
        current test (``self.id()``).

        .. versionchanged:: 2.2
            Use ``make_screenshot`` directly on ``driver`` instead. This method
            is used for making screenshot of failed tests and therefore does
            nothing if ``screenshot_path`` is not configured. It stays there
            only for compatibility.
        """
        if not screenshot_name:
            # Without name (and possibly path) we cannot make screenshot. Don't
            # know where to store it.
            if not self.screenshot_path:
                return
            screenshot_name = self.id()

        # Close unexpected alerts (it's blocking and then tests fails completely).
        self.driver.close_alert(ignore_exception=True)
        self.driver.get_screenshot_as_file('%s/%s.png' % (self.screenshot_path, screenshot_name))

    def _set_up(self):
        # Driver and related state are stored on WebdriverTestCase itself so
        # one browser instance can be shared across tests/TestCases.
        self.__class__._number_of_test += 1
        if not hasattr(WebdriverTestCase, 'driver'):
            WebdriverTestCase.driver = self._get_driver()
            WebdriverTestCase.screenshot_path = self.screenshot_path
            WebdriverTestCase._main_window = WebdriverTestCase.driver.current_window_handle
            if self.domain:
                WebdriverTestCase.driver.get(self.domain)

        # Ensure that test starts in main window.
        if self.driver.current_window_handle != self._main_window:
            self.driver.switch_to_window(self._main_window)

    def _get_driver(self):
        """
        Create driver. By default it creates Firefox. Change it to your needs.
        """
        return Firefox()

    def _tear_down(self):
        if self.wait_after_test:
            self.break_point()
        # Quit per-test, or after the last test of the TestCase.
        if self.instances_of_driver == ONE_INSTANCE_PER_TEST or (
            self.instances_of_driver == ONE_INSTANCE_PER_TESTCASE and
            self._number_of_test == self._count_of_tests
        ):
            self.quit_driver()

    @staticmethod
    def quit_driver():
        """
        When you set :py:attr:`.instances_of_driver` to
        :py:attr:`.ONE_INSTANCE_FOR_ALL_TESTS` (which is default), then you
        have to quit driver by yourself by this method.
        """
        if hasattr(WebdriverTestCase, 'driver'):
            WebdriverTestCase.driver.quit()
            del WebdriverTestCase.driver

    def debug(self, msg):
        """Log *msg* at INFO level."""
        logging.info(msg)

    ### Aliases to driver.

    def break_point(self):
        """
        Alias for :py:meth:`~webdriverwrapper.wrapper._WebdriverWrapper.break_point`.
        """
        self.driver.break_point()

    def check_errors(self, expected_error_page=None, allowed_error_pages=None, expected_error_messages=None, allowed_error_messages=None):
        """
        Alias for :py:meth:`~webdriverwrapper.errors.WebdriverWrapperErrorMixin.check_errors`.

        .. versionchanged:: 2.0
            Only alias. Code moved to wrapper so it could be used also by pytest.
        """
        # FIX: the list arguments previously defaulted to shared mutable []
        # instances; None sentinels delegate the same values downstream.
        self.driver.check_errors(
            expected_error_page,
            allowed_error_pages or [],
            expected_error_messages or [],
            allowed_error_messages or [],
        )

    def find_element_by_text(self, text):
        """
        Alias for :py:meth:`~webdriverwrapper.wrapper._WebdriverBaseWrapper.find_element_by_text`.

        .. versionadded:: 2.0
        """
        return self.driver.find_element_by_text(text)

    def find_elements_by_text(self, text):
        """
        Alias for :py:meth:`~webdriverwrapper.wrapper._WebdriverBaseWrapper.find_elements_by_text`.
        """
        return self.driver.find_elements_by_text(text)

    def contains_text(self, text):
        """
        Alias for :py:meth:`~webdriverwrapper.wrapper._WebdriverBaseWrapper.contains_text`.
        """
        return self.driver.contains_text(text)

    def get_elm(self, *args, **kwds):
        """
        Alias for :py:meth:`~webdriverwrapper.wrapper._WebdriverBaseWrapper.get_elm`.
        """
        return self.driver.get_elm(*args, **kwds)

    def get_elms(self, *args, **kwds):
        """
        Alias for :py:meth:`~webdriverwrapper.wrapper._WebdriverBaseWrapper.get_elms`.
        """
        return self.driver.get_elms(*args, **kwds)

    def click(self, *args, **kwds):
        """
        Alias for :py:meth:`~webdriverwrapper.wrapper._WebdriverBaseWrapper.click`.
        """
        self.driver.click(*args, **kwds)

    def wait_for_element(self, *args, **kwds):
        """
        Alias for :py:meth:`~webdriverwrapper.wrapper._WebdriverWrapper.wait_for_element`.
        """
        return self.driver.wait_for_element(*args, **kwds)

    def wait(self, timeout=10):
        """
        Alias for :py:meth:`~webdriverwrapper.wrapper._WebdriverWrapper.wait`.
        """
        return self.driver.wait(timeout)

    def go_to(self, *args, **kwds):
        """
        Alias for :py:meth:`~webdriverwrapper.wrapper._WebdriverWrapper.go_to`.
        """
        self.driver.go_to(*args, **kwds)

    def switch_to_window(self, window_name=None, title=None, url=None):
        """
        Alias for :py:meth:`~webdriverwrapper.wrapper._WebdriverWrapper.switch_to_window`.
        """
        self.driver.switch_to_window(window_name, title, url)

    def close_window(self, window_name=None, title=None, url=None):
        """
        Alias for :py:meth:`~webdriverwrapper.wrapper._WebdriverWrapper.close_window`.
        """
        self.driver.close_window(window_name, title, url)

    def close_other_windows(self):
        """
        Alias for :py:meth:`~webdriverwrapper.wrapper._WebdriverWrapper.close_other_windows`.
        """
        self.driver.close_other_windows()
| |
#!/usr/bin/python3
"""
@author: Gregory Kramida
@licence: Apache v2
Copyright 2016 Gregory Kramida
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os.path as osp
import argparse as ap
from enum import Enum
from yaml import load, dump
from multistereo.stereo_matcher_app import StereoMatcherApp
import re
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
class Argument(object):
    """Describes one program setting: default value, CLI behavior and help."""

    def __init__(self, default,
                 nargs='?',
                 arg_type=str,
                 action='store',
                 arg_help="Documentation N/A",
                 console_only=False,
                 required=False,
                 shorthand=None):
        """
        @rtype: Argument
        @type default: object
        @param default: the default value
        @type nargs: int | str
        @param nargs: number of arguments. See python documentation for ArgumentParser.add_argument.
        @type arg_type: type | str
        @param arg_type: type of value to expect during parsing
        @type action: str | function
        @param action: action to perform with the argument value during parsing
        @type arg_help: str
        @param arg_help: documentation for this argument
        @type console_only: bool
        @param console_only: whether the argument is for console use only or for both config file & console
        @type required: bool
        @param required: whether the argument is required
        @type shorthand: str
        @param shorthand: shorthand to use for argument in console (without the leading '-')
        """
        self.default = default
        self.required = required
        self.console_only = console_only
        self.nargs = nargs
        self.type = arg_type
        self.action = action
        self.help = arg_help
        # A None shorthand is filled in later by
        # Setting.generate_missing_shorthands().
        if shorthand is None:
            self.shorthand = None
        else:
            self.shorthand = "-" + shorthand
# TODO: investigate enum inheritance. There is too much duplicate code between this script file and others, like
# sync_based_on_audio.py and multistereo.py
class Setting(Enum):
    """Program settings; each member's value is an Argument describing its
    console/config-file behavior."""
    # ================= SETTING FILE STORAGE ==========================================================================#
    settings_file = Argument(None, '?', str, 'store',
                             "File (absolute or relative-to-execution path) where to save and/or " +
                             "load settings for the program in YAML format.",
                             console_only=True, required=False)
    save_settings = Argument(False, '?', 'bool_flag', 'store_true',
                             "Save (or update) setting file.",
                             console_only=True, required=False)
    # ================= WORK FOLDER, INPUT & OUTPUT FILES =============================================================#
    folder = Argument("./", '?', str, 'store',
                      "Path to root folder to work in. If set to '!settings_file_location' and a " +
                      " settings file is provided, will be set to the location of the settings file.",
                      console_only=False, required=False)
    images = Argument(["left.png", "right.png"], nargs=2,
                      arg_help="Paths from work folder to left & right stereo images.")
    input_calibration = Argument(None,
                                 arg_help="Path from work folder to left & right calibration files.")
    output = Argument("disparity.png", arg_help="Name of the output disparity image.")
    preview = Argument(False, arg_type='bool_flag', arg_help="Preview the generated disparity map before saving.")

    @staticmethod
    def generate_missing_shorthands():
        """Derive a '-xy' shorthand from each setting's initial letters when
        none was given explicitly (e.g. save_settings -> -ss)."""
        for item in Setting:
            if item.value.shorthand is None:
                # BUG FIX: the non-capturing group was mistyped as (:?^|_);
                # with the correct (?:^|_), findall() yields the captured
                # initial letters directly (same results as before).
                initials = re.findall(r"(?:^|_)(\w)", item.name)
                item.value.shorthand = "-" + "".join(initials)

    @staticmethod
    def generate_defaults_dict():
        """
        @rtype: dict
        @return: dictionary of Setting defaults
        """
        # Local renamed from `dict` to stop shadowing the builtin.
        defaults = {}
        for item in Setting:
            defaults[item.name] = item.value.default
        return defaults

    @staticmethod
    def generate_parser(defaults, console_only=False, description="Description N/A", parents=None):
        """
        @rtype: argparse.ArgumentParser
        @return: either a console-only or a config_file+console parser using the specified defaults and, optionally,
        parents.
        @type defaults: dict
        @param defaults: dictionary of default settings and their values.
        For a conf-file+console parser, these come from the config file. For a console-only parser, these are generated.
        @type console_only: bool
        @type description: str
        @param description: description of the program that uses the parser, to be used in the help file
        @type parents: list[argparse.ArgumentParser] | None
        """
        if console_only:
            parser = ap.ArgumentParser(description=description, formatter_class=ap.RawDescriptionHelpFormatter,
                                       add_help=False)
        else:
            if parents is None:
                raise ValueError("A conf-file+console parser requires at least a console-only parser as a parent.")
            parser = ap.ArgumentParser(parents=parents)
        for item in Setting:
            # Add only the arguments that belong to this parser flavor.
            if item.value.console_only == console_only:
                if item.value.type == 'bool_flag':
                    # store_true flags take no type/nargs.
                    parser.add_argument(item.value.shorthand, '--' + item.name, action=item.value.action,
                                        default=defaults[item.name], required=item.value.required,
                                        help=item.value.help)
                else:
                    parser.add_argument(item.value.shorthand, '--' + item.name, action=item.value.action,
                                        type=item.value.type, nargs=item.value.nargs, required=item.value.required,
                                        default=defaults[item.name], help=item.value.help)
        if not console_only:
            parser.set_defaults(**defaults)
        return parser
def load_app_from_config(path):
    """
    Generate app directly from config file, bypassing command line settings
    (useful for testing in ipython).

    @type path: str
    @param path: path to the YAML settings file
    @rtype: StereoMatcherApp
    """
    Setting.generate_missing_shorthands()
    defaults = Setting.generate_defaults_dict()
    if not osp.isfile(path):
        raise ValueError("Settings file not found at: {0:s}".format(path))
    # `with` guarantees the handle is closed even if YAML parsing fails.
    with open(path, "r", encoding="utf-8") as file_stream:
        config_defaults = load(file_stream, Loader=Loader)
    # BUG FIX: an empty YAML file loads as None; main() guards against this
    # but the original here crashed on None.items().
    if config_defaults:
        for key, value in config_defaults.items():
            defaults[key] = value
    args = ap.Namespace()
    for key, value in defaults.items():
        setattr(args, key, value)
    return StereoMatcherApp(args)
def main():
    """Parse console plus optional YAML settings, optionally persist them,
    then run the stereo matcher."""
    Setting.generate_missing_shorthands()
    defaults = Setting.generate_defaults_dict()
    conf_parser = \
        Setting.generate_parser(defaults, console_only=True, description=
                                "Test stereo algorithms on two image files.")

    # ============== STORAGE/RETRIEVAL OF CONSOLE SETTINGS ===========================================#
    args, remaining_argv = conf_parser.parse_known_args()
    defaults[Setting.save_settings.name] = args.save_settings
    if args.settings_file:
        defaults[Setting.settings_file.name] = args.settings_file
        if osp.isfile(args.settings_file):
            with open(args.settings_file, "r", encoding="utf-8") as file_stream:
                config_defaults = load(file_stream, Loader=Loader)
            # An empty YAML file loads as None; keep generated defaults then.
            if config_defaults:
                for key, value in config_defaults.items():
                    defaults[key] = value
        else:
            raise ValueError("Settings file not found at: {0:s}".format(args.settings_file))

    parser = Setting.generate_parser(defaults, parents=[conf_parser])
    args = parser.parse_args(remaining_argv)

    # process "special" setting values
    if args.folder == "!settings_file_location":
        if args.settings_file and osp.isfile(args.settings_file):
            args.folder = osp.dirname(args.settings_file)

    # save settings if prompted to do so
    if args.save_settings and args.settings_file:
        setting_dict = vars(args)
        # BUG FIX: the original stashed the save_settings flag under the
        # misleading name `file_name` and then restored settings_file as the
        # literal True, clobbering the path on `args`.  Pop both console-only
        # entries, dump the rest, and restore each to its real value.
        save_flag = setting_dict.pop(Setting.save_settings.name)
        settings_path = setting_dict.pop(Setting.settings_file.name)
        with open(settings_path, "w", encoding="utf-8") as file_stream:
            dump(setting_dict, file_stream, Dumper=Dumper)
        setting_dict[Setting.save_settings.name] = save_flag
        setting_dict[Setting.settings_file.name] = settings_path

    app = StereoMatcherApp(args)
    app.disparity2()


if __name__ == "__main__":
    sys.exit(main())
| |
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
import json
import logging
from functools import reduce
from django.contrib.admin.actions import delete_selected
from django.contrib.admin.views import main
from django.contrib.auth import get_permission_codename
from django.db.models import Q
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseNotFound,
HttpResponseServerError,
)
from django.templatetags.static import static
from django.utils.encoding import force_str
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import gettext, gettext_lazy as _
from mptt.exceptions import InvalidMove
from mptt.forms import MPTTAdminForm
from feincms import settings
from feincms.extensions import ExtensionModelAdmin
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------
def django_boolean_icon(field_val, alt_text=None, title=None):
    """
    Return HTML code for a nice representation of true/false.
    """
    # Origin: contrib/admin/templatetags/admin_list.py
    state = {True: "yes", False: "no", None: "unknown"}[field_val]
    alt_text = alt_text or state
    title = 'title="%s" ' % title if title is not None else ""
    icon_url = static("feincms/img/icon-%s.gif" % state)
    return mark_safe(f'<img src="{icon_url}" alt="{alt_text}" {title}/>')
def _build_tree_structure(queryset):
    """
    Build an in-memory representation of the item tree, trying to keep
    database accesses down to a minimum. The returned dictionary looks like
    this (as json dump):

        {"6": [7, 8, 10]
         "7": [12],
         "8": [],
         ...
         }

    """
    all_nodes = {}

    mptt_opts = queryset.model._mptt_meta

    # Single values_list query: (pk, parent id) in depth-first order.
    items = queryset.order_by(mptt_opts.tree_id_attr, mptt_opts.left_attr).values_list(
        "pk", "%s_id" % mptt_opts.parent_attr
    )
    for p_id, parent_id in items:
        # NOTE(review): root nodes land under the *integer* key 0 while real
        # parents use *string* keys -- possibly intentional for the JS
        # consumer, but confirm before relying on consistent key types.
        all_nodes.setdefault(str(parent_id) if parent_id else 0, []).append(p_id)
    return all_nodes
# ------------------------------------------------------------------------
def ajax_editable_boolean_cell(item, attr, text="", override=None):
    """
    Generate a html snippet for showing a boolean value on the admin page.

    Item is an object, attr is the attribute name we should display. Text
    is an optional explanatory text to be included in the output.

    This function will emit code to produce a checkbox input with its state
    corresponding to the item.attr attribute if no override value is passed.
    This input is wired to run a JS ajax updater to toggle the value.

    If override is passed in, ignores the attr attribute and returns a
    static image for the override boolean with no user interaction possible
    (useful for "disabled and you can't change it" situations).
    """
    if text:
        text = " (%s)" % text

    if override is not None:
        body = [django_boolean_icon(override, text), text]
    else:
        checked = 'checked="checked"' if getattr(item, attr) else ""
        body = [
            '<input type="checkbox" data-inplace data-inplace-id="%s"'
            ' data-inplace-attribute="%s" %s>' % (item.pk, attr, checked)
        ]

    parts = ['<div id="wrap_%s_%d">' % (attr, item.pk)] + body + ["</div>"]
    return mark_safe("".join(parts))
# ------------------------------------------------------------------------
def ajax_editable_boolean(attr, short_description):
    """
    Convenience function: Assign the return value of this method to a variable
    of your ModelAdmin class and put the variable name into list_display.

    Example::

        class MyTreeEditor(TreeEditor):
            list_display = ('__str__', 'active_toggle')
            active_toggle = ajax_editable_boolean('active', _('is active'))
    """
    def toggle_column(self, item):
        return ajax_editable_boolean_cell(item, attr)

    toggle_column.short_description = short_description
    toggle_column.editable_boolean_field = attr
    return toggle_column
# ------------------------------------------------------------------------
class ChangeList(main.ChangeList):
    """
    Custom ``ChangeList`` class which ensures that the tree entries are always
    ordered in depth-first order (order by ``tree_id``, ``lft``).
    """

    def __init__(self, request, *args, **kwargs):
        # NOTE(review): self.user is not read anywhere in this class as
        # shown -- presumably consumed by templates/subclasses; confirm.
        self.user = request.user
        super().__init__(request, *args, **kwargs)

    def get_queryset(self, *args, **kwargs):
        mptt_opts = self.model._mptt_meta
        # Depth-first ordering: tree id first, then left edge within the tree.
        qs = (
            super()
            .get_queryset(*args, **kwargs)
            .order_by(mptt_opts.tree_id_attr, mptt_opts.left_attr)
        )
        # Force has_filters, so that the expand/collapse in sidebar is visible
        self.has_filters = True
        return qs

    def get_results(self, request):
        mptt_opts = self.model._mptt_meta
        if settings.FEINCMS_TREE_EDITOR_INCLUDE_ANCESTORS:
            # One Q per result row, matching every node whose [lft, rght]
            # interval encloses the row's -- i.e. the row's ancestors.
            clauses = [
                Q(
                    **{
                        mptt_opts.tree_id_attr: tree_id,
                        mptt_opts.left_attr + "__lte": lft,
                        mptt_opts.right_attr + "__gte": rght,
                    }
                )
                for lft, rght, tree_id in self.queryset.values_list(
                    mptt_opts.left_attr, mptt_opts.right_attr, mptt_opts.tree_id_attr
                )
            ]
            # We could optimise a bit here by explicitly filtering out
            # any clauses that are for parents of nodes included in the
            # queryset anyway. (ie: drop all clauses that refer to a node
            # that is a parent to another node)
            if clauses:
                # Note: Django ORM is smart enough to drop additional
                # clauses if the initial query set is unfiltered. This
                # is good.
                self.queryset |= self.model._default_manager.filter(
                    reduce(lambda p, q: p | q, clauses)
                )

        super().get_results(request)

        # Pre-process permissions because we still have the request here,
        # which is not passed in later stages in the tree editor
        for item in self.result_list:
            item.feincms_changeable = self.model_admin.has_change_permission(
                request, item
            )

            item.feincms_addable = (
                item.feincms_changeable
                and self.model_admin.has_add_permission(request, item)
            )
# ------------------------------------------------------------------------
class TreeEditor(ExtensionModelAdmin):
"""
The ``TreeEditor`` modifies the standard Django administration change list
to a drag-drop enabled interface for django-mptt_-managed Django models.
.. _django-mptt: https://github.com/django-mptt/django-mptt/
"""
    # MPTT-aware form keeps parent choices valid for tree moves.
    form = MPTTAdminForm

    if settings.FEINCMS_TREE_EDITOR_INCLUDE_ANCESTORS:
        # Make sure that no pagination is displayed. Slicing is disabled
        # anyway, therefore this value does not have an influence on the
        # queryset
        list_per_page = 999999999
def __init__(self, *args, **kwargs):
    """
    Insert ``indented_short_title`` into ``list_display``, select the tree
    editor templates and precompute the permission codename strings.
    """
    super().__init__(*args, **kwargs)

    # Copy so we do not mutate a list shared with the class definition.
    self.list_display = list(self.list_display)

    if "indented_short_title" not in self.list_display:
        # Keep the action checkbox first; overwrite the column after it.
        if self.list_display[0] == "action_checkbox":
            self.list_display[1] = "indented_short_title"
        else:
            self.list_display[0] = "indented_short_title"
    self.list_display_links = ("indented_short_title",)

    opts = self.model._meta
    # Most specific template first; Django uses the first one that exists.
    self.change_list_template = [
        "admin/feincms/%s/%s/tree_editor.html"
        % (opts.app_label, opts.object_name.lower()),
        "admin/feincms/%s/tree_editor.html" % opts.app_label,
        "admin/feincms/tree_editor.html",
    ]
    # "app_label.codename" strings checked in the has_*_permission hooks.
    self.object_change_permission = (
        opts.app_label + "." + get_permission_codename("change", opts)
    )
    self.object_add_permission = (
        opts.app_label + "." + get_permission_codename("add", opts)
    )
    self.object_delete_permission = (
        opts.app_label + "." + get_permission_codename("delete", opts)
    )
def changeable(self, item):
    """Return True unless the row was flagged as not changeable."""
    flag = getattr(item, "feincms_changeable", True)
    return flag
def indented_short_title(self, item):
    """
    Generate a short title for an object, indent it depending on
    the object's depth in the hierarchy.
    """
    mptt_opts = item._mptt_meta
    r = ""
    try:
        url = item.get_absolute_url()
    except (AttributeError,):
        url = None

    if url:
        # Hidden input carrying the object's URL (consumed via the
        # "medialibrary_file_path" class and "_refkey_<pk>" id).
        r = (
            '<input type="hidden" class="medialibrary_file_path"'
            ' value="%s" id="_refkey_%d" />'
        ) % (url, item.pk)

    changeable_class = ""
    if not self.changeable(item):
        changeable_class = " tree-item-not-editable"

    tree_root_class = ""
    if not item.parent_id:
        tree_root_class = " tree-root"

    # Marker span; the pixel width encodes the node's tree level.
    r += (
        '<span id="page_marker-%d" class="page_marker%s%s"'
        ' style="width: %dpx;"> </span> '
    ) % (
        item.pk,
        changeable_class,
        tree_root_class,
        14 + getattr(item, mptt_opts.level_attr) * 18,
    )
    #        r += '<span tabindex="0">'
    # Prefer a short_title() method if the model provides one.
    if hasattr(item, "short_title") and callable(item.short_title):
        r += escape(item.short_title())
    else:
        r += escape("%s" % item)
    #        r += '</span>'
    return mark_safe(r)

indented_short_title.short_description = _("title")
def _collect_editable_booleans(self):
    """
    Collect all fields marked as editable booleans. We do not
    want the user to be able to edit arbitrary fields by crafting
    an AJAX request by hand.
    """
    if hasattr(self, "_ajax_editable_booleans"):
        # Already collected; the mapping is cached on the instance.
        return

    self._ajax_editable_booleans = {}

    for field in self.list_display:
        # The ajax_editable_boolean return value has to be assigned
        # to the ModelAdmin class
        try:
            item = getattr(self.__class__, field)
        except (AttributeError, TypeError):
            continue

        attr = getattr(item, "editable_boolean_field", None)
        if attr:
            if hasattr(item, "editable_boolean_result"):
                result_func = item.editable_boolean_result
            else:
                # Factory function binds ``attr`` per iteration and
                # avoids the late-binding-closure pitfall.
                def _fn(attr):
                    return lambda self, instance: [
                        ajax_editable_boolean_cell(instance, attr)
                    ]

                result_func = _fn(attr)
            self._ajax_editable_booleans[attr] = result_func
def _toggle_boolean(self, request):
    """
    Handle an AJAX toggle_boolean request

    Flips one whitelisted boolean attribute on a single object and returns
    the changed table cells as a JSON list of HTML snippets. Denies the
    request for non-staff users and for users without change permission.
    """
    try:
        item_id = int(request.POST.get("item_id", None))
        attr = str(request.POST.get("attr", None))
    except Exception:
        return HttpResponseBadRequest("Malformed request")

    if not request.user.is_staff:
        logger.warning(
            'Denied AJAX request by non-staff "%s" to toggle boolean'
            " %s for object #%s",
            request.user,
            attr,
            item_id,
        )
        return HttpResponseForbidden(
            _("You do not have permission to modify this object")
        )

    # Only attributes registered as editable booleans may be toggled.
    self._collect_editable_booleans()
    if attr not in self._ajax_editable_booleans:
        return HttpResponseBadRequest("not a valid attribute %s" % attr)

    try:
        obj = self.model._default_manager.get(pk=item_id)
    except self.model.DoesNotExist:
        return HttpResponseNotFound("Object does not exist")

    if not self.has_change_permission(request, obj=obj):
        logger.warning(
            'Denied AJAX request by "%s" to toggle boolean %s for' " object %s",
            request.user,
            attr,
            item_id,
        )
        return HttpResponseForbidden(
            _("You do not have permission to modify this object")
        )

    new_state = not getattr(obj, attr)
    logger.info(
        'Toggle %s on #%d %s to %s by "%s"',
        attr,
        obj.pk,
        obj,
        "on" if new_state else "off",
        request.user,
    )

    try:
        # Snapshot the rendered cells before the change for diffing below.
        before_data = self._ajax_editable_booleans[attr](self, obj)

        setattr(obj, attr, new_state)
        obj.save()

        # Construct html snippets to send back to client for status update
        data = self._ajax_editable_booleans[attr](self, obj)
    except Exception:
        logger.exception("Unhandled exception while toggling %s on %s", attr, obj)
        return HttpResponseServerError(f"Unable to toggle {attr} on {obj}")

    # Weed out unchanged cells to keep the updates small. This assumes
    # that the order a possible get_descendents() returns does not change
    # before and after toggling this attribute. Unlikely, but still...
    return HttpResponse(
        json.dumps([b for a, b in zip(before_data, data) if a != b]),
        content_type="application/json",
    )
def get_changelist(self, request, **kwargs):
    """Return the tree-aware ``ChangeList`` class used by this admin."""
    return ChangeList
def changelist_view(self, request, extra_context=None, *args, **kwargs):
    """
    Handle the changelist view, the django view for the model instances
    change list/actions page.

    Dispatches the tree editor's AJAX commands (``toggle_boolean`` and
    ``move_node``) and injects the serialized tree structure and node
    levels into the template context.
    """
    if "actions_column" not in self.list_display:
        self.list_display.append("actions_column")

    # handle common AJAX requests
    if "__cmd" in request.POST:
        cmd = request.POST.get("__cmd")
        if cmd == "toggle_boolean":
            return self._toggle_boolean(request)
        elif cmd == "move_node":
            return self._move_node(request)

        return HttpResponseBadRequest("Oops. AJAX request not understood.")

    extra_context = extra_context or {}
    extra_context["tree_structure"] = mark_safe(
        json.dumps(_build_tree_structure(self.get_queryset(request)))
    )
    # Mapping of pk -> tree level; order_by() clears the tree ordering
    # since the mapping does not depend on it.
    extra_context["node_levels"] = mark_safe(
        json.dumps(
            dict(
                self.get_queryset(request)
                .order_by()
                .values_list("pk", self.model._mptt_meta.level_attr)
            )
        )
    )

    return super().changelist_view(request, extra_context, *args, **kwargs)
def has_add_permission(self, request, obj=None):
    """
    Implement a lookup for object level permissions. Basically the same as
    ModelAdmin.has_add_permission, but also passes the obj parameter in.
    """
    if settings.FEINCMS_TREE_EDITOR_OBJECT_PERMISSIONS:
        allowed = request.user.has_perm(self.object_add_permission, obj)
    else:
        allowed = request.user.has_perm(self.object_add_permission)
    return allowed and super().has_add_permission(request)
def has_change_permission(self, request, obj=None):
    """
    Implement a lookup for object level permissions. Basically the same as
    ModelAdmin.has_change_permission, but also passes the obj parameter in.
    """
    if settings.FEINCMS_TREE_EDITOR_OBJECT_PERMISSIONS:
        allowed = request.user.has_perm(self.object_change_permission, obj)
    else:
        allowed = request.user.has_perm(self.object_change_permission)
    return allowed and super().has_change_permission(request, obj)
def has_delete_permission(self, request, obj=None):
    """
    Implement a lookup for object level permissions. Basically the same as
    ModelAdmin.has_delete_permission, but also passes the obj parameter in.
    """
    if settings.FEINCMS_TREE_EDITOR_OBJECT_PERMISSIONS:
        allowed = request.user.has_perm(self.object_delete_permission, obj)
    else:
        allowed = request.user.has_perm(self.object_delete_permission)
    return allowed and super().has_delete_permission(request, obj)
def _move_node(self, request):
    """
    Handle an AJAX move_node request: move ``cut_item`` relative to
    ``pasted_on`` according to ``position``.

    Returns a plain-text "OK" or "FAIL" response for the client-side JS.
    """
    # Prefer a move_node on the default manager; fall back to the
    # MPTT tree manager otherwise.
    if hasattr(self.model.objects, "move_node"):
        tree_manager = self.model.objects
    else:
        tree_manager = self.model._tree_manager

    queryset = self.get_queryset(request)
    cut_item = queryset.get(pk=request.POST.get("cut_item"))
    pasted_on = queryset.get(pk=request.POST.get("pasted_on"))
    position = request.POST.get("position")

    if not self.has_change_permission(request, cut_item):
        self.message_user(request, _("No permission"))
        return HttpResponse("FAIL")

    if position in ("last-child", "left", "right"):
        try:
            tree_manager.move_node(cut_item, pasted_on, position)
        except InvalidMove as e:
            self.message_user(request, "%s" % e)
            return HttpResponse("FAIL")

        # Ensure that model save methods have been run (required to
        # update Page._cached_url values, might also be helpful for other
        # models inheriting MPTTModel)
        for item in queryset.filter(id__in=(cut_item.pk, pasted_on.pk)):
            item.save()

        self.message_user(
            request, gettext("%s has been moved to a new position.") % cut_item
        )
        return HttpResponse("OK")

    self.message_user(request, _("Did not understand moving instruction."))
    return HttpResponse("FAIL")
def _actions_column(self, instance):
    """Return HTML snippets for the actions column of *instance*."""
    if not self.changeable(instance):
        return []
    return ['<div class="drag_handle"></div>']
def actions_column(self, instance):
    """Render the actions cell (drag handle) for *instance*."""
    cells = self._actions_column(instance)
    return mark_safe(" ".join(cells))

actions_column.short_description = _("actions")
def delete_selected_tree(self, modeladmin, request, queryset):
    """
    Deletes multiple instances and makes sure the MPTT fields get
    recalculated properly. (Because merely doing a bulk delete doesn't
    trigger the post_delete hooks.)
    """
    # If this is True, the confirmation page has been displayed
    if request.POST.get("post"):
        n = 0
        # TODO: The disable_mptt_updates / rebuild is a work around
        # for what seems to be a mptt problem when deleting items
        # in a loop. Revisit this, there should be a better solution.
        with queryset.model.objects.disable_mptt_updates():
            for obj in queryset:
                # Per-object permission check: skip (and log) objects the
                # user may not delete instead of aborting the whole batch.
                if self.has_delete_permission(request, obj):
                    obj.delete()
                    n += 1
                    obj_display = force_str(obj)
                    self.log_deletion(request, obj, obj_display)
                else:
                    logger.warning(
                        'Denied delete request by "%s" for object #%s',
                        request.user,
                        obj.id,
                    )
        if n > 0:
            queryset.model.objects.rebuild()
        self.message_user(
            request, _("Successfully deleted %(count)d items.") % {"count": n}
        )
        # Return None to display the change list page again
        return None
    else:
        # (ab)using the built-in action to display the confirmation page
        return delete_selected(self, request, queryset)
def get_actions(self, request):
    """Swap the default bulk delete for the MPTT-aware variant."""
    actions = super().get_actions(request)
    if "delete_selected" in actions:
        description = _("Delete selected %(verbose_name_plural)s")
        actions["delete_selected"] = (
            self.delete_selected_tree,
            "delete_selected",
            description,
        )
    return actions
| |
"""
Common tests shared by test_str, test_unicode, test_userstring and test_string.
"""
import unittest, string, sys
from test import test_support
from UserList import UserList
class Sequence:
    """Minimal sequence wrapper used to exercise sequence-accepting APIs."""

    def __init__(self, seq='wxyz'):
        self.seq = seq

    def __len__(self):
        return len(self.seq)

    def __getitem__(self, i):
        return self.seq[i]
class BadSeq1(Sequence):
    # Heterogeneous sequence (int, str, Python 2 long literal) -- joining
    # it with a string separator is expected to raise TypeError.
    def __init__(self): self.seq = [7, 'hello', 123L]
class BadSeq2(Sequence):
    """A sequence that lies about its length: __len__ claims 8 items."""

    def __init__(self):
        self.seq = ['a', 'b', 'c']

    def __len__(self):
        return 8
class CommonTest(unittest.TestCase):
    # This testcase contains test that can be used in all
    # stringlike classes. Currently this is str, unicode
    # UserString and the string module.

    # The type to be tested
    # Change in subclasses to change the behaviour of fixtesttype()
    type2test = None

    # All tests pass their arguments to the testing methods
    # as str objects. fixtesttype() can be used to propagate
    # these arguments to the appropriate type
def fixtype(self, obj):
if isinstance(obj, str):
return self.__class__.type2test(obj)
elif isinstance(obj, list):
return [self.fixtype(x) for x in obj]
elif isinstance(obj, tuple):
return tuple([self.fixtype(x) for x in obj])
elif isinstance(obj, dict):
return dict([
(self.fixtype(key), self.fixtype(value))
for (key, value) in obj.iteritems()
])
else:
return obj
# check that object.method(*args) returns result
def checkequal(self, result, object, methodname, *args):
    """Assert that object.method(*args) equals *result* after type fixing."""
    result = self.fixtype(result)
    object = self.fixtype(object)
    args = self.fixtype(args)
    realresult = getattr(object, methodname)(*args)
    self.assertEqual(
        result,
        realresult
    )
    # if the original is returned make sure that
    # this doesn't happen with subclasses
    if object == realresult:
        class subtype(self.__class__.type2test):
            pass
        object = subtype(object)
        realresult = getattr(object, methodname)(*args)
        self.assert_(object is not realresult)
# check that object.method(*args) raises exc
def checkraises(self, exc, object, methodname, *args):
object = self.fixtype(object)
args = self.fixtype(args)
self.assertRaises(
exc,
getattr(object, methodname),
*args
)
# call object.method(*args) without any checks
def checkcall(self, object, methodname, *args):
object = self.fixtype(object)
args = self.fixtype(args)
getattr(object, methodname)(*args)
def test_hash(self):
# SF bug 1054139: += optimization was not invalidating cached hash value
a = self.type2test('DNSSEC')
b = self.type2test('')
for c in a:
b += c
hash(b)
self.assertEqual(hash(a), hash(b))
def test_capitalize(self):
    # capitalize(): first character uppercased, remainder lowercased.
    self.checkequal(' hello ', ' hello ', 'capitalize')
    self.checkequal('Hello ', 'Hello ','capitalize')
    self.checkequal('Hello ', 'hello ','capitalize')
    self.checkequal('Aaaa', 'aaaa', 'capitalize')
    self.checkequal('Aaaa', 'AaAa', 'capitalize')

    self.checkraises(TypeError, 'hello', 'capitalize', 42)

def test_count(self):
    # count() with and without start/end bounds, including negative ones.
    self.checkequal(3, 'aaa', 'count', 'a')
    self.checkequal(0, 'aaa', 'count', 'b')
    self.checkequal(3, 'aaa', 'count', 'a')
    self.checkequal(0, 'aaa', 'count', 'b')
    self.checkequal(3, 'aaa', 'count', 'a')
    self.checkequal(0, 'aaa', 'count', 'b')
    self.checkequal(0, 'aaa', 'count', 'b')
    self.checkequal(1, 'aaa', 'count', 'a', -1)
    self.checkequal(3, 'aaa', 'count', 'a', -10)
    self.checkequal(2, 'aaa', 'count', 'a', 0, -1)
    self.checkequal(0, 'aaa', 'count', 'a', 0, -10)

    self.checkraises(TypeError, 'hello', 'count')
    self.checkraises(TypeError, 'hello', 'count', 42)
def test_find(self):
    # find() returns the lowest index or -1 when absent.
    self.checkequal(0, 'abcdefghiabc', 'find', 'abc')
    self.checkequal(9, 'abcdefghiabc', 'find', 'abc', 1)
    self.checkequal(-1, 'abcdefghiabc', 'find', 'def', 4)

    self.checkraises(TypeError, 'hello', 'find')
    self.checkraises(TypeError, 'hello', 'find', 42)

def test_rfind(self):
    # rfind() returns the highest index or -1 when absent.
    self.checkequal(9, 'abcdefghiabc', 'rfind', 'abc')
    self.checkequal(12, 'abcdefghiabc', 'rfind', '')
    self.checkequal(0, 'abcdefghiabc', 'rfind', 'abcd')
    self.checkequal(-1, 'abcdefghiabc', 'rfind', 'abcz')

    self.checkraises(TypeError, 'hello', 'rfind')
    self.checkraises(TypeError, 'hello', 'rfind', 42)

def test_index(self):
    # index() behaves like find() but raises ValueError when absent.
    self.checkequal(0, 'abcdefghiabc', 'index', '')
    self.checkequal(3, 'abcdefghiabc', 'index', 'def')
    self.checkequal(0, 'abcdefghiabc', 'index', 'abc')
    self.checkequal(9, 'abcdefghiabc', 'index', 'abc', 1)

    self.checkraises(ValueError, 'abcdefghiabc', 'index', 'hib')
    self.checkraises(ValueError, 'abcdefghiab', 'index', 'abc', 1)
    self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', 8)
    self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', -1)

    self.checkraises(TypeError, 'hello', 'index')
    self.checkraises(TypeError, 'hello', 'index', 42)

def test_rindex(self):
    # rindex() behaves like rfind() but raises ValueError when absent.
    self.checkequal(12, 'abcdefghiabc', 'rindex', '')
    self.checkequal(3, 'abcdefghiabc', 'rindex', 'def')
    self.checkequal(9, 'abcdefghiabc', 'rindex', 'abc')
    self.checkequal(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)

    self.checkraises(ValueError, 'abcdefghiabc', 'rindex', 'hib')
    self.checkraises(ValueError, 'defghiabc', 'rindex', 'def', 1)
    self.checkraises(ValueError, 'defghiabc', 'rindex', 'abc', 0, -1)
    self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, 8)
    self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, -1)

    self.checkraises(TypeError, 'hello', 'rindex')
    self.checkraises(TypeError, 'hello', 'rindex', 42)
def test_lower(self):
    self.checkequal('hello', 'HeLLo', 'lower')
    self.checkequal('hello', 'hello', 'lower')
    self.checkraises(TypeError, 'hello', 'lower', 42)

def test_upper(self):
    self.checkequal('HELLO', 'HeLLo', 'upper')
    self.checkequal('HELLO', 'HELLO', 'upper')
    self.checkraises(TypeError, 'hello', 'upper', 42)

def test_expandtabs(self):
    # expandtabs() with default (8) and explicit tab sizes; \r and \r\n
    # reset the column counter like \n does.
    self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
    self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
    self.checkequal('abc\rab  def\ng   hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 4)
    self.checkequal('abc\r\nab  def\ng   hi', 'abc\r\nab\tdef\ng\thi', 'expandtabs', 4)
    self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
    self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
    self.checkequal('abc\r\nab\r\ndef\ng\r\nhi', 'abc\r\nab\r\ndef\ng\r\nhi', 'expandtabs', 4)

    self.checkraises(TypeError, 'hello', 'expandtabs', 42, 42)
def test_split(self):
    # Default split: runs of whitespace as separators, no empty fields.
    self.checkequal(['this', 'is', 'the', 'split', 'function'],
                    'this is the split function', 'split')

    # by whitespace
    self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'split')
    self.checkequal(['a', 'b c d'], 'a b c d', 'split', None, 1)
    self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
    self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 3)
    self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 4)
    self.checkequal(['a b c d'], 'a b c d', 'split', None, 0)
    self.checkequal(['a', 'b', 'c  d'], 'a  b  c  d', 'split', None, 2)

    # by a char
    self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|')
    self.checkequal(['a', 'b|c|d'], 'a|b|c|d', 'split', '|', 1)
    self.checkequal(['a', 'b', 'c|d'], 'a|b|c|d', 'split', '|', 2)
    self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 3)
    self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 4)
    self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
    self.checkequal(['a', '', 'b||c||d'], 'a||b||c||d', 'split', '|', 2)
    self.checkequal(['endcase ', ''], 'endcase |', 'split', '|')
    self.checkequal(['a', '', 'b\x00c\x00d'], 'a\x00\x00b\x00c\x00d', 'split', '\x00', 2)

    # by string
    self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
    self.checkequal(['a', 'b//c//d'], 'a//b//c//d', 'split', '//', 1)
    self.checkequal(['a', 'b', 'c//d'], 'a//b//c//d', 'split', '//', 2)
    self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 3)
    self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 4)
    self.checkequal(['a//b//c//d'], 'a//b//c//d', 'split', '//', 0)
    self.checkequal(['a', '', 'b////c////d'], 'a////b////c////d', 'split', '//', 2)
    self.checkequal(['endcase ', ''], 'endcase test', 'split', 'test')

    # mixed use of str and unicode
    self.checkequal([u'a', u'b', u'c d'], 'a b c d', 'split', u' ', 2)

    # argument type
    self.checkraises(TypeError, 'hello', 'split', 42, 42, 42)

def test_rsplit(self):
    # rsplit(): like split() but maxsplit consumes from the right.
    self.checkequal(['this', 'is', 'the', 'rsplit', 'function'],
                    'this is the rsplit function', 'rsplit')

    # by whitespace
    self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'rsplit')
    self.checkequal(['a b c', 'd'], 'a b c d', 'rsplit', None, 1)
    self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
    self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 3)
    self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 4)
    self.checkequal(['a b c d'], 'a b c d', 'rsplit', None, 0)
    self.checkequal(['a  b', 'c', 'd'], 'a  b  c  d', 'rsplit', None, 2)

    # by a char
    self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|')
    self.checkequal(['a|b|c', 'd'], 'a|b|c|d', 'rsplit', '|', 1)
    self.checkequal(['a|b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 2)
    self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 3)
    self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 4)
    self.checkequal(['a|b|c|d'], 'a|b|c|d', 'rsplit', '|', 0)
    self.checkequal(['a||b||c', '', 'd'], 'a||b||c||d', 'rsplit', '|', 2)
    self.checkequal(['', ' begincase'], '| begincase', 'rsplit', '|')
    self.checkequal(['a\x00\x00b', 'c', 'd'], 'a\x00\x00b\x00c\x00d', 'rsplit', '\x00', 2)

    # by string
    self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//')
    self.checkequal(['a//b//c', 'd'], 'a//b//c//d', 'rsplit', '//', 1)
    self.checkequal(['a//b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 2)
    self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 3)
    self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 4)
    self.checkequal(['a//b//c//d'], 'a//b//c//d', 'rsplit', '//', 0)
    self.checkequal(['a////b////c', '', 'd'], 'a////b////c////d', 'rsplit', '//', 2)
    self.checkequal(['', ' begincase'], 'test begincase', 'rsplit', 'test')

    # mixed use of str and unicode
    self.checkequal([u'a b', u'c', u'd'], 'a b c d', 'rsplit', u' ', 2)

    # argument type
    self.checkraises(TypeError, 'hello', 'rsplit', 42, 42, 42)
def test_strip(self):
    # strip/lstrip/rstrip with the default (whitespace) behaviour.
    self.checkequal('hello', '   hello   ', 'strip')
    self.checkequal('hello   ', '   hello   ', 'lstrip')
    self.checkequal('   hello', '   hello   ', 'rstrip')
    self.checkequal('hello', 'hello', 'strip')

    # strip/lstrip/rstrip with None arg
    self.checkequal('hello', '   hello   ', 'strip', None)
    self.checkequal('hello   ', '   hello   ', 'lstrip', None)
    self.checkequal('   hello', '   hello   ', 'rstrip', None)
    self.checkequal('hello', 'hello', 'strip', None)

    # strip/lstrip/rstrip with str arg
    self.checkequal('hello', 'xyzzyhelloxyzzy', 'strip', 'xyz')
    self.checkequal('helloxyzzy', 'xyzzyhelloxyzzy', 'lstrip', 'xyz')
    self.checkequal('xyzzyhello', 'xyzzyhelloxyzzy', 'rstrip', 'xyz')
    self.checkequal('hello', 'hello', 'strip', 'xyz')

    # strip/lstrip/rstrip with unicode arg
    if test_support.have_unicode:
        self.checkequal(unicode('hello', 'ascii'), 'xyzzyhelloxyzzy',
                        'strip', unicode('xyz', 'ascii'))
        self.checkequal(unicode('helloxyzzy', 'ascii'), 'xyzzyhelloxyzzy',
                        'lstrip', unicode('xyz', 'ascii'))
        self.checkequal(unicode('xyzzyhello', 'ascii'), 'xyzzyhelloxyzzy',
                        'rstrip', unicode('xyz', 'ascii'))
        self.checkequal(unicode('hello', 'ascii'), 'hello',
                        'strip', unicode('xyz', 'ascii'))

    self.checkraises(TypeError, 'hello', 'strip', 42, 42)
    self.checkraises(TypeError, 'hello', 'lstrip', 42, 42)
    self.checkraises(TypeError, 'hello', 'rstrip', 42, 42)
def test_ljust(self):
    # ljust(): pad on the right; width smaller than len() is a no-op.
    self.checkequal('abc       ', 'abc', 'ljust', 10)
    self.checkequal('abc   ', 'abc', 'ljust', 6)
    self.checkequal('abc', 'abc', 'ljust', 3)
    self.checkequal('abc', 'abc', 'ljust', 2)
    self.checkequal('abc*******', 'abc', 'ljust', 10, '*')
    self.checkraises(TypeError, 'abc', 'ljust')

def test_rjust(self):
    # rjust(): pad on the left.
    self.checkequal('       abc', 'abc', 'rjust', 10)
    self.checkequal('   abc', 'abc', 'rjust', 6)
    self.checkequal('abc', 'abc', 'rjust', 3)
    self.checkequal('abc', 'abc', 'rjust', 2)
    self.checkequal('*******abc', 'abc', 'rjust', 10, '*')
    self.checkraises(TypeError, 'abc', 'rjust')

def test_center(self):
    # center(): extra padding goes to the right when uneven.
    self.checkequal('   abc    ', 'abc', 'center', 10)
    self.checkequal(' abc  ', 'abc', 'center', 6)
    self.checkequal('abc', 'abc', 'center', 3)
    self.checkequal('abc', 'abc', 'center', 2)
    self.checkequal('***abc****', 'abc', 'center', 10, '*')
    self.checkraises(TypeError, 'abc', 'center')

def test_swapcase(self):
    self.checkequal('hEllO CoMPuTErS', 'HeLLo cOmpUteRs', 'swapcase')

    self.checkraises(TypeError, 'hello', 'swapcase', 42)
def test_replace(self):
    # replace() with an optional maximum replacement count; an empty
    # pattern matches between every character.
    self.checkequal('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
    self.checkequal('onetwothree', 'one!two!three!', 'replace', '!', '')
    self.checkequal('one@two@three!', 'one!two!three!', 'replace', '!', '@', 2)
    self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 3)
    self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 4)
    self.checkequal('one!two!three!', 'one!two!three!', 'replace', '!', '@', 0)
    self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@')
    self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@')
    self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@', 2)
    self.checkequal('-a-b-c-', 'abc', 'replace', '', '-')
    self.checkequal('-a-b-c', 'abc', 'replace', '', '-', 3)
    self.checkequal('abc', 'abc', 'replace', '', '-', 0)
    self.checkequal('', '', 'replace', '', '')
    self.checkequal('abc', 'abc', 'replace', 'ab', '--', 0)
    self.checkequal('abc', 'abc', 'replace', 'xy', '--')
    # Next three for SF bug 422088: [OSF1 alpha] string.replace(); died with
    # MemoryError due to empty result (platform malloc issue when requesting
    # 0 bytes).
    self.checkequal('', '123', 'replace', '123', '')
    self.checkequal('', '123123', 'replace', '123', '')
    self.checkequal('x', '123x123', 'replace', '123', '')

    self.checkraises(TypeError, 'hello', 'replace')
    self.checkraises(TypeError, 'hello', 'replace', 42)
    self.checkraises(TypeError, 'hello', 'replace', 42, 'h')
    self.checkraises(TypeError, 'hello', 'replace', 'h', 42)

def test_zfill(self):
    # zfill(): left-pad with zeros, keeping a leading sign in place.
    self.checkequal('123', '123', 'zfill', 2)
    self.checkequal('123', '123', 'zfill', 3)
    self.checkequal('0123', '123', 'zfill', 4)
    self.checkequal('+123', '+123', 'zfill', 3)
    self.checkequal('+123', '+123', 'zfill', 4)
    self.checkequal('+0123', '+123', 'zfill', 5)
    self.checkequal('-123', '-123', 'zfill', 3)
    self.checkequal('-123', '-123', 'zfill', 4)
    self.checkequal('-0123', '-123', 'zfill', 5)
    self.checkequal('000', '', 'zfill', 3)
    self.checkequal('34', '34', 'zfill', 1)
    self.checkequal('0034', '34', 'zfill', 4)

    self.checkraises(TypeError, '123', 'zfill')
class MixinStrUnicodeUserStringTest:
    # additional tests that only work for
    # stringlike classes, i.e. str, unicode, UserString
    # (but not the string module)
def test_islower(self):
    # islower(): at least one cased character, none of them uppercase.
    self.checkequal(False, '', 'islower')
    self.checkequal(True, 'a', 'islower')
    self.checkequal(False, 'A', 'islower')
    self.checkequal(False, '\n', 'islower')
    self.checkequal(True, 'abc', 'islower')
    self.checkequal(False, 'aBc', 'islower')
    self.checkequal(True, 'abc\n', 'islower')
    self.checkraises(TypeError, 'abc', 'islower', 42)

def test_isupper(self):
    # isupper(): at least one cased character, none of them lowercase.
    self.checkequal(False, '', 'isupper')
    self.checkequal(False, 'a', 'isupper')
    self.checkequal(True, 'A', 'isupper')
    self.checkequal(False, '\n', 'isupper')
    self.checkequal(True, 'ABC', 'isupper')
    self.checkequal(False, 'AbC', 'isupper')
    self.checkequal(True, 'ABC\n', 'isupper')
    self.checkraises(TypeError, 'abc', 'isupper', 42)

def test_istitle(self):
    # istitle(): uppercase follows uncased, lowercase follows cased.
    self.checkequal(False, '', 'istitle')
    self.checkequal(False, 'a', 'istitle')
    self.checkequal(True, 'A', 'istitle')
    self.checkequal(False, '\n', 'istitle')
    self.checkequal(True, 'A Titlecased Line', 'istitle')
    self.checkequal(True, 'A\nTitlecased Line', 'istitle')
    self.checkequal(True, 'A Titlecased, Line', 'istitle')
    self.checkequal(False, 'Not a capitalized String', 'istitle')
    self.checkequal(False, 'Not\ta Titlecase String', 'istitle')
    self.checkequal(False, 'Not--a Titlecase String', 'istitle')
    self.checkequal(False, 'NOT', 'istitle')
    self.checkraises(TypeError, 'abc', 'istitle', 42)

def test_isspace(self):
    self.checkequal(False, '', 'isspace')
    self.checkequal(False, 'a', 'isspace')
    self.checkequal(True, ' ', 'isspace')
    self.checkequal(True, '\t', 'isspace')
    self.checkequal(True, '\r', 'isspace')
    self.checkequal(True, '\n', 'isspace')
    self.checkequal(True, ' \t\r\n', 'isspace')
    self.checkequal(False, ' \t\r\na', 'isspace')
    self.checkraises(TypeError, 'abc', 'isspace', 42)

def test_isalpha(self):
    self.checkequal(False, '', 'isalpha')
    self.checkequal(True, 'a', 'isalpha')
    self.checkequal(True, 'A', 'isalpha')
    self.checkequal(False, '\n', 'isalpha')
    self.checkequal(True, 'abc', 'isalpha')
    self.checkequal(False, 'aBc123', 'isalpha')
    self.checkequal(False, 'abc\n', 'isalpha')
    self.checkraises(TypeError, 'abc', 'isalpha', 42)

def test_isalnum(self):
    self.checkequal(False, '', 'isalnum')
    self.checkequal(True, 'a', 'isalnum')
    self.checkequal(True, 'A', 'isalnum')
    self.checkequal(False, '\n', 'isalnum')
    self.checkequal(True, '123abc456', 'isalnum')
    self.checkequal(True, 'a1b3c', 'isalnum')
    self.checkequal(False, 'aBc000 ', 'isalnum')
    self.checkequal(False, 'abc\n', 'isalnum')
    self.checkraises(TypeError, 'abc', 'isalnum', 42)

def test_isdigit(self):
    self.checkequal(False, '', 'isdigit')
    self.checkequal(False, 'a', 'isdigit')
    self.checkequal(True, '0', 'isdigit')
    self.checkequal(True, '0123456789', 'isdigit')
    self.checkequal(False, '0123456789a', 'isdigit')

    self.checkraises(TypeError, 'abc', 'isdigit', 42)
def test_title(self):
    # title(): first letter of each word uppercased, rest lowercased;
    # any non-cased character starts a new word.
    self.checkequal(' Hello ', ' hello ', 'title')
    self.checkequal('Hello ', 'hello ', 'title')
    self.checkequal('Hello ', 'Hello ', 'title')
    self.checkequal('Format This As Title String', "fOrMaT thIs aS titLe String", 'title')
    self.checkequal('Format,This-As*Title;String', "fOrMaT,thIs-aS*titLe;String", 'title', )
    self.checkequal('Getint', "getInt", 'title')
    self.checkraises(TypeError, 'hello', 'title', 42)

def test_splitlines(self):
    # splitlines() splits on \n, \r and \r\n; keepends=1 retains them.
    self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\rghi", 'splitlines')
    self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\r\nghi", 'splitlines')
    self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi", 'splitlines')
    self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi\n", 'splitlines')
    self.checkequal(['abc', 'def', 'ghi', ''], "abc\ndef\r\nghi\n\r", 'splitlines')
    self.checkequal(['', 'abc', 'def', 'ghi', ''], "\nabc\ndef\r\nghi\n\r", 'splitlines')
    self.checkequal(['\n', 'abc\n', 'def\r\n', 'ghi\n', '\r'], "\nabc\ndef\r\nghi\n\r", 'splitlines', 1)

    self.checkraises(TypeError, 'abc', 'splitlines', 42, 42)
def test_startswith(self):
    # startswith() with optional start/end slice bounds.
    self.checkequal(True, 'hello', 'startswith', 'he')
    self.checkequal(True, 'hello', 'startswith', 'hello')
    self.checkequal(False, 'hello', 'startswith', 'hello world')
    self.checkequal(True, 'hello', 'startswith', '')
    self.checkequal(False, 'hello', 'startswith', 'ello')
    self.checkequal(True, 'hello', 'startswith', 'ello', 1)
    self.checkequal(True, 'hello', 'startswith', 'o', 4)
    self.checkequal(False, 'hello', 'startswith', 'o', 5)
    self.checkequal(True, 'hello', 'startswith', '', 5)
    self.checkequal(False, 'hello', 'startswith', 'lo', 6)
    self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3)
    self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3, 7)
    self.checkequal(False, 'helloworld', 'startswith', 'lowo', 3, 6)

    # test negative indices
    self.checkequal(True, 'hello', 'startswith', 'he', 0, -1)
    self.checkequal(True, 'hello', 'startswith', 'he', -53, -1)
    self.checkequal(False, 'hello', 'startswith', 'hello', 0, -1)
    self.checkequal(False, 'hello', 'startswith', 'hello world', -1, -10)
    self.checkequal(False, 'hello', 'startswith', 'ello', -5)
    self.checkequal(True, 'hello', 'startswith', 'ello', -4)
    self.checkequal(False, 'hello', 'startswith', 'o', -2)
    self.checkequal(True, 'hello', 'startswith', 'o', -1)
    self.checkequal(True, 'hello', 'startswith', '', -3, -3)
    self.checkequal(False, 'hello', 'startswith', 'lo', -9)

    self.checkraises(TypeError, 'hello', 'startswith')
    self.checkraises(TypeError, 'hello', 'startswith', 42)

def test_endswith(self):
    # endswith() with optional start/end slice bounds.
    self.checkequal(True, 'hello', 'endswith', 'lo')
    self.checkequal(False, 'hello', 'endswith', 'he')
    self.checkequal(True, 'hello', 'endswith', '')
    self.checkequal(False, 'hello', 'endswith', 'hello world')
    self.checkequal(False, 'helloworld', 'endswith', 'worl')
    self.checkequal(True, 'helloworld', 'endswith', 'worl', 3, 9)
    self.checkequal(True, 'helloworld', 'endswith', 'world', 3, 12)
    self.checkequal(True, 'helloworld', 'endswith', 'lowo', 1, 7)
    self.checkequal(True, 'helloworld', 'endswith', 'lowo', 2, 7)
    self.checkequal(True, 'helloworld', 'endswith', 'lowo', 3, 7)
    self.checkequal(False, 'helloworld', 'endswith', 'lowo', 4, 7)
    self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, 8)
    self.checkequal(False, 'ab', 'endswith', 'ab', 0, 1)
    self.checkequal(False, 'ab', 'endswith', 'ab', 0, 0)

    # test negative indices
    self.checkequal(True, 'hello', 'endswith', 'lo', -2)
    self.checkequal(False, 'hello', 'endswith', 'he', -2)
    self.checkequal(True, 'hello', 'endswith', '', -3, -3)
    self.checkequal(False, 'hello', 'endswith', 'hello world', -10, -2)
    self.checkequal(False, 'helloworld', 'endswith', 'worl', -6)
    self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, -1)
    self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, 9)
    self.checkequal(True, 'helloworld', 'endswith', 'world', -7, 12)
    self.checkequal(True, 'helloworld', 'endswith', 'lowo', -99, -3)
    self.checkequal(True, 'helloworld', 'endswith', 'lowo', -8, -3)
    self.checkequal(True, 'helloworld', 'endswith', 'lowo', -7, -3)
    self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, -4)
    self.checkequal(False, 'helloworld', 'endswith', 'lowo', -8, -2)

    self.checkraises(TypeError, 'hello', 'endswith')
    self.checkraises(TypeError, 'hello', 'endswith', 42)
def test___contains__(self):
self.checkequal(True, '', '__contains__', '') # vereq('' in '', True)
self.checkequal(True, 'abc', '__contains__', '') # vereq('' in 'abc', True)
self.checkequal(False, 'abc', '__contains__', '\0') # vereq('\0' in 'abc', False)
self.checkequal(True, '\0abc', '__contains__', '\0') # vereq('\0' in '\0abc', True)
self.checkequal(True, 'abc\0', '__contains__', '\0') # vereq('\0' in 'abc\0', True)
self.checkequal(True, '\0abc', '__contains__', 'a') # vereq('a' in '\0abc', True)
self.checkequal(True, 'asdf', '__contains__', 'asdf') # vereq('asdf' in 'asdf', True)
self.checkequal(False, 'asd', '__contains__', 'asdf') # vereq('asdf' in 'asd', False)
self.checkequal(False, '', '__contains__', 'asdf') # vereq('asdf' in '', False)
    def test_subscript(self):
        # Indexing and slice-object subscripting via __getitem__.
        # NOTE: the u'' expected values assert that subscripting a unicode
        # object yields unicode results.
        self.checkequal(u'a', 'abc', '__getitem__', 0)
        self.checkequal(u'c', 'abc', '__getitem__', -1)
        # A long index (Python 2) must behave exactly like an int index.
        self.checkequal(u'a', 'abc', '__getitem__', 0L)
        self.checkequal(u'abc', 'abc', '__getitem__', slice(0, 3))
        # Out-of-range slice bounds are clamped, never an error.
        self.checkequal(u'abc', 'abc', '__getitem__', slice(0, 1000))
        self.checkequal(u'a', 'abc', '__getitem__', slice(0, 1))
        self.checkequal(u'', 'abc', '__getitem__', slice(0, 0))
        # FIXME What about negative indizes? This is handled differently by [] and __getitem__(slice)
        # A non-integer, non-slice subscript must raise TypeError.
        self.checkraises(TypeError, 'abc', '__getitem__', 'def')
def test_slice(self):
self.checkequal('abc', 'abc', '__getslice__', 0, 1000)
self.checkequal('abc', 'abc', '__getslice__', 0, 3)
self.checkequal('ab', 'abc', '__getslice__', 0, 2)
self.checkequal('bc', 'abc', '__getslice__', 1, 3)
self.checkequal('b', 'abc', '__getslice__', 1, 2)
self.checkequal('', 'abc', '__getslice__', 2, 2)
self.checkequal('', 'abc', '__getslice__', 1000, 1000)
self.checkequal('', 'abc', '__getslice__', 2000, 1000)
self.checkequal('', 'abc', '__getslice__', 2, 1)
# FIXME What about negative indizes? This is handled differently by [] and __getslice__
self.checkraises(TypeError, 'abc', '__getslice__', 'def')
def test_mul(self):
self.checkequal('', 'abc', '__mul__', -1)
self.checkequal('', 'abc', '__mul__', 0)
self.checkequal('abc', 'abc', '__mul__', 1)
self.checkequal('abcabcabc', 'abc', '__mul__', 3)
self.checkraises(TypeError, 'abc', '__mul__')
self.checkraises(TypeError, 'abc', '__mul__', '')
self.checkraises(OverflowError, 10000*'abc', '__mul__', 2000000000)
    def test_join(self):
        # join now works with any sequence type
        # moved here, because the argument order is
        # different in string.join (see the test in
        # test.test_string.StringTest.test_join)
        self.checkequal('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
        self.checkequal('abcd', '', 'join', ('a', 'b', 'c', 'd'))
        # Sequence/UserList are test fixtures exercising non-list iterables.
        self.checkequal('w x y z', ' ', 'join', Sequence())
        self.checkequal('abc', 'a', 'join', ('abc',))
        self.checkequal('z', 'a', 'join', UserList(['z']))
        if test_support.have_unicode:
            # Any unicode participant promotes the whole result to unicode.
            self.checkequal(unicode('a.b.c'), unicode('.'), 'join', ['a', 'b', 'c'])
            self.checkequal(unicode('a.b.c'), '.', 'join', [unicode('a'), 'b', 'c'])
            self.checkequal(unicode('a.b.c'), '.', 'join', ['a', unicode('b'), 'c'])
            self.checkequal(unicode('a.b.c'), '.', 'join', ['a', 'b', unicode('c')])
            self.checkraises(TypeError, '.', 'join', ['a', unicode('b'), 3])
        # Larger inputs exercise the internal buffer-resizing paths of join.
        for i in [5, 25, 125]:
            self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
                 ['a' * i] * i)
            self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
                 ('a' * i,) * i)
        # BadSeq1/BadSeq2 are fixtures with misbehaving sequence protocols.
        self.checkraises(TypeError, ' ', 'join', BadSeq1())
        self.checkequal('a b c', ' ', 'join', BadSeq2())
        self.checkraises(TypeError, ' ', 'join')
        self.checkraises(TypeError, ' ', 'join', 7)
        self.checkraises(TypeError, ' ', 'join', Sequence([7, 'hello', 123L]))
    def test_formatting(self):
        # Percent-formatting exercised via the __mod__ operator.
        self.checkequal('+hello+', '+%s+', '__mod__', 'hello')
        self.checkequal('+10+', '+%d+', '__mod__', 10)
        self.checkequal('a', "%c", '__mod__', "a")
        self.checkequal('a', "%c", '__mod__', "a")
        # %c accepts either a 1-char string or an integer ordinal.
        self.checkequal('"', "%c", '__mod__', 34)
        self.checkequal('$', "%c", '__mod__', 36)
        self.checkequal('10', "%d", '__mod__', 10)
        self.checkequal('\x7f', "%c", '__mod__', 0x7f)
        # Ordinals outside the valid character range must raise.
        for ordinal in (-100, 0x200000):
            # unicode raises ValueError, str raises OverflowError
            self.checkraises((ValueError, OverflowError), '%c', '__mod__', ordinal)
        self.checkequal(' 42', '%3ld', '__mod__', 42)
        self.checkequal('0042.00', '%07.2f', '__mod__', 42)
        self.checkequal('0042.00', '%07.2F', '__mod__', 42)
        # Malformed format strings / argument mismatches.
        self.checkraises(TypeError, 'abc', '__mod__')
        self.checkraises(TypeError, '%(foo)s', '__mod__', 42)
        self.checkraises(TypeError, '%s%s', '__mod__', (42,))
        self.checkraises(TypeError, '%c', '__mod__', (None,))
        self.checkraises(ValueError, '%(foo', '__mod__', {})
        self.checkraises(TypeError, '%(foo)s %(bar)s', '__mod__', ('foo', 42))
        # argument names with properly nested brackets are supported
        self.checkequal('bar', '%((foo))s', '__mod__', {'(foo)': 'bar'})
        # 100 is a magic number in PyUnicode_Format, this forces a resize
        self.checkequal(103*'a'+'x', '%sx', '__mod__', 103*'a')
        # '*' width/precision require integer arguments.
        self.checkraises(TypeError, '%*s', '__mod__', ('foo', 'bar'))
        self.checkraises(TypeError, '%10.*f', '__mod__', ('foo', 42.))
        self.checkraises(ValueError, '%10', '__mod__', (42,))
    def test_floatformatting(self):
        # float formatting
        # Sweep precisions 0..99 against a geometrically growing value and
        # verify formatting either succeeds or overflows exactly where the
        # C implementation's fixed buffer dictates.
        for prec in xrange(100):
            format = '%%.%if' % prec
            value = 0.01
            for x in xrange(60):
                value = value * 3.141592655 / 3.0 * 10.0
                # The formatfloat() code in stringobject.c and
                # unicodeobject.c uses a 120 byte buffer and switches from
                # 'f' formatting to 'g' at precision 50, so we expect
                # OverflowErrors for the ranges x < 50 and prec >= 67.
                if x < 50 and prec >= 67:
                    self.checkraises(OverflowError, format, "__mod__", value)
                else:
                    self.checkcall(format, "__mod__", value)
class MixinStrStringUserStringTest:
    # Additional tests for 8bit strings, i.e. str, UserString and
    # the string module
    def test_maketrans(self):
        # maketrans('abc', 'xyz') yields the 256-byte identity table with
        # only the bytes a, b, c remapped to x, y, z.
        self.assertEqual(
           ''.join(map(chr, xrange(256))).replace('abc', 'xyz'),
           string.maketrans('abc', 'xyz')
        )
        # The two arguments must be of equal length.
        self.assertRaises(ValueError, string.maketrans, 'abc', 'xyzw')
    def test_translate(self):
        table = string.maketrans('abc', 'xyz')
        # Optional second argument deletes characters before translating.
        self.checkequal('xyzxyz', 'xyzabcdef', 'translate', table, 'def')
        table = string.maketrans('a', 'A')
        self.checkequal('Abc', 'abc', 'translate', table)
        self.checkequal('xyz', 'xyz', 'translate', table)
        self.checkequal('yz', 'xyz', 'translate', table, 'x')
        # A translation table must be exactly 256 characters long.
        self.checkraises(ValueError, 'xyz', 'translate', 'too short', 'strip')
        self.checkraises(ValueError, 'xyz', 'translate', 'too short')
class MixinStrUserStringTest:
    # Additional tests that only work with
    # 8bit compatible object, i.e. str and UserString
    def test_encoding_decoding(self):
        # Round-trip 'hello world' through several str<->str codecs and
        # check the exact encoded representation in each direction.
        codecs = [('rot13', 'uryyb jbeyq'),
                  ('base64', 'aGVsbG8gd29ybGQ=\n'),
                  ('hex', '68656c6c6f20776f726c64'),
                  ('uu', 'begin 666 <data>\n+:&5L;&\\@=V]R;&0 \n \nend\n')]
        for encoding, data in codecs:
            self.checkequal(data, 'hello world', 'encode', encoding)
            self.checkequal('hello world', data, 'decode', encoding)
        # zlib is optional, so we make the test optional too...
        try:
            import zlib
        except ImportError:
            pass
        else:
            data = 'x\x9c\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\x01\x00\x1a\x0b\x04]'
            self.checkequal(data, 'hello world', 'encode', 'zlib')
            self.checkequal('hello world', data, 'decode', 'zlib')
        # Codec names must be strings.
        self.checkraises(TypeError, 'xyz', 'decode', 42)
        self.checkraises(TypeError, 'xyz', 'encode', 42)
class MixinStrUnicodeTest:
    # Additional tests that only work with str and unicode.
    def test_bug1001011(self):
        # Make sure join returns a NEW object for single item sequences
        # involving a subclass.
        # Make sure that it is of the appropriate type.
        # Check the optimisation still occurs for standard objects.
        t = self.type2test
        class subclass(t):
            pass
        # Joining a single subclass instance must copy (is not), so the
        # result is exactly the base type, never the subclass.
        s1 = subclass("abcd")
        s2 = t().join([s1])
        self.assert_(s1 is not s2)
        self.assert_(type(s2) is t)
        # Joining a single exact-type instance may return it unchanged (is).
        s1 = t("abcd")
        s2 = t().join([s1])
        self.assert_(s1 is s2)
        # Should also test mixed-type join.
        if t is unicode:
            # str separator joining unicode content still yields unicode.
            s1 = subclass("abcd")
            s2 = "".join([s1])
            self.assert_(s1 is not s2)
            self.assert_(type(s2) is t)
            s1 = t("abcd")
            s2 = "".join([s1])
            self.assert_(s1 is s2)
        elif t is str:
            # A unicode separator promotes the whole result to unicode,
            # so identity can never be preserved here.
            s1 = subclass("abcd")
            s2 = u"".join([s1])
            self.assert_(s1 is not s2)
            self.assert_(type(s2) is unicode) # promotes!
            s1 = t("abcd")
            s2 = u"".join([s1])
            self.assert_(s1 is not s2)
            self.assert_(type(s2) is unicode) # promotes!
        else:
            self.fail("unexpected type for MixinStrUnicodeTest %r" % t)
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keystone Client functionality for use by resources."""
import collections
import uuid
import weakref
from keystoneauth1 import exceptions as ks_exception
from keystoneauth1.identity import generic as ks_auth
from keystoneclient.v3 import client as kc_v3
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from heat.common import context
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LW
LOG = logging.getLogger('heat.engine.clients.keystoneclient')
# Lightweight record describing an EC2-style credential stored in keystone.
AccessKey = collections.namedtuple('AccessKey', ['id', 'access', 'secret'])
_default_keystone_backend = (
    'heat.engine.clients.os.keystone.heat_keystoneclient.KsClientWrapper')
# Lets operators substitute an alternative keystone backend implementation;
# KeystoneClient.__new__ below compares against _default_keystone_backend.
keystone_opts = [
    cfg.StrOpt('keystone_backend',
               default=_default_keystone_backend,
               help="Fully qualified class name to use as a keystone backend.")
]
cfg.CONF.register_opts(keystone_opts)
class KsClientWrapper(object):
    """Wrap keystone client so we can encapsulate logic used in resources.
    Note this is intended to be initialized from a resource on a per-session
    basis, so the session context is passed in on initialization
    Also note that an instance of this is created in each request context as
    part of a lazy-loaded cloud backend and it can be easily referenced in
    each resource as ``self.keystone()``, so there should not be any need to
    directly instantiate instances of this class inside resources themselves.
    """
    def __init__(self, context):
        # If a trust_id is specified in the context, we immediately
        # authenticate so we can populate the context with a trust token
        # otherwise, we delay client authentication until needed to avoid
        # unnecessary calls to keystone.
        #
        # Note that when you obtain a token using a trust, it cannot be
        # used to reauthenticate and get another token, so we have to
        # get a new trust-token even if context.auth_token is set.
        #
        # - context.auth_url is expected to contain a versioned keystone
        #   path, we will work with either a v2.0 or v3 path
        # Weak reference so this wrapper does not keep the request
        # context alive (see the `context` property below).
        self._context = weakref.ref(context)
        self._client = None
        self._admin_auth = None
        self._domain_admin_auth = None
        self._domain_admin_client = None
        self.session = self.context.keystone_session
        self.v3_endpoint = self.context.keystone_v3_endpoint
        if self.context.trust_id:
            # Create a client with the specified trust_id, this
            # populates self.context.auth_token with a trust-scoped token
            self._client = self._v3_client_init()
        # The stack domain user ID should be set in heat.conf
        # It can be created via python-openstackclient
        # openstack --os-identity-api-version=3 domain create heat
        # If the domain is specified, then you must specify a domain
        # admin user.  If no domain is specified, we fall back to
        # legacy behavior with warnings.
        self._stack_domain_id = cfg.CONF.stack_user_domain_id
        self.stack_domain_name = cfg.CONF.stack_user_domain_name
        self.domain_admin_user = cfg.CONF.stack_domain_admin
        self.domain_admin_password = cfg.CONF.stack_domain_admin_password
        LOG.debug('Using stack domain %s' % self.stack_domain)
    @property
    def context(self):
        # Dereference the weak reference; the context must still be alive
        # whenever this wrapper is used.
        ctxt = self._context()
        assert ctxt is not None, "Need a reference to the context"
        return ctxt
    @property
    def stack_domain(self):
        """Domain scope data.
        This is only used for checking for scoping data, not using the value.
        """
        return self._stack_domain_id or self.stack_domain_name
    @property
    def client(self):
        # Lazily create the v3 client on first access.
        if not self._client:
            # Create connection to v3 API
            self._client = self._v3_client_init()
        return self._client
    @property
    def domain_admin_auth(self):
        # Lazily build and cache a domain-scoped auth plugin for the
        # stack domain admin user.
        if not self._domain_admin_auth:
            # Note we must specify the domain when getting the token
            # as only a domain scoped token can create projects in the domain
            auth = ks_auth.Password(username=self.domain_admin_user,
                                    password=self.domain_admin_password,
                                    auth_url=self.v3_endpoint,
                                    domain_id=self._stack_domain_id,
                                    domain_name=self.stack_domain_name,
                                    user_domain_id=self._stack_domain_id,
                                    user_domain_name=self.stack_domain_name)
            # NOTE(jamielennox): just do something to ensure a valid token
            try:
                auth.get_token(self.session)
            except ks_exception.Unauthorized:
                LOG.error(_LE("Domain admin client authentication failed"))
                raise exception.AuthorizationFailure()
            self._domain_admin_auth = auth
        return self._domain_admin_auth
    @property
    def domain_admin_client(self):
        # Lazily build and cache a keystone v3 client bound to the
        # domain admin auth plugin.
        if not self._domain_admin_client:
            self._domain_admin_client = kc_v3.Client(
                session=self.session,
                auth=self.domain_admin_auth)
        return self._domain_admin_client
    def _v3_client_init(self):
        """Create a v3 client and, when a trust is in play, validate it.

        Raises exception.AuthorizationFailure if authentication fails or
        the trust is not scoped/impersonating as expected.
        """
        client = kc_v3.Client(session=self.session)
        if hasattr(self.context.auth_plugin, 'get_access'):
            # NOTE(jamielennox): get_access returns the current token without
            # reauthenticating if it's present and valid.
            try:
                auth_ref = self.context.auth_plugin.get_access(self.session)
            except ks_exception.Unauthorized:
                LOG.error(_LE("Keystone client authentication failed"))
                raise exception.AuthorizationFailure()
            if self.context.trust_id:
                # Sanity check
                if not auth_ref.trust_scoped:
                    LOG.error(_LE("trust token re-scoping failed!"))
                    raise exception.AuthorizationFailure()
                # Sanity check that impersonation is effective
                if self.context.trustor_user_id != auth_ref.user_id:
                    LOG.error(_LE("Trust impersonation failed"))
                    raise exception.AuthorizationFailure()
        return client
    def create_trust_context(self):
        """Create a trust using the trustor identity in the current context.
        The trust is created with the trustee as the heat service user.
        If the current context already contains a trust_id, we do nothing
        and return the current context.
        Returns a context containing the new trust_id.
        """
        if self.context.trust_id:
            return self.context
        # We need the service admin user ID (not name), as the trustor user
        # can't lookup the ID in keystoneclient unless they're admin
        # workaround this by getting the user_id from admin_client
        try:
            trustee_user_id = self.context.trusts_auth_plugin.get_user_id(
                self.session)
        except ks_exception.Unauthorized:
            LOG.error(_LE("Domain admin client authentication failed"))
            raise exception.AuthorizationFailure()
        trustor_user_id = self.context.auth_plugin.get_user_id(self.session)
        trustor_proj_id = self.context.auth_plugin.get_project_id(self.session)
        # inherit the roles of the trustor, unless set trusts_delegated_roles
        if cfg.CONF.trusts_delegated_roles:
            roles = cfg.CONF.trusts_delegated_roles
        else:
            roles = self.context.roles
        try:
            trust = self.client.trusts.create(trustor_user=trustor_user_id,
                                              trustee_user=trustee_user_id,
                                              project=trustor_proj_id,
                                              impersonation=True,
                                              role_names=roles)
        except ks_exception.NotFound:
            # Keystone returns NotFound when a requested role cannot be
            # delegated; surface it as a missing-credential error.
            LOG.debug("Failed to find roles %s for user %s"
                      % (roles, trustor_user_id))
            raise exception.MissingCredentialError(
                required=_("roles %s") % roles)
        context_data = self.context.to_dict()
        context_data['overwrite'] = False
        trust_context = context.RequestContext.from_dict(context_data)
        trust_context.trust_id = trust.id
        trust_context.trustor_user_id = trustor_user_id
        return trust_context
    def delete_trust(self, trust_id):
        """Delete the specified trust."""
        try:
            self.client.trusts.delete(trust_id)
        except ks_exception.NotFound:
            # Already gone; deletion is idempotent.
            pass
    def _get_username(self, username):
        """Return username truncated to keystone's 64 character limit."""
        if(len(username) > 64):
            LOG.warning(_LW("Truncating the username %s to the last 64 "
                            "characters."), username)
        # get the last 64 characters of the username
        return username[-64:]
    def create_stack_user(self, username, password=''):
        """Create a user defined as part of a stack.
        The user is defined either via template or created internally by a
        resource.  This user will be added to the heat_stack_user_role as
        defined in the config.
        Returns the keystone ID of the resulting user.
        """
        # FIXME(shardy): There's duplicated logic between here and
        # create_stack_domain user, but this function is expected to
        # be removed after the transition of all resources to domain
        # users has been completed
        stack_user_role = self.client.roles.list(
            name=cfg.CONF.heat_stack_user_role)
        if len(stack_user_role) == 1:
            role_id = stack_user_role[0].id
            # Create the user
            user = self.client.users.create(
                name=self._get_username(username), password=password,
                default_project=self.context.tenant_id)
            # Add user to heat_stack_user_role
            LOG.debug("Adding user %(user)s to role %(role)s" % {
                      'user': user.id, 'role': role_id})
            self.client.roles.grant(role=role_id, user=user.id,
                                    project=self.context.tenant_id)
        else:
            LOG.error(_LE("Failed to add user %(user)s to role %(role)s, "
                          "check role exists!"), {
                      'user': username,
                      'role': cfg.CONF.heat_stack_user_role})
            raise exception.Error(_("Can't find role %s")
                                  % cfg.CONF.heat_stack_user_role)
        return user.id
    def stack_domain_user_token(self, user_id, project_id, password):
        """Get a token for a stack domain user."""
        if not self.stack_domain:
            # Note, no legacy fallback path as we don't want to deploy
            # tokens for non stack-domain users inside instances
            msg = _('Cannot get stack domain user token, no stack domain id '
                    'configured, please fix your heat.conf')
            raise exception.Error(msg)
        # Create a keystone session, then request a token with no
        # catalog (the token is expected to be used inside an instance
        # where a specific endpoint will be specified, and user-data
        # space is limited..)
        # TODO(rabi): generic auth plugins don't support `include_catalog'
        # flag yet. We'll add it once it's supported..
        auth = ks_auth.Password(auth_url=self.v3_endpoint,
                                user_id=user_id,
                                password=password,
                                project_id=project_id)
        return auth.get_token(self.session)
    def create_stack_domain_user(self, username, project_id, password=None):
        """Create a domain user defined as part of a stack.
        The user is defined either via template or created internally by a
        resource.  This user will be added to the heat_stack_user_role as
        defined in the config, and created in the specified project (which is
        expected to be in the stack_domain).
        Returns the keystone ID of the resulting user.
        """
        if not self.stack_domain:
            # FIXME(shardy): Legacy fallback for folks using old heat.conf
            # files which lack domain configuration
            return self.create_stack_user(username=username, password=password)
        # We add the new user to a special keystone role
        # This role is designed to allow easier differentiation of the
        # heat-generated "stack users" which will generally have credentials
        # deployed on an instance (hence are implicitly untrusted)
        stack_user_role = self.domain_admin_client.roles.list(
            name=cfg.CONF.heat_stack_user_role)
        if len(stack_user_role) == 1:
            role_id = stack_user_role[0].id
            # Create user
            user = self.domain_admin_client.users.create(
                name=self._get_username(username), password=password,
                default_project=project_id, domain=self.stack_domain_id)
            # Add to stack user role
            LOG.debug("Adding user %(user)s to role %(role)s" % {
                      'user': user.id, 'role': role_id})
            self.domain_admin_client.roles.grant(role=role_id, user=user.id,
                                                 project=project_id)
        else:
            LOG.error(_LE("Failed to add user %(user)s to role %(role)s, "
                          "check role exists!"),
                      {'user': username,
                       'role': cfg.CONF.heat_stack_user_role})
            raise exception.Error(_("Can't find role %s")
                                  % cfg.CONF.heat_stack_user_role)
        return user.id
    @property
    def stack_domain_id(self):
        # Resolve the stack domain ID from the domain admin token the first
        # time it is needed (it may only be configured by name in heat.conf).
        if not self._stack_domain_id:
            try:
                access = self.domain_admin_auth.get_access(self.session)
            except ks_exception.Unauthorized:
                LOG.error(_LE("Keystone client authentication failed"))
                raise exception.AuthorizationFailure()
            self._stack_domain_id = access.domain_id
        return self._stack_domain_id
    def _check_stack_domain_user(self, user_id, project_id, action):
        """Sanity check that domain/project is correct."""
        user = self.domain_admin_client.users.get(user_id)
        if user.domain_id != self.stack_domain_id:
            raise ValueError(_('User %s in invalid domain') % action)
        if user.default_project_id != project_id:
            raise ValueError(_('User %s in invalid project') % action)
    def delete_stack_domain_user(self, user_id, project_id):
        # Delete a stack domain user after verifying its domain/project.
        if not self.stack_domain:
            # FIXME(shardy): Legacy fallback for folks using old heat.conf
            # files which lack domain configuration
            return self.delete_stack_user(user_id)
        try:
            self._check_stack_domain_user(user_id, project_id, 'delete')
            self.domain_admin_client.users.delete(user_id)
        except ks_exception.NotFound:
            pass
    def delete_stack_user(self, user_id):
        # Legacy (non-domain) stack user deletion; idempotent on NotFound.
        try:
            self.client.users.delete(user=user_id)
        except ks_exception.NotFound:
            pass
    def create_stack_domain_project(self, stack_id):
        """Create a project in the heat stack-user domain."""
        if not self.stack_domain:
            # FIXME(shardy): Legacy fallback for folks using old heat.conf
            # files which lack domain configuration
            return self.context.tenant_id
        # Note we use the tenant ID not name to ensure uniqueness in a multi-
        # domain environment (where the tenant name may not be globally unique)
        project_name = ('%s-%s' % (self.context.tenant_id, stack_id))[:64]
        desc = "Heat stack user project"
        domain_project = self.domain_admin_client.projects.create(
            name=project_name,
            domain=self.stack_domain_id,
            description=desc)
        return domain_project.id
    def delete_stack_domain_project(self, project_id):
        # Delete a stack domain project, refusing to touch projects that
        # do not belong to the heat stack domain.
        if not self.stack_domain:
            # FIXME(shardy): Legacy fallback for folks using old heat.conf
            # files which lack domain configuration
            return
        # If stacks are created before configuring the heat domain, they
        # exist in the default domain, in the user's project, which we
        # do *not* want to delete!  However, if the keystone v3cloudsample
        # policy is used, it's possible that we'll get Forbidden when trying
        # to get the project, so again we should do nothing
        try:
            project = self.domain_admin_client.projects.get(project=project_id)
        except ks_exception.NotFound:
            return
        except ks_exception.Forbidden:
            LOG.warning(_LW('Unable to get details for project %s, '
                            'not deleting'), project_id)
            return
        if project.domain_id != self.stack_domain_id:
            LOG.warning(_LW('Not deleting non heat-domain project'))
            return
        try:
            project.delete()
        except ks_exception.NotFound:
            pass
    def _find_ec2_keypair(self, access, user_id=None):
        """Lookup an ec2 keypair by access ID."""
        # FIXME(shardy): add filtering for user_id when keystoneclient
        # extensible-crud-manager-operations bp lands
        # Brute-force scan: the access key lives inside the credential
        # blob, which the v3 API cannot filter on server-side.
        credentials = self.client.credentials.list()
        for cr in credentials:
            ec2_creds = jsonutils.loads(cr.blob)
            if ec2_creds.get('access') == access:
                return AccessKey(id=cr.id,
                                 access=ec2_creds['access'],
                                 secret=ec2_creds['secret'])
    def delete_ec2_keypair(self, credential_id=None, access=None,
                           user_id=None):
        """Delete credential containing ec2 keypair."""
        if credential_id:
            try:
                self.client.credentials.delete(credential_id)
            except ks_exception.NotFound:
                pass
        elif access:
            cred = self._find_ec2_keypair(access=access, user_id=user_id)
            if cred:
                self.client.credentials.delete(cred.id)
        else:
            raise ValueError("Must specify either credential_id or access")
    def get_ec2_keypair(self, credential_id=None, access=None, user_id=None):
        """Get an ec2 keypair via v3/credentials, by id or access."""
        # Note v3/credentials does not support filtering by access
        # because it's stored in the credential blob, so we expect
        # all resources to pass credential_id except where backwards
        # compatibility is required (resource only has access stored)
        # then we'll have to do a brute-force lookup locally
        if credential_id:
            cred = self.client.credentials.get(credential_id)
            ec2_creds = jsonutils.loads(cred.blob)
            return AccessKey(id=cred.id,
                             access=ec2_creds['access'],
                             secret=ec2_creds['secret'])
        elif access:
            return self._find_ec2_keypair(access=access, user_id=user_id)
        else:
            raise ValueError("Must specify either credential_id or access")
    def create_ec2_keypair(self, user_id=None):
        # Create an ec2 keypair credential for the given (or current) user,
        # generating random access/secret values locally.
        user_id = user_id or self.context.get_access(self.session).user_id
        project_id = self.context.tenant_id
        data_blob = {'access': uuid.uuid4().hex,
                     'secret': uuid.uuid4().hex}
        ec2_creds = self.client.credentials.create(
            user=user_id, type='ec2', data=jsonutils.dumps(data_blob),
            project=project_id)
        # Return a AccessKey namedtuple for easier access to the blob contents
        # We return the id as the v3 api provides no way to filter by
        # access in the blob contents, so it will be much more efficient
        # if we manage credentials by ID instead
        return AccessKey(id=ec2_creds.id,
                         access=data_blob['access'],
                         secret=data_blob['secret'])
    def create_stack_domain_user_keypair(self, user_id, project_id):
        # Same as create_ec2_keypair but using the domain admin client,
        # for stack domain users.
        if not self.stack_domain:
            # FIXME(shardy): Legacy fallback for folks using old heat.conf
            # files which lack domain configuration
            return self.create_ec2_keypair(user_id)
        data_blob = {'access': uuid.uuid4().hex,
                     'secret': uuid.uuid4().hex}
        creds = self.domain_admin_client.credentials.create(
            user=user_id, type='ec2', data=jsonutils.dumps(data_blob),
            project=project_id)
        return AccessKey(id=creds.id,
                         access=data_blob['access'],
                         secret=data_blob['secret'])
    def delete_stack_domain_user_keypair(self, user_id, project_id,
                                         credential_id):
        # Delete a stack domain user's keypair after a domain/project check.
        if not self.stack_domain:
            # FIXME(shardy): Legacy fallback for folks using old heat.conf
            # files which lack domain configuration
            return self.delete_ec2_keypair(credential_id=credential_id)
        self._check_stack_domain_user(user_id, project_id, 'delete_keypair')
        try:
            self.domain_admin_client.credentials.delete(credential_id)
        except ks_exception.NotFound:
            pass
    def disable_stack_user(self, user_id):
        self.client.users.update(user=user_id, enabled=False)
    def enable_stack_user(self, user_id):
        self.client.users.update(user=user_id, enabled=True)
    def disable_stack_domain_user(self, user_id, project_id):
        if not self.stack_domain:
            # FIXME(shardy): Legacy fallback for folks using old heat.conf
            # files which lack domain configuration
            return self.disable_stack_user(user_id)
        self._check_stack_domain_user(user_id, project_id, 'disable')
        self.domain_admin_client.users.update(user=user_id, enabled=False)
    def enable_stack_domain_user(self, user_id, project_id):
        if not self.stack_domain:
            # FIXME(shardy): Legacy fallback for folks using old heat.conf
            # files which lack domain configuration
            return self.enable_stack_user(user_id)
        self._check_stack_domain_user(user_id, project_id, 'enable')
        self.domain_admin_client.users.update(user=user_id, enabled=True)
    def url_for(self, **kwargs):
        """Return a service endpoint URL from the context's auth plugin."""
        default_region_name = (self.context.region_name or
                               cfg.CONF.region_name_for_services)
        kwargs.setdefault('region_name', default_region_name)
        return self.context.auth_plugin.get_endpoint(self.session, **kwargs)
    @property
    def auth_token(self):
        # Current auth token for the context's session.
        return self.context.auth_plugin.get_token(self.session)
    @property
    def auth_ref(self):
        # Full access info (token + catalog) for the context's session.
        return self.context.auth_plugin.get_access(self.session)
class KeystoneClient(object):
    """Factory for the configured keystone backend.

    Backend selection is deferred until an instance is requested:
    ``__new__`` hands back either the bundled ``KsClientWrapper`` or an
    instance of whatever class the ``keystone_backend`` option names.
    """
    def __new__(cls, context):
        backend_path = cfg.CONF.keystone_backend
        if backend_path != _default_keystone_backend:
            # A custom backend was configured; import and instantiate it.
            return importutils.import_object(backend_path, context)
        return KsClientWrapper(context)
def list_opts():
    """Yield (group, options) pairs for oslo.config's option discovery."""
    yield None, keystone_opts
| |
#!/usr/bin/env python
#
# Azure Linux extension
#
# Linux Azure Diagnostic Extension (Current version is specified in manifest.xml)
# Copyright (c) Microsoft Corporation
# All rights reserved.
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the ""Software""), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import re
import string
import traceback
import xml.dom.minidom
from Utils.WAAgentUtil import waagent
def get_extension_operation_type(command):
    """Translate an extension command verb into a WALA event operation.

    :param command: Command string, optionally prefixed with '-' or '/'.
    :return: The matching waagent.WALAEventOperation value, the literal
             string "Daemon" for the LAD-specific daemon verb, or None
             when the command is not recognized.
    """
    # Ordered (regex, result-thunk) pairs. Thunks keep the waagent
    # attribute lookups lazy, exactly like the original if-chain.
    dispatch = [
        ("^([-/]*)(enable)", lambda: waagent.WALAEventOperation.Enable),
        # LAD-specific extension operation (invoked from "./diagnostic.py -enable")
        ("^([-/]*)(daemon)", lambda: "Daemon"),
        ("^([-/]*)(install)", lambda: waagent.WALAEventOperation.Install),
        ("^([-/]*)(disable)", lambda: waagent.WALAEventOperation.Disable),
        ("^([-/]*)(uninstall)", lambda: waagent.WALAEventOperation.Uninstall),
        ("^([-/]*)(update)", lambda: waagent.WALAEventOperation.Update),
    ]
    for pattern, result in dispatch:
        if re.match(pattern, command):
            return result()
    return None  # unknown verb: the original fell through and returned None
def wala_event_type_for_telemetry(ext_op_type):
    """Return the telemetry event type for an extension operation.

    The long-running "Daemon" operation is reported as "HeartBeat";
    every other operation type passes through unchanged.
    """
    if ext_op_type == "Daemon":
        return "HeartBeat"
    return ext_op_type
def get_storage_endpoint_with_account(account, endpoint_without_account):
    """Build the table-storage endpoint URI for a storage account.

    Inserts "<account>.table." after the scheme of the supplied endpoint.
    Falls back to https as the scheme when none is present, and to the
    public-cloud suffix core.windows.net when no endpoint is given at all.
    """
    base = endpoint_without_account
    if not base:
        return 'https://'+account+'.table.core.windows.net'
    scheme, sep, host = base.partition('//')
    if sep:
        # Endpoint carried a scheme, e.g. "https://core.windows.net".
        return scheme+'//'+account+".table."+host
    # Bare host with no scheme; assume https.
    return 'https://'+account+".table."+scheme
def check_suspected_memory_leak(pid, logger_err):
    """
    Check suspected memory leak of a process, by inspecting /proc/<pid>/status's VmRSS value.
    :param pid: ID of the process we are checking.
    :param logger_err: Error logging function (e.g., hutil.error)
    :return (bool, int): Bool indicating whether memory leak is suspected. Int for memory usage in KB in true case.
    """
    threshold_in_KB = 2000000  # Roughly 2GB. TODO: Make it configurable or automatically calculated
    usage_in_KB = 0
    leak_suspected = False
    try:
        # "VmRSS" (resident set) is the meaningful number here; "VmSize"
        # starts out very high (>2000000) so it can't be used.
        # Example line in /proc/<pid>/status: "VmRSS:     33904 kB"
        with open("/proc/{0}/status".format(pid)) as status_file:
            for status_line in status_file:
                if not status_line.startswith("VmRSS:"):
                    continue
                usage_in_KB = int(status_line.split()[1])
                leak_suspected = usage_in_KB > threshold_in_KB
                break
    except Exception as e:
        # Never propagate (e.g., invalid pid); just log and report no leak.
        logger_err("Failed to check memory usage of pid={0}.\nError: {1}\nTrace:\n{2}".format(pid, e, traceback.format_exc()))
    return leak_suspected, usage_in_KB
class LadLogHelper(object):
    """
    Various LAD log helper functions encapsulated here, so that we don't have to tag along all the parameters.
    """
    def __init__(self, logger_log, logger_error, waagent_event_adder, status_reporter, ext_name, ext_ver):
        """
        Constructor
        :param logger_log: Normal logging function (e.g., hutil.log)
        :param logger_error: Error logging function (e.g., hutil.error)
        :param waagent_event_adder: waagent event add function (waagent.AddExtensionEvent)
        :param status_reporter: waagent/extension status report function (hutil.do_status_report)
        :param ext_name: Extension name (hutil.get_name())
        :param ext_ver: Extension version (hutil.get_extension_version())
        """
        self._logger_log = logger_log
        self._logger_error = logger_error
        self._waagent_event_adder = waagent_event_adder
        self._status_reporter = status_reporter
        self._ext_name = ext_name
        self._ext_ver = ext_ver
    def log_suspected_memory_leak_and_kill_mdsd(self, memory_usage_in_KB, mdsd_process, ext_op):
        """
        Log suspected-memory-leak message both in ext logs and as a waagent event.
        :param memory_usage_in_KB: Memory usage in KB (to be included in the log)
        :param mdsd_process: Python Process object for the mdsd process to kill
        :param ext_op: Extension operation type to use for waagent event (waagent.WALAEventOperation.HeartBeat)
        :return: None
        """
        memory_leak_msg = "Suspected mdsd memory leak (Virtual memory usage: {0}MB). " \
                          "Recycling mdsd to self-mitigate.".format(int((memory_usage_in_KB + 1023) / 1024))
        self._logger_log(memory_leak_msg)
        # Add a telemetry for a possible statistical analysis
        # BUG FIX: this previously called self._waagent_event_add, an attribute
        # that is never assigned (__init__ stores the callback as
        # self._waagent_event_adder), so the telemetry call raised
        # AttributeError and mdsd was never killed.
        self._waagent_event_adder(name=self._ext_name,
                                  op=ext_op,
                                  isSuccess=True,
                                  version=self._ext_ver,
                                  message=memory_leak_msg)
        mdsd_process.kill()
    def report_mdsd_dependency_setup_failure(self, ext_event_type, failure_msg):
        """
        Report mdsd dependency setup failure to 3 destinations (ext log, status report, agent event)
        :param ext_event_type: Type of extension event being performed (e.g., 'HeartBeat')
        :param failure_msg: Dependency setup failure message to be added to the logs
        :return: None
        """
        dependencies_err_log_msg = "Failed to set up mdsd dependencies: {0}".format(failure_msg)
        self._logger_error(dependencies_err_log_msg)
        self._status_reporter(ext_event_type, 'error', '1', dependencies_err_log_msg)
        self._waagent_event_adder(name=self._ext_name,
                                  op=ext_event_type,
                                  isSuccess=False,
                                  version=self._ext_ver,
                                  message=dependencies_err_log_msg)
def read_uuid(run_command):
    """
    Read the system UUID by shelling out to dmidecode.
    :param run_command: Command runner returning an (exit_code, output) pair
    :return: The UUID string with surrounding whitespace removed
    """
    _, output = run_command("dmidecode |grep UUID |awk '{print $2}'", chk_err=False)
    return output.strip()
def tail(log_file, output_size=1024):
    """
    Return up to the last `output_size` bytes of a text file, stripped of
    non-printable characters. Returns "" if the file does not exist.
    NOTE: this relies on Python 2 semantics — `filter` on a str returns a str,
    `.decode` exists on str, and a negative seek on a text-mode file is
    allowed. `string` is presumably imported at the top of this file — TODO confirm.
    :param log_file: Path of the file to read
    :param output_size: Maximum number of bytes to return from the end of the file
    """
    if not os.path.exists(log_file):
        return ""
    # Never seek back further than the start of the file.
    pos = min(output_size, os.path.getsize(log_file))
    with open(log_file, "r") as log:
        log.seek(-pos, 2)  # seek relative to end-of-file
        buf = log.read(output_size)
        # Keep only printable characters (Python 2: str -> str).
        buf = filter(lambda x: x in string.printable, buf)
        return buf.decode("ascii", "ignore")
def update_selinux_settings_for_rsyslogomazuremds(run_command, ext_dir):
    """
    Compile and install the bundled lad_mdsd SELinux policy module when the
    SELinux tooling (semodule) is present on the system.
    :param run_command: Command runner taking a shell command string
    :param ext_dir: Extension directory containing lad_mdsd.te
    :return: None
    """
    # This is still needed for Redhat-based distros, which still require SELinux to be allowed
    # for even Unix domain sockets.
    # Anyway, we no longer use 'semanage' (so no need to install policycoreutils-python).
    # We instead compile from the bundled SELinux module def for lad_mdsd
    # TODO Either check the output of these commands or run without capturing output
    semodule_present = os.path.exists("/usr/sbin/semodule") or os.path.exists("/sbin/semodule")
    if semodule_present:
        for command_template in ('checkmodule -M -m -o {0}/lad_mdsd.mod {1}/lad_mdsd.te',
                                 'semodule_package -o {0}/lad_mdsd.pp -m {1}/lad_mdsd.mod',
                                 'semodule -u {0}/lad_mdsd.pp'):
            run_command(command_template.format(ext_dir, ext_dir))
def get_mdsd_proxy_config(waagent_setting, ext_settings, logger):
    """
    Resolve the mdsd HTTP proxy setting.
    Priority order: waagent.conf's HttpProxyConfigString, then the protected
    extension setting, then the public extension setting.
    :param waagent_setting: waagent.HttpProxyConfigString from /etc/waagent.conf
    :param ext_settings: Extension settings object with read_protected_config/read_public_config
    :param logger: Logging function
    :return: The resolved proxy config string, or '' if none is configured
    """
    proxy_setting_name = 'mdsdHttpProxy'
    proxy = waagent_setting
    if not proxy:
        proxy = ext_settings.read_protected_config(proxy_setting_name)
    if not proxy:
        proxy = ext_settings.read_public_config(proxy_setting_name)
        # Public config is user-supplied, so validate its type before stripping.
        if isinstance(proxy, basestring):
            proxy = proxy.strip()
        else:
            logger('Error: mdsdHttpProxy config is not a string. Ignored.')
    if proxy:
        logger("mdsdHttpProxy setting was given and will be passed to mdsd, "
               "but not logged here in case there's a password in it")
        return proxy
    return ''
def escape_nonalphanumerics(data):
    """
    Escape every non-alphanumeric character in `data` as ':XXXX', where XXXX
    is the character's code point as 4 uppercase hex digits.
    :param data: Input string
    :return: Escaped string
    """
    escaped_chars = []
    for ch in data:
        if ch.isalnum():
            escaped_chars.append(ch)
        else:
            escaped_chars.append(":{0:04X}".format(ord(ch)))
    return ''.join(escaped_chars)
# TODO Should this be placed in WAAgentUtil.py?
def get_deployment_id_from_hosting_env_cfg(waagent_dir, logger_log, logger_error):
    """
    Get deployment ID from waagent dir's HostingEnvironmentConfig.xml.
    :param waagent_dir: Waagent dir path (/var/lib/waagent)
    :param logger_log: Normal logging function (hutil.log)
    :param logger_error: Error logging function (hutil.error)
    :return: Obtained deployment ID string if the hosting env cfg xml exists & deployment ID is found.
             "unknown" if the xml exists, but deployment ID can't be found.
             None if the xml does not exist.
    """
    identity = "unknown"
    env_cfg_path = os.path.join(waagent_dir, "HostingEnvironmentConfig.xml")
    if not os.path.exists(env_cfg_path):
        # BUGFIX: message previously had an unbalanced parenthesis.
        logger_log("No Deployment ID (not running in a hosted environment)")
        return None

    try:
        with open(env_cfg_path, 'r') as env_cfg_file:
            xml_text = env_cfg_file.read()
        dom = xml.dom.minidom.parseString(xml_text)
        deployment = dom.getElementsByTagName("Deployment")
        # Guard against a missing <Deployment> element instead of letting
        # deployment[0] raise IndexError into the broad except below.
        name = deployment[0].getAttribute("name") if deployment else ""
        if name:
            identity = name
        logger_log("Deployment ID found: {0}.".format(identity))
    except Exception as e:
        # use fallback identity
        logger_error("Failed to retrieve deployment ID. Error:{0}\nStacktrace: {1}".format(e, traceback.format_exc()))
    return identity
def write_lad_pids_to_file(pid_file_path, py_pid, mdsd_pid=None):
    """
    Write LAD process IDs to file, one per line.
    :param str pid_file_path: Path of the file to be written
    :param int py_pid: PID of diagnostic.py
    :param int mdsd_pid: PID of mdsd or None (when called before mdsd is started)
    :return: None
    """
    pids = [py_pid]
    if mdsd_pid is not None:
        pids.append(mdsd_pid)
    with open(pid_file_path, 'w') as pid_file:
        pid_file.write(''.join('{0}\n'.format(pid) for pid in pids))
| |
"""
Support for MQTT Template lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.mqtt_template/
"""
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components import mqtt
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH,
ATTR_HS_COLOR, ATTR_TRANSITION, ATTR_WHITE_VALUE, Light,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, SUPPORT_FLASH,
SUPPORT_COLOR, SUPPORT_TRANSITION, SUPPORT_WHITE_VALUE)
from homeassistant.const import (
CONF_DEVICE, CONF_NAME, CONF_OPTIMISTIC, STATE_ON, STATE_OFF)
from homeassistant.components.mqtt import (
CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN, CONF_STATE_TOPIC,
CONF_UNIQUE_ID, MqttAttributes, MqttAvailability, MqttDiscoveryUpdate,
MqttEntityDeviceInfo, subscription)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
from homeassistant.helpers.restore_state import RestoreEntity
from . import MQTT_LIGHT_SCHEMA_SCHEMA
_LOGGER = logging.getLogger(__name__)

DOMAIN = 'mqtt_template'

DEPENDENCIES = ['mqtt']

DEFAULT_NAME = 'MQTT Template Light'
DEFAULT_OPTIMISTIC = False

# Configuration keys for the user-supplied Jinja templates that render
# outgoing command payloads and parse incoming state payloads.
CONF_BLUE_TEMPLATE = 'blue_template'
CONF_BRIGHTNESS_TEMPLATE = 'brightness_template'
CONF_COLOR_TEMP_TEMPLATE = 'color_temp_template'
CONF_COMMAND_OFF_TEMPLATE = 'command_off_template'
CONF_COMMAND_ON_TEMPLATE = 'command_on_template'
CONF_EFFECT_LIST = 'effect_list'
CONF_EFFECT_TEMPLATE = 'effect_template'
CONF_GREEN_TEMPLATE = 'green_template'
CONF_RED_TEMPLATE = 'red_template'
CONF_STATE_TEMPLATE = 'state_template'
CONF_WHITE_VALUE_TEMPLATE = 'white_value_template'

# Validation schema for this platform's configuration (and for discovery
# payloads; see MqttTemplate.discovery_update). Only the on/off command
# templates and the command topic are mandatory; all other templates enable
# optional features.
PLATFORM_SCHEMA_TEMPLATE = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_BLUE_TEMPLATE): cv.template,
    vol.Optional(CONF_BRIGHTNESS_TEMPLATE): cv.template,
    vol.Optional(CONF_COLOR_TEMP_TEMPLATE): cv.template,
    vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_EFFECT_TEMPLATE): cv.template,
    vol.Optional(CONF_GREEN_TEMPLATE): cv.template,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
    vol.Optional(CONF_RED_TEMPLATE): cv.template,
    vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean,
    vol.Optional(CONF_STATE_TEMPLATE): cv.template,
    vol.Optional(CONF_STATE_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_WHITE_VALUE_TEMPLATE): cv.template,
    vol.Required(CONF_COMMAND_OFF_TEMPLATE): cv.template,
    vol.Required(CONF_COMMAND_ON_TEMPLATE): cv.template,
    vol.Required(CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_QOS, default=mqtt.DEFAULT_QOS):
        vol.All(vol.Coerce(int), vol.In([0, 1, 2])),
    vol.Optional(CONF_UNIQUE_ID): cv.string,
    vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema).extend(
    mqtt.MQTT_JSON_ATTRS_SCHEMA.schema).extend(MQTT_LIGHT_SCHEMA_SCHEMA.schema)
async def async_setup_entity_template(config, async_add_entities, config_entry,
                                      discovery_hash):
    """Set up a MQTT Template light from a validated config."""
    entity = MqttTemplate(config, config_entry, discovery_hash)
    async_add_entities([entity])
# pylint: disable=too-many-ancestors
class MqttTemplate(MqttAttributes, MqttAvailability, MqttDiscoveryUpdate,
                   MqttEntityDeviceInfo, Light, RestoreEntity):
    """Representation of a MQTT Template light.

    Outgoing commands and incoming state are rendered/parsed via
    user-supplied Jinja templates. The entity runs optimistically (assumes
    commands succeed) when no state topic or state template is configured.
    """

    def __init__(self, config, config_entry, discovery_hash):
        """Initialize a MQTT Template light."""
        self._state = False
        self._sub_state = None  # handle for the current MQTT subscription state

        self._topics = None
        self._templates = None
        self._optimistic = False

        # features; None means "feature not configured" (see _setup_from_config)
        self._brightness = None
        self._color_temp = None
        self._white_value = None
        self._hs = None
        self._effect = None

        self._unique_id = config.get(CONF_UNIQUE_ID)

        # Load config
        self._setup_from_config(config)

        device_config = config.get(CONF_DEVICE)

        # Initialize each mixin explicitly (multiple-inheritance setup).
        MqttAttributes.__init__(self, config)
        MqttAvailability.__init__(self, config)
        MqttDiscoveryUpdate.__init__(self, discovery_hash,
                                     self.discovery_update)
        MqttEntityDeviceInfo.__init__(self, device_config, config_entry)

    async def async_added_to_hass(self):
        """Subscribe to MQTT events."""
        await super().async_added_to_hass()
        await self._subscribe_topics()

    async def discovery_update(self, discovery_payload):
        """Handle updated discovery message."""
        # Re-validate the discovery payload against the platform schema, then
        # rebuild the entity configuration and resubscribe.
        config = PLATFORM_SCHEMA_TEMPLATE(discovery_payload)
        self._setup_from_config(config)
        await self.attributes_discovery_update(config)
        await self.availability_discovery_update(config)
        await self.device_info_discovery_update(config)
        await self._subscribe_topics()
        self.async_schedule_update_ha_state()

    def _setup_from_config(self, config):
        """(Re)Setup the entity."""
        self._config = config

        self._topics = {
            key: config.get(key) for key in (
                CONF_STATE_TOPIC,
                CONF_COMMAND_TOPIC
            )
        }
        self._templates = {
            key: config.get(key) for key in (
                CONF_BLUE_TEMPLATE,
                CONF_BRIGHTNESS_TEMPLATE,
                CONF_COLOR_TEMP_TEMPLATE,
                CONF_COMMAND_OFF_TEMPLATE,
                CONF_COMMAND_ON_TEMPLATE,
                CONF_EFFECT_TEMPLATE,
                CONF_GREEN_TEMPLATE,
                CONF_RED_TEMPLATE,
                CONF_STATE_TEMPLATE,
                CONF_WHITE_VALUE_TEMPLATE,
            )
        }
        optimistic = config.get(CONF_OPTIMISTIC)
        # Optimistic mode is forced when state feedback is impossible
        # (no state topic or no state template).
        self._optimistic = optimistic \
            or self._topics[CONF_STATE_TOPIC] is None \
            or self._templates[CONF_STATE_TEMPLATE] is None

        # features
        # A non-None value enables the feature (see supported_features);
        # 255 is a placeholder until a real value is received or restored.
        if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None:
            self._brightness = 255
        else:
            self._brightness = None

        if self._templates[CONF_COLOR_TEMP_TEMPLATE] is not None:
            self._color_temp = 255
        else:
            self._color_temp = None

        if self._templates[CONF_WHITE_VALUE_TEMPLATE] is not None:
            self._white_value = 255
        else:
            self._white_value = None

        # Color support requires all three of the red/green/blue templates.
        if (self._templates[CONF_RED_TEMPLATE] is not None and
                self._templates[CONF_GREEN_TEMPLATE] is not None and
                self._templates[CONF_BLUE_TEMPLATE] is not None):
            self._hs = [0, 0]
        else:
            self._hs = None
        self._effect = None

    async def _subscribe_topics(self):
        """(Re)Subscribe to topics."""
        # Templates need a reference to hass before they can be rendered.
        for tpl in self._templates.values():
            if tpl is not None:
                tpl.hass = self.hass
        last_state = await self.async_get_last_state()

        @callback
        def state_received(topic, payload, qos):
            """Handle new MQTT messages."""
            # Each configured template extracts its attribute from the same
            # incoming payload; invalid values are logged and skipped.
            state = self._templates[CONF_STATE_TEMPLATE].\
                async_render_with_possible_json_value(payload)
            if state == STATE_ON:
                self._state = True
            elif state == STATE_OFF:
                self._state = False
            else:
                _LOGGER.warning("Invalid state value received")

            if self._brightness is not None:
                try:
                    self._brightness = int(
                        self._templates[CONF_BRIGHTNESS_TEMPLATE].
                        async_render_with_possible_json_value(payload)
                    )
                except ValueError:
                    _LOGGER.warning("Invalid brightness value received")

            if self._color_temp is not None:
                try:
                    self._color_temp = int(
                        self._templates[CONF_COLOR_TEMP_TEMPLATE].
                        async_render_with_possible_json_value(payload)
                    )
                except ValueError:
                    _LOGGER.warning("Invalid color temperature value received")

            if self._hs is not None:
                try:
                    red = int(
                        self._templates[CONF_RED_TEMPLATE].
                        async_render_with_possible_json_value(payload))
                    green = int(
                        self._templates[CONF_GREEN_TEMPLATE].
                        async_render_with_possible_json_value(payload))
                    blue = int(
                        self._templates[CONF_BLUE_TEMPLATE].
                        async_render_with_possible_json_value(payload))
                    # Internal color state is hue/saturation, not RGB.
                    self._hs = color_util.color_RGB_to_hs(red, green, blue)
                except ValueError:
                    _LOGGER.warning("Invalid color value received")

            if self._white_value is not None:
                try:
                    self._white_value = int(
                        self._templates[CONF_WHITE_VALUE_TEMPLATE].
                        async_render_with_possible_json_value(payload)
                    )
                except ValueError:
                    _LOGGER.warning('Invalid white value received')

            if self._templates[CONF_EFFECT_TEMPLATE] is not None:
                effect = self._templates[CONF_EFFECT_TEMPLATE].\
                    async_render_with_possible_json_value(payload)

                # Only accept effects declared in the configured effect list.
                if effect in self._config.get(CONF_EFFECT_LIST):
                    self._effect = effect
                else:
                    _LOGGER.warning("Unsupported effect value received")

            self.async_schedule_update_ha_state()

        if self._topics[CONF_STATE_TOPIC] is not None:
            self._sub_state = await subscription.async_subscribe_topics(
                self.hass, self._sub_state,
                {'state_topic': {'topic': self._topics[CONF_STATE_TOPIC],
                                 'msg_callback': state_received,
                                 'qos': self._config.get(CONF_QOS)}})

        # In optimistic mode there is no state feedback, so restore the last
        # known state from the recorder (RestoreEntity).
        if self._optimistic and last_state:
            self._state = last_state.state == STATE_ON
            if last_state.attributes.get(ATTR_BRIGHTNESS):
                self._brightness = last_state.attributes.get(ATTR_BRIGHTNESS)
            if last_state.attributes.get(ATTR_HS_COLOR):
                self._hs = last_state.attributes.get(ATTR_HS_COLOR)
            if last_state.attributes.get(ATTR_COLOR_TEMP):
                self._color_temp = last_state.attributes.get(ATTR_COLOR_TEMP)
            if last_state.attributes.get(ATTR_EFFECT):
                self._effect = last_state.attributes.get(ATTR_EFFECT)
            if last_state.attributes.get(ATTR_WHITE_VALUE):
                self._white_value = last_state.attributes.get(ATTR_WHITE_VALUE)

    async def async_will_remove_from_hass(self):
        """Unsubscribe when removed."""
        self._sub_state = await subscription.async_unsubscribe_topics(
            self.hass, self._sub_state)
        await MqttAttributes.async_will_remove_from_hass(self)
        await MqttAvailability.async_will_remove_from_hass(self)

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness

    @property
    def color_temp(self):
        """Return the color temperature in mired."""
        return self._color_temp

    @property
    def hs_color(self):
        """Return the hs color value [int, int]."""
        return self._hs

    @property
    def white_value(self):
        """Return the white property."""
        return self._white_value

    @property
    def should_poll(self):
        """Return True if entity has to be polled for state.

        False if entity pushes its state to HA.
        """
        return False

    @property
    def name(self):
        """Return the name of the entity."""
        return self._config.get(CONF_NAME)

    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._unique_id

    @property
    def is_on(self):
        """Return True if entity is on."""
        return self._state

    @property
    def assumed_state(self):
        """Return True if unable to access real state of the entity."""
        return self._optimistic

    @property
    def effect_list(self):
        """Return the list of supported effects."""
        return self._config.get(CONF_EFFECT_LIST)

    @property
    def effect(self):
        """Return the current effect."""
        return self._effect

    async def async_turn_on(self, **kwargs):
        """Turn the entity on.

        This method is a coroutine.
        """
        # 'values' feeds the command_on_template; only requested attributes
        # are included so the template can distinguish "not set".
        values = {'state': True}
        if self._optimistic:
            self._state = True

        if ATTR_BRIGHTNESS in kwargs:
            values['brightness'] = int(kwargs[ATTR_BRIGHTNESS])

            if self._optimistic:
                self._brightness = kwargs[ATTR_BRIGHTNESS]

        if ATTR_COLOR_TEMP in kwargs:
            values['color_temp'] = int(kwargs[ATTR_COLOR_TEMP])

            if self._optimistic:
                self._color_temp = kwargs[ATTR_COLOR_TEMP]

        if ATTR_HS_COLOR in kwargs:
            hs_color = kwargs[ATTR_HS_COLOR]

            # If there's a brightness topic set, we don't want to scale the RGB
            # values given using the brightness.
            if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None:
                brightness = 255
            else:
                brightness = kwargs.get(
                    ATTR_BRIGHTNESS, self._brightness if self._brightness else
                    255)
            rgb = color_util.color_hsv_to_RGB(
                hs_color[0], hs_color[1], brightness / 255 * 100)
            values['red'] = rgb[0]
            values['green'] = rgb[1]
            values['blue'] = rgb[2]

            if self._optimistic:
                self._hs = kwargs[ATTR_HS_COLOR]

        if ATTR_WHITE_VALUE in kwargs:
            values['white_value'] = int(kwargs[ATTR_WHITE_VALUE])

            if self._optimistic:
                self._white_value = kwargs[ATTR_WHITE_VALUE]

        if ATTR_EFFECT in kwargs:
            values['effect'] = kwargs.get(ATTR_EFFECT)

        if ATTR_FLASH in kwargs:
            values['flash'] = kwargs.get(ATTR_FLASH)

        if ATTR_TRANSITION in kwargs:
            values['transition'] = int(kwargs[ATTR_TRANSITION])

        mqtt.async_publish(
            self.hass, self._topics[CONF_COMMAND_TOPIC],
            self._templates[CONF_COMMAND_ON_TEMPLATE].async_render(**values),
            self._config.get(CONF_QOS), self._config.get(CONF_RETAIN)
        )

        # In optimistic mode the state is updated immediately; otherwise wait
        # for the device to publish its new state.
        if self._optimistic:
            self.async_schedule_update_ha_state()

    async def async_turn_off(self, **kwargs):
        """Turn the entity off.

        This method is a coroutine.
        """
        values = {'state': False}
        if self._optimistic:
            self._state = False

        if ATTR_TRANSITION in kwargs:
            values['transition'] = int(kwargs[ATTR_TRANSITION])

        mqtt.async_publish(
            self.hass, self._topics[CONF_COMMAND_TOPIC],
            self._templates[CONF_COMMAND_OFF_TEMPLATE].async_render(**values),
            self._config.get(CONF_QOS), self._config.get(CONF_RETAIN)
        )

        if self._optimistic:
            self.async_schedule_update_ha_state()

    @property
    def supported_features(self):
        """Flag supported features."""
        # Flash and transition are always available; other features are
        # advertised only when the corresponding template was configured.
        features = (SUPPORT_FLASH | SUPPORT_TRANSITION)
        if self._brightness is not None:
            features = features | SUPPORT_BRIGHTNESS
        if self._hs is not None:
            features = features | SUPPORT_COLOR
        if self._config.get(CONF_EFFECT_LIST) is not None:
            features = features | SUPPORT_EFFECT
        if self._color_temp is not None:
            features = features | SUPPORT_COLOR_TEMP
        if self._white_value is not None:
            features = features | SUPPORT_WHITE_VALUE

        return features
| |
# pyOCD debugger
# Copyright (c) 2016-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .provider import (TargetThread, ThreadProvider)
from .common import (read_c_string, HandlerModeThread, EXC_RETURN_EXT_FRAME_MASK)
from ..core import exceptions
from ..core.target import Target
from ..core.plugin import Plugin
from ..debug.context import DebugContext
from ..coresight.cortex_m_core_registers import index_for_reg
from ..trace import events
from ..trace.sink import TraceEventFilter
import logging
# Byte offsets into Argon kernel data structures. These must match the
# target's Argon build layout — TODO confirm against the Argon RTOS source.

# Offset of the flags word within the g_ar kernel struct.
KERNEL_FLAGS_OFFSET = 0x1c
# Bit in the kernel flags word indicating the scheduler is running.
IS_RUNNING_MASK = 0x1

# Offset of the all-threads list within g_ar_objects.
ALL_OBJECTS_THREADS_OFFSET = 0

# Offsets of fields within an Argon thread struct.
THREAD_STACK_POINTER_OFFSET = 0
THREAD_EXTENDED_FRAME_OFFSET = 4
THREAD_NAME_OFFSET = 8
THREAD_STACK_BOTTOM_OFFSET = 12
THREAD_PRIORITY_OFFSET = 16
THREAD_STATE_OFFSET = 17
THREAD_CREATED_NODE_OFFSET = 36

# Offsets of fields within an Argon linked-list node.
LIST_NODE_NEXT_OFFSET = 0
LIST_NODE_OBJ_OFFSET= 8

# Create a logger for this module.
LOG = logging.getLogger(__name__)
class TargetList(object):
    """! @brief Iterator over an Argon circular linked list of kernel objects.

    Reads the list from target memory via the provided context and yields the
    object pointer stored in each node. Iteration stops on a wrap back to the
    head node or on a memory transfer error.
    """

    def __init__(self, context, ptr):
        """
        @param context Debug context used for target memory reads.
        @param ptr Address of the list head pointer on the target.
        """
        self._context = context
        self._list = ptr

    def __iter__(self):
        # Renamed local from 'next' to 'next_node' to avoid shadowing the
        # builtin next(); behavior is unchanged.
        next_node = 0
        head = self._context.read32(self._list)
        node = head
        is_valid = head != 0

        # The list is circular: stop once the next pointer wraps to the head.
        while is_valid and next_node != head:
            try:
                # Read the object from the node.
                obj = self._context.read32(node + LIST_NODE_OBJ_OFFSET)
                yield obj

                next_node = self._context.read32(node + LIST_NODE_NEXT_OFFSET)
                node = next_node
            except exceptions.TransferError:
                LOG.warning("TransferError while reading list elements (list=0x%08x, node=0x%08x), terminating list", self._list, node)
                is_valid = False
class ArgonThreadContext(DebugContext):
    """! @brief Thread context for Argon.

    Presents the register state of a suspended Argon thread by reading the
    values the kernel (and hardware) stacked on the thread's stack, falling
    back to live core registers where appropriate.
    """

    # SP is handled specially, so it is not in these dicts.

    # Stack offsets of registers for a basic (no-FPU) frame: 8 software-stacked
    # registers followed by the 8 hardware-stacked ones.
    CORE_REGISTER_OFFSETS = {
                # Software stacked
                 4: 0, # r4
                 5: 4, # r5
                 6: 8, # r6
                 7: 12, # r7
                 8: 16, # r8
                 9: 20, # r9
                 10: 24, # r10
                 11: 28, # r11
                # Hardware stacked
                 0: 32, # r0
                 1: 36, # r1
                 2: 40, # r2
                 3: 44, # r3
                 12: 48, # r12
                 14: 52, # lr
                 15: 56, # pc
                 16: 60, # xpsr
            }

    # Stack offsets for an extended frame that also contains the FPU registers
    # (s0-s31 and fpscr).
    FPU_EXTENDED_REGISTER_OFFSETS = {
                # Software stacked
                 4: 0, # r4
                 5: 4, # r5
                 6: 8, # r6
                 7: 12, # r7
                 8: 16, # r8
                 9: 20, # r9
                 10: 24, # r10
                 11: 28, # r11
                 0x50: 32, # s16
                 0x51: 36, # s17
                 0x52: 40, # s18
                 0x53: 44, # s19
                 0x54: 48, # s20
                 0x55: 52, # s21
                 0x56: 56, # s22
                 0x57: 60, # s23
                 0x58: 64, # s24
                 0x59: 68, # s25
                 0x5a: 72, # s26
                 0x5b: 76, # s27
                 0x5c: 80, # s28
                 0x5d: 84, # s29
                 0x5e: 88, # s30
                 0x5f: 92, # s31
                # Hardware stacked
                 0: 96, # r0
                 1: 100, # r1
                 2: 104, # r2
                 3: 108, # r3
                 12: 112, # r12
                 14: 116, # lr
                 15: 120, # pc
                 16: 124, # xpsr
                 0x40: 128, # s0
                 0x41: 132, # s1
                 0x42: 136, # s2
                 0x43: 140, # s3
                 0x44: 144, # s4
                 0x45: 148, # s5
                 0x46: 152, # s6
                 0x47: 156, # s7
                 0x48: 160, # s8
                 0x49: 164, # s9
                 0x4a: 168, # s10
                 0x4b: 172, # s11
                 0x4c: 176, # s12
                 0x4d: 180, # s13
                 0x4e: 184, # s14
                 0x4f: 188, # s15
                 33: 192, # fpscr
                 # (reserved word: 196)
            }

    def __init__(self, parent, thread):
        """
        @param parent Parent debug context used for live register/memory reads.
        @param thread The ArgonThread this context represents.
        """
        super(ArgonThreadContext, self).__init__(parent)
        self._thread = thread
        self._has_fpu = self.core.has_fpu

    def read_core_registers_raw(self, reg_list):
        """Read register values for this thread, from the stacked frame when
        the thread is suspended, or live when it is the current thread."""
        reg_list = [index_for_reg(reg) for reg in reg_list]
        reg_vals = []

        isCurrent = self._thread.is_current
        inException = isCurrent and self._parent.read_core_register('ipsr') > 0

        # If this is the current thread and we're not in an exception, just read the live registers.
        if isCurrent and not inException:
            return self._parent.read_core_registers_raw(reg_list)

        # Because of above tests, from now on, inException implies isCurrent;
        # we are generating the thread view for the RTOS thread where the
        # exception occurred; the actual Handler Mode thread view is produced
        # by HandlerModeThread
        if inException:
            # Reasonable to assume PSP is still valid
            sp = self._parent.read_core_register('psp')
        else:
            sp = self._thread.get_stack_pointer()

        # Determine which register offset table to use and the offsets past the saved state.
        hwStacked = 0x20
        swStacked = 0x20
        table = self.CORE_REGISTER_OFFSETS
        if self._has_fpu:
            if inException and self.core.is_vector_catch():
                # Vector catch has just occurred, take live LR
                exceptionLR = self._parent.read_core_register('lr')

                # Check bit 4 of the exception LR to determine if FPU registers were stacked.
                hasExtendedFrame = (exceptionLR & EXC_RETURN_EXT_FRAME_MASK) == 0
            else:
                # Can't really rely on finding live LR after initial
                # vector catch, so retrieve LR stored by OS on last
                # thread switch.
                hasExtendedFrame = self._thread.has_extended_frame

            if hasExtendedFrame:
                table = self.FPU_EXTENDED_REGISTER_OFFSETS
                hwStacked = 0x68
                swStacked = 0x60

        for reg in reg_list:
            # Must handle stack pointer specially.
            if reg == 13:
                # SP is reconstructed as the value it had before the frame was
                # pushed (hardware-only in an exception, both otherwise).
                if inException:
                    reg_vals.append(sp + hwStacked)
                else:
                    reg_vals.append(sp + swStacked + hwStacked)
                continue

            # Look up offset for this register on the stack.
            spOffset = table.get(reg, None)
            if spOffset is None:
                reg_vals.append(self._parent.read_core_register_raw(reg))
                continue
            if inException:
                # Only the hardware-stacked half exists; shift past the
                # software-stacked offsets.
                spOffset -= swStacked

            try:
                if spOffset >= 0:
                    reg_vals.append(self._parent.read32(sp + spOffset))
                else:
                    # Not available - try live one
                    reg_vals.append(self._parent.read_core_register_raw(reg))
            except exceptions.TransferError:
                # Unreadable stack memory: report 0 rather than failing.
                reg_vals.append(0)

        return reg_vals
class ArgonThread(TargetThread):
    """! @brief Base class representing a thread on the target."""

    # Thread state values, matching the Argon kernel's thread state enum —
    # TODO confirm against the Argon source.
    UNKNOWN = 0
    SUSPENDED = 1
    READY = 2
    RUNNING = 3
    BLOCKED = 4
    SLEEPING = 5
    DONE = 6

    STATE_NAMES = {
            UNKNOWN : "Unknown",
            SUSPENDED : "Suspended",
            READY : "Ready",
            RUNNING : "Running",
            BLOCKED : "Blocked",
            SLEEPING : "Sleeping",
            DONE : "Done",
        }

    def __init__(self, targetContext, provider, base):
        """
        @param targetContext Debug context for target memory access.
        @param provider The owning ArgonThreadProvider.
        @param base Address of this thread's struct on the target (also used
            as the thread's unique id).
        """
        super(ArgonThread, self).__init__()
        self._target_context = targetContext
        self._provider = provider
        self._base = base
        self._thread_context = ArgonThreadContext(self._target_context, self)
        self._has_fpu = self._thread_context.core.has_fpu
        self._priority = 0
        self._state = self.UNKNOWN
        self._name = "?"

        try:
            self.update_info()

            ptr = self._target_context.read32(self._base + THREAD_NAME_OFFSET)
            self._name = read_c_string(self._target_context, ptr)

            LOG.debug("Thread@%x name=%x '%s'", self._base, ptr, self._name)
        except exceptions.TransferError:
            # Keep the defaults ("?" name, UNKNOWN state) if memory is unreadable.
            LOG.debug("Transfer error while reading thread info")

    def get_stack_pointer(self):
        """Return the thread's saved stack pointer, or 0 if unreadable."""
        # Get stack pointer saved in thread struct.
        try:
            return self._target_context.read32(self._base + THREAD_STACK_POINTER_OFFSET)
        except exceptions.TransferError:
            LOG.debug("Transfer error while reading thread's stack pointer @ 0x%08x", self._base + THREAD_STACK_POINTER_OFFSET)
            return 0

    def update_info(self):
        """Refresh the cached priority and state from target memory."""
        try:
            self._priority = self._target_context.read8(self._base + THREAD_PRIORITY_OFFSET)

            self._state = self._target_context.read8(self._base + THREAD_STATE_OFFSET)
            # Clamp out-of-range values to UNKNOWN.
            if self._state > self.DONE:
                self._state = self.UNKNOWN
        except exceptions.TransferError:
            LOG.debug("Transfer error while reading thread info")

    @property
    def state(self):
        """Cached thread state (one of the state constants above)."""
        return self._state

    @property
    def priority(self):
        """Cached thread priority."""
        return self._priority

    @property
    def unique_id(self):
        """The thread struct's address serves as the unique id."""
        return self._base

    @property
    def name(self):
        """Thread name read from target memory ('?' if unavailable)."""
        return self._name

    @property
    def description(self):
        """Human-readable state and priority summary."""
        return "%s; Priority %d" % (self.STATE_NAMES[self.state], self.priority)

    @property
    def is_current(self):
        """True if this thread is the one currently scheduled by the kernel."""
        return self._provider.get_actual_current_thread_id() == self.unique_id

    @property
    def context(self):
        """The ArgonThreadContext for reading this thread's registers."""
        return self._thread_context

    @property
    def has_extended_frame(self):
        """True if the kernel recorded an extended (FPU) stack frame for this thread."""
        if not self._has_fpu:
            return False
        try:
            flag = self._target_context.read8(self._base + THREAD_EXTENDED_FRAME_OFFSET)
            return flag != 0
        except exceptions.TransferError:
            LOG.debug("Transfer error while reading thread's extended frame flag @ 0x%08x", self._base + THREAD_EXTENDED_FRAME_OFFSET)
            return False

    def __str__(self):
        return "<ArgonThread@0x%08x id=%x name=%s>" % (id(self), self.unique_id, self.name)

    def __repr__(self):
        return str(self)
class ArgonThreadProvider(ThreadProvider):
    """! @brief Thread provider for the Argon RTOS.

    Locates the kernel symbols (g_ar, g_ar_objects), builds the thread list
    from the kernel's all-objects thread list, and invalidates the cached
    list after flash programming or reset.
    """

    def __init__(self, target):
        super(ArgonThreadProvider, self).__init__(target)
        self.g_ar = None            # Address of the g_ar kernel struct.
        self.g_ar_objects = None    # Address of the g_ar_objects struct.
        self._all_threads = None    # Address of the all-threads list head.
        self._threads = {}          # Map of unique id -> ArgonThread.

    def init(self, symbolProvider):
        """Resolve required kernel symbols; return False if any is missing."""
        self.g_ar = symbolProvider.get_symbol_value("g_ar")
        if self.g_ar is None:
            return False
        LOG.debug("Argon: g_ar = 0x%08x", self.g_ar)

        self.g_ar_objects = symbolProvider.get_symbol_value("g_ar_objects")
        if self.g_ar_objects is None:
            return False
        LOG.debug("Argon: g_ar_objects = 0x%08x", self.g_ar_objects)

        self._all_threads = self.g_ar_objects + ALL_OBJECTS_THREADS_OFFSET

        self._target.session.subscribe(self.event_handler, Target.Event.POST_FLASH_PROGRAM)
        self._target.session.subscribe(self.event_handler, Target.Event.POST_RESET)

        return True

    def invalidate(self):
        """Drop all cached thread objects."""
        self._threads = {}

    def event_handler(self, notification):
        # Invalidate threads list if flash is reprogrammed.
        # Cleanup: pass repr lazily to the logger instead of eager %-formatting,
        # and drop the stray trailing semicolon.
        LOG.debug("Argon: invalidating threads list: %s", repr(notification))
        self.invalidate()

    def _build_thread_list(self):
        """Rebuild self._threads from the target's all-threads list."""
        allThreads = TargetList(self._target_context, self._all_threads)
        newThreads = {}
        for threadBase in allThreads:
            try:
                # Reuse existing thread objects if possible.
                if threadBase in self._threads:
                    t = self._threads[threadBase]

                    # Ask the thread object to update its state and priority.
                    t.update_info()
                else:
                    t = ArgonThread(self._target_context, self, threadBase)
                LOG.debug("Thread 0x%08x (%s)", threadBase, t.name)
                newThreads[t.unique_id] = t
            except exceptions.TransferError:
                LOG.debug("TransferError while examining thread 0x%08x", threadBase)

        # Create fake handler mode thread.
        if self._target_context.read_core_register('ipsr') > 0:
            LOG.debug("creating handler mode thread")
            t = HandlerModeThread(self._target_context, self)
            newThreads[t.unique_id] = t

        self._threads = newThreads

    def get_threads(self):
        if not self.is_enabled:
            return []
        self.update_threads()
        return list(self._threads.values())

    def get_thread(self, threadId):
        if not self.is_enabled:
            return None
        self.update_threads()
        return self._threads.get(threadId, None)

    @property
    def is_enabled(self):
        """Enabled only once symbols are resolved and the scheduler is running."""
        return self.g_ar is not None and self.get_is_running()

    @property
    def current_thread(self):
        if not self.is_enabled:
            return None
        self.update_threads()
        # Renamed local from 'id' to avoid shadowing the builtin.
        thread_id = self.get_current_thread_id()
        try:
            return self._threads[thread_id]
        except KeyError:
            LOG.debug("key error getting current thread id=%s; self._threads = %s",
                ("%x" % thread_id) if (thread_id is not None) else thread_id, repr(self._threads))
            return None

    def is_valid_thread_id(self, threadId):
        if not self.is_enabled:
            return False
        self.update_threads()
        return threadId in self._threads

    def get_current_thread_id(self):
        """Return the handler-mode pseudo id when in an exception, else the
        kernel's current thread id."""
        if not self.is_enabled:
            return None
        if self._target_context.read_core_register('ipsr') > 0:
            return HandlerModeThread.UNIQUE_ID
        return self.get_actual_current_thread_id()

    def get_actual_current_thread_id(self):
        """Return the kernel's current thread pointer (first field of g_ar)."""
        if not self.is_enabled:
            return None
        return self._target_context.read32(self.g_ar)

    def get_is_running(self):
        """True if the Argon scheduler's running flag is set."""
        if self.g_ar is None:
            return False
        flags = self._target_context.read32(self.g_ar + KERNEL_FLAGS_OFFSET)
        return (flags & IS_RUNNING_MASK) != 0
class ArgonTraceEvent(events.TraceEvent):
    """! @brief Argon kernel trace event."""

    kArTraceThreadSwitch = 1 # 2 value: 0=previous thread's new state, 1=new thread id
    kArTraceThreadCreated = 2 # 1 value
    kArTraceThreadDeleted = 3 # 1 value

    def __init__(self, eventID, threadID, name, state, ts=0):
        """Create an Argon trace event with the given id, thread and state."""
        super(ArgonTraceEvent, self).__init__("argon", ts)
        self._event_id = eventID
        self._thread_id = threadID
        self._thread_name = name
        self._prev_thread_state = state

    @property
    def event_id(self):
        """One of the kArTrace* event id constants."""
        return self._event_id

    @property
    def thread_id(self):
        """Id of the thread the event refers to."""
        return self._thread_id

    @property
    def thread_name(self):
        """Name of the thread the event refers to."""
        return self._thread_name

    @property
    def prev_thread_state(self):
        """Previous thread's new state (thread-switch events only)."""
        return self._prev_thread_state

    def __str__(self):
        eid = self.event_id
        if eid == ArgonTraceEvent.kArTraceThreadSwitch:
            stateName = ArgonThread.STATE_NAMES.get(self.prev_thread_state, "<invalid state>")
            desc = "New thread = {}; old thread state = {}".format(self.thread_name, stateName)
        elif eid == ArgonTraceEvent.kArTraceThreadCreated:
            desc = "Created thread {}".format(self.thread_id)
        elif eid == ArgonTraceEvent.kArTraceThreadDeleted:
            desc = "Deleted thread {}".format(self.thread_id)
        else:
            desc = "Unknown kernel event #{}".format(eid)
        return "[{}] Argon: {}".format(self.timestamp, desc)
class ArgonTraceEventFilter(TraceEventFilter):
    """! @brief Trace event filter to identify Argon kernel trace events sent via ITM.

    As Argon kernel trace events are identified, the ITM trace events are replaced with instances
    of ArgonTraceEvent.
    """

    def __init__(self, threads):
        """
        @param threads Mapping of thread id -> thread name used to label events.
        """
        super(ArgonTraceEventFilter, self).__init__()
        self._threads = threads
        # A kernel event arrives as two consecutive ITM events: port 31
        # carries the event id + state, port 30 carries the thread id.
        self._is_thread_event_pending = False
        self._pending_event = None

    def filter(self, event):
        # NOTE(review): events that are not recognized here (non-ITM events,
        # ITM events on other ports) fall through and return None implicitly,
        # i.e. they are swallowed — confirm this matches the TraceEventFilter
        # contract.
        if isinstance(event, events.TraceITMEvent):
            if event.port == 31:
                # Top byte of the port-31 payload is the kernel event id.
                eventID = event.data >> 24
                if eventID in (ArgonTraceEvent.kArTraceThreadSwitch, ArgonTraceEvent.kArTraceThreadCreated, ArgonTraceEvent.kArTraceThreadDeleted):
                    self._is_thread_event_pending = True
                    self._pending_event = event

                    # Swallow the event.
                    return
            elif event.port == 30 and self._is_thread_event_pending:
                eventID = self._pending_event.data >> 24
                threadID = event.data
                name = self._threads.get(threadID, "<unknown thread>")
                # Low 24 bits of the first (port-31) payload hold the state.
                state = self._pending_event.data & 0x00ffffff

                # Create the Argon event.
                event = ArgonTraceEvent(eventID, threadID, name, state, self._pending_event.timestamp)
                self._is_thread_event_pending = False
                self._pending_event = None

                return event
class ArgonPlugin(Plugin):
    """! @brief pyOCD plugin providing Argon RTOS thread awareness."""

    def load(self):
        """Return the thread provider class implemented by this plugin."""
        return ArgonThreadProvider

    @property
    def name(self):
        """Short identifier used to select this plugin."""
        return "argon"

    @property
    def description(self):
        """Human-readable description of the plugin."""
        return "Argon RTOS"
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUAllGateway(NURESTObject):
    """ Represents an AllGateway in the VSD

        Notes:
            A read-only API to get all gateway objects in the VSD environment. Use the ID field to then actually manage the gateway using the gateway API entity.
    """

    # NOTE: vsdk-style generated REST object; attribute accessors below are
    # mechanical and mirror the expose_attribute() declarations in __init__.

    # REST names used by bambou to build endpoint URLs for this entity.
    __rest_name__ = "allgateway"
    __resource_name__ = "allgateways"

    ## Constants
    # Valid values for the enumerated attributes (family, permitted_action,
    # personality, zfb_match_attribute, bootstrap_status, entity_scope); they
    # match the `choices` lists passed to expose_attribute() in __init__.

    CONST_FAMILY_NSG_C = "NSG_C"

    CONST_PERMITTED_ACTION_ALL = "ALL"

    CONST_FAMILY_NSG_E = "NSG_E"

    CONST_PERSONALITY_EVDF = "EVDF"

    CONST_PERSONALITY_NUAGE_210_WBX_32_Q = "NUAGE_210_WBX_32_Q"

    CONST_ZFB_MATCH_ATTRIBUTE_MAC_ADDRESS = "MAC_ADDRESS"

    CONST_PERSONALITY_NSGDUC = "NSGDUC"

    CONST_FAMILY_NSG_V = "NSG_V"

    CONST_BOOTSTRAP_STATUS_ACTIVE = "ACTIVE"

    CONST_FAMILY_NSG_X = "NSG_X"

    CONST_FAMILY_NSG_DOCKER = "NSG_DOCKER"

    CONST_ZFB_MATCH_ATTRIBUTE_IP_ADDRESS = "IP_ADDRESS"

    CONST_FAMILY_VRS = "VRS"

    CONST_FAMILY_NSG_E200 = "NSG_E200"

    CONST_BOOTSTRAP_STATUS_NOTIFICATION_APP_REQ_SENT = "NOTIFICATION_APP_REQ_SENT"

    CONST_PERSONALITY_EVDFB = "EVDFB"

    CONST_ZFB_MATCH_ATTRIBUTE_NSGATEWAY_ID = "NSGATEWAY_ID"

    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"

    CONST_PERSONALITY_OTHER = "OTHER"

    CONST_ZFB_MATCH_ATTRIBUTE_HOSTNAME = "HOSTNAME"

    CONST_PERSONALITY_VDFG = "VDFG"

    CONST_BOOTSTRAP_STATUS_NOTIFICATION_APP_REQ_ACK = "NOTIFICATION_APP_REQ_ACK"

    CONST_PERSONALITY_NSG = "NSG"

    CONST_PERMITTED_ACTION_EXTEND = "EXTEND"

    CONST_PERMITTED_ACTION_INSTANTIATE = "INSTANTIATE"

    CONST_PERSONALITY_DC7X50 = "DC7X50"

    CONST_BOOTSTRAP_STATUS_CERTIFICATE_SIGNED = "CERTIFICATE_SIGNED"

    CONST_FAMILY_NSG_AZ = "NSG_AZ"

    CONST_FAMILY_ANY = "ANY"

    CONST_ZFB_MATCH_ATTRIBUTE_NONE = "NONE"

    CONST_PERSONALITY_VSA = "VSA"

    CONST_PERSONALITY_VSG = "VSG"

    CONST_PERMITTED_ACTION_READ = "READ"

    CONST_PERSONALITY_VRSB = "VRSB"

    CONST_PERMITTED_ACTION_USE = "USE"

    CONST_PERSONALITY_NETCONF_7X50 = "NETCONF_7X50"

    CONST_PERSONALITY_NUAGE_210_WBX_48_S = "NUAGE_210_WBX_48_S"

    CONST_FAMILY_NSG_X200 = "NSG_X200"

    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"

    CONST_FAMILY_NSG_E300 = "NSG_E300"

    CONST_PERSONALITY_VRSG = "VRSG"

    CONST_ZFB_MATCH_ATTRIBUTE_SERIAL_NUMBER = "SERIAL_NUMBER"

    CONST_ZFB_MATCH_ATTRIBUTE_UUID = "UUID"

    CONST_PERSONALITY_HARDWARE_VTEP = "HARDWARE_VTEP"

    CONST_PERSONALITY_NETCONF_THIRDPARTY_HW_VTEP = "NETCONF_THIRDPARTY_HW_VTEP"

    CONST_FAMILY_NSG_AMI = "NSG_AMI"

    CONST_PERMITTED_ACTION_DEPLOY = "DEPLOY"

    CONST_BOOTSTRAP_STATUS_INACTIVE = "INACTIVE"

    CONST_PERSONALITY_NSGBR = "NSGBR"

    def __init__(self, **kwargs):
        """ Initializes an AllGateway instance

            Notes:
                You can specify all parameters while calling this method.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> allgateway = NUAllGateway(id=u'xxxx-xxx-xxx-xxx', name=u'AllGateway')
                >>> allgateway = NUAllGateway(data=my_dict)
        """

        super(NUAllGateway, self).__init__()

        # Read/Write Attributes
        # Backing fields for the properties declared below; each is registered
        # with bambou via expose_attribute() so it is (de)serialized to/from
        # the remote attribute name used by the VSD API.
        self._mac_address = None
        self._zfb_match_attribute = None
        self._zfb_match_value = None
        self._bios_release_date = None
        self._bios_version = None
        self._cpu_type = None
        self._uuid = None
        self._name = None
        self._family = None
        self._management_id = None
        self._last_updated_by = None
        self._datapath_id = None
        self._patches = None
        self._gateway_connected = None
        self._gateway_version = None
        self._redundancy_group_id = None
        self._peer = None
        self._template_id = None
        self._pending = None
        self._serial_number = None
        self._permitted_action = None
        self._personality = None
        self._description = None
        self._libraries = None
        self._enterprise_id = None
        self._entity_scope = None
        self._location_id = None
        self._bootstrap_id = None
        self._bootstrap_status = None
        self._product_name = None
        self._use_gateway_vlanvnid = None
        self._associated_gateway_security_id = None
        self._associated_gateway_security_profile_id = None
        self._associated_nsg_info_id = None
        self._associated_netconf_profile_id = None
        self._vtep = None
        self._auto_disc_gateway_id = None
        self._external_id = None
        self._system_id = None

        self.expose_attribute(local_name="mac_address", remote_name="MACAddress", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="zfb_match_attribute", remote_name="ZFBMatchAttribute", attribute_type=str, is_required=False, is_unique=False, choices=[u'HOSTNAME', u'IP_ADDRESS', u'MAC_ADDRESS', u'NONE', u'NSGATEWAY_ID', u'SERIAL_NUMBER', u'UUID'])
        self.expose_attribute(local_name="zfb_match_value", remote_name="ZFBMatchValue", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="bios_release_date", remote_name="BIOSReleaseDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="bios_version", remote_name="BIOSVersion", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="cpu_type", remote_name="CPUType", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="uuid", remote_name="UUID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="family", remote_name="family", attribute_type=str, is_required=False, is_unique=False, choices=[u'ANY', u'NSG_AMI', u'NSG_AZ', u'NSG_C', u'NSG_DOCKER', u'NSG_E', u'NSG_E200', u'NSG_E300', u'NSG_V', u'NSG_X', u'NSG_X200', u'VRS'])
        self.expose_attribute(local_name="management_id", remote_name="managementID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="datapath_id", remote_name="datapathID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="patches", remote_name="patches", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="gateway_connected", remote_name="gatewayConnected", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="gateway_version", remote_name="gatewayVersion", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="redundancy_group_id", remote_name="redundancyGroupID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="peer", remote_name="peer", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="template_id", remote_name="templateID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="pending", remote_name="pending", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="serial_number", remote_name="serialNumber", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="permitted_action", remote_name="permittedAction", attribute_type=str, is_required=False, is_unique=False, choices=[u'ALL', u'DEPLOY', u'EXTEND', u'INSTANTIATE', u'READ', u'USE'])
        self.expose_attribute(local_name="personality", remote_name="personality", attribute_type=str, is_required=False, is_unique=False, choices=[u'DC7X50', u'EVDF', u'EVDFB', u'HARDWARE_VTEP', u'NETCONF_7X50', u'NETCONF_THIRDPARTY_HW_VTEP', u'NSG', u'NSGBR', u'NSGDUC', u'NUAGE_210_WBX_32_Q', u'NUAGE_210_WBX_48_S', u'OTHER', u'VDFG', u'VRSB', u'VRSG', u'VSA', u'VSG'])
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="libraries", remote_name="libraries", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="location_id", remote_name="locationID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="bootstrap_id", remote_name="bootstrapID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="bootstrap_status", remote_name="bootstrapStatus", attribute_type=str, is_required=False, is_unique=False, choices=[u'ACTIVE', u'CERTIFICATE_SIGNED', u'INACTIVE', u'NOTIFICATION_APP_REQ_ACK', u'NOTIFICATION_APP_REQ_SENT'])
        self.expose_attribute(local_name="product_name", remote_name="productName", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="use_gateway_vlanvnid", remote_name="useGatewayVLANVNID", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_gateway_security_id", remote_name="associatedGatewaySecurityID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_gateway_security_profile_id", remote_name="associatedGatewaySecurityProfileID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_nsg_info_id", remote_name="associatedNSGInfoID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_netconf_profile_id", remote_name="associatedNetconfProfileID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="vtep", remote_name="vtep", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="auto_disc_gateway_id", remote_name="autoDiscGatewayID", attribute_type=str, is_required=False, is_unique=False)
        # externalID is the only attribute marked unique for this entity.
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
        self.expose_attribute(local_name="system_id", remote_name="systemID", attribute_type=str, is_required=False, is_unique=False)

        # Fetchers
        # Child-object fetchers for metadata attached to this gateway.
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        # Apply any keyword arguments (including `data`) to the exposed attributes.
        self._compute_args(**kwargs)

    # Properties
    # Plain getter/setter pairs over the backing fields; no validation is done
    # here (bambou handles type/choice checking via expose_attribute).

    @property
    def mac_address(self):
        """ Get mac_address value.

            Notes:
                MAC Address of the first interface

                This attribute is named `MACAddress` in VSD API.
        """
        return self._mac_address

    @mac_address.setter
    def mac_address(self, value):
        """ Set mac_address value.

            Notes:
                MAC Address of the first interface

                This attribute is named `MACAddress` in VSD API.
        """
        self._mac_address = value

    @property
    def zfb_match_attribute(self):
        """ Get zfb_match_attribute value.

            Notes:
                The Zero Factor Bootstrapping (ZFB) Attribute that should be used to match the gateway on when it tries to bootstrap.

                This attribute is named `ZFBMatchAttribute` in VSD API.
        """
        return self._zfb_match_attribute

    @zfb_match_attribute.setter
    def zfb_match_attribute(self, value):
        """ Set zfb_match_attribute value.

            Notes:
                The Zero Factor Bootstrapping (ZFB) Attribute that should be used to match the gateway on when it tries to bootstrap.

                This attribute is named `ZFBMatchAttribute` in VSD API.
        """
        self._zfb_match_attribute = value

    @property
    def zfb_match_value(self):
        """ Get zfb_match_value value.

            Notes:
                The Zero Factor Bootstrapping (ZFB) value that needs to match with the gateway during the bootstrap attempt. This value needs to match with the ZFB Match Attribute.

                This attribute is named `ZFBMatchValue` in VSD API.
        """
        return self._zfb_match_value

    @zfb_match_value.setter
    def zfb_match_value(self, value):
        """ Set zfb_match_value value.

            Notes:
                The Zero Factor Bootstrapping (ZFB) value that needs to match with the gateway during the bootstrap attempt. This value needs to match with the ZFB Match Attribute.

                This attribute is named `ZFBMatchValue` in VSD API.
        """
        self._zfb_match_value = value

    @property
    def bios_release_date(self):
        """ Get bios_release_date value.

            Notes:
                Release Date of the BIOS. The format can vary based on the manufacturer but normally includes year/month/day or year/week details (eg. 01/01/2011 or 2018/06/15 or 2018/22)

                This attribute is named `BIOSReleaseDate` in VSD API.
        """
        return self._bios_release_date

    @bios_release_date.setter
    def bios_release_date(self, value):
        """ Set bios_release_date value.

            Notes:
                Release Date of the BIOS. The format can vary based on the manufacturer but normally includes year/month/day or year/week details (eg. 01/01/2011 or 2018/06/15 or 2018/22)

                This attribute is named `BIOSReleaseDate` in VSD API.
        """
        self._bios_release_date = value

    @property
    def bios_version(self):
        """ Get bios_version value.

            Notes:
                BIOS Version (eg. 0.5.1)

                This attribute is named `BIOSVersion` in VSD API.
        """
        return self._bios_version

    @bios_version.setter
    def bios_version(self, value):
        """ Set bios_version value.

            Notes:
                BIOS Version (eg. 0.5.1)

                This attribute is named `BIOSVersion` in VSD API.
        """
        self._bios_version = value

    @property
    def cpu_type(self):
        """ Get cpu_type value.

            Notes:
                The Processor Type as reported during bootstrapping.

                This attribute is named `CPUType` in VSD API.
        """
        return self._cpu_type

    @cpu_type.setter
    def cpu_type(self, value):
        """ Set cpu_type value.

            Notes:
                The Processor Type as reported during bootstrapping.

                This attribute is named `CPUType` in VSD API.
        """
        self._cpu_type = value

    @property
    def uuid(self):
        """ Get uuid value.

            Notes:
                UUID of the device

                This attribute is named `UUID` in VSD API.
        """
        return self._uuid

    @uuid.setter
    def uuid(self, value):
        """ Set uuid value.

            Notes:
                UUID of the device

                This attribute is named `UUID` in VSD API.
        """
        self._uuid = value

    @property
    def name(self):
        """ Get name value.

            Notes:
                Name of the Gateway
        """
        return self._name

    @name.setter
    def name(self, value):
        """ Set name value.

            Notes:
                Name of the Gateway
        """
        self._name = value

    @property
    def family(self):
        """ Get family value.

            Notes:
                The family type of the gateway based on common characteristics with other members of a particular variation of an NSG hardware or of a virtual deployment.
        """
        return self._family

    @family.setter
    def family(self, value):
        """ Set family value.

            Notes:
                The family type of the gateway based on common characteristics with other members of a particular variation of an NSG hardware or of a virtual deployment.
        """
        self._family = value

    @property
    def management_id(self):
        """ Get management_id value.

            Notes:
                The identifier of this gateway's management interface.

                This attribute is named `managementID` in VSD API.
        """
        return self._management_id

    @management_id.setter
    def management_id(self, value):
        """ Set management_id value.

            Notes:
                The identifier of this gateway's management interface.

                This attribute is named `managementID` in VSD API.
        """
        self._management_id = value

    @property
    def last_updated_by(self):
        """ Get last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                This attribute is named `lastUpdatedBy` in VSD API.
        """
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                This attribute is named `lastUpdatedBy` in VSD API.
        """
        self._last_updated_by = value

    @property
    def datapath_id(self):
        """ Get datapath_id value.

            Notes:
                Identifier of the Gateway, based on the systemID which is generated when the instance is created in VSD.

                This attribute is named `datapathID` in VSD API.
        """
        return self._datapath_id

    @datapath_id.setter
    def datapath_id(self, value):
        """ Set datapath_id value.

            Notes:
                Identifier of the Gateway, based on the systemID which is generated when the instance is created in VSD.

                This attribute is named `datapathID` in VSD API.
        """
        self._datapath_id = value

    @property
    def patches(self):
        """ Get patches value.

            Notes:
                Patches that have been installed on the NSG
        """
        return self._patches

    @patches.setter
    def patches(self, value):
        """ Set patches value.

            Notes:
                Patches that have been installed on the NSG
        """
        self._patches = value

    @property
    def gateway_connected(self):
        """ Get gateway_connected value.

            Notes:
                A boolean flag indicating the status of the gateway.

                This attribute is named `gatewayConnected` in VSD API.
        """
        return self._gateway_connected

    @gateway_connected.setter
    def gateway_connected(self, value):
        """ Set gateway_connected value.

            Notes:
                A boolean flag indicating the status of the gateway.

                This attribute is named `gatewayConnected` in VSD API.
        """
        self._gateway_connected = value

    @property
    def gateway_version(self):
        """ Get gateway_version value.

            Notes:
                The Gateway Software Version as reported during bootstrapping.

                This attribute is named `gatewayVersion` in VSD API.
        """
        return self._gateway_version

    @gateway_version.setter
    def gateway_version(self, value):
        """ Set gateway_version value.

            Notes:
                The Gateway Software Version as reported during bootstrapping.

                This attribute is named `gatewayVersion` in VSD API.
        """
        self._gateway_version = value

    @property
    def redundancy_group_id(self):
        """ Get redundancy_group_id value.

            Notes:
                The Redundancy Gateway Group associated with this Gateway Instance. This is a read only attribute

                This attribute is named `redundancyGroupID` in VSD API.
        """
        return self._redundancy_group_id

    @redundancy_group_id.setter
    def redundancy_group_id(self, value):
        """ Set redundancy_group_id value.

            Notes:
                The Redundancy Gateway Group associated with this Gateway Instance. This is a read only attribute

                This attribute is named `redundancyGroupID` in VSD API.
        """
        self._redundancy_group_id = value

    @property
    def peer(self):
        """ Get peer value.

            Notes:
                The System ID of the peer gateway associated with this Gateway instance when it is discovered by the network manager (VSD) as being redundant.
        """
        return self._peer

    @peer.setter
    def peer(self, value):
        """ Set peer value.

            Notes:
                The System ID of the peer gateway associated with this Gateway instance when it is discovered by the network manager (VSD) as being redundant.
        """
        self._peer = value

    @property
    def template_id(self):
        """ Get template_id value.

            Notes:
                The ID of the template that this Gateway was created from. This should be set when instantiating a Gateway

                This attribute is named `templateID` in VSD API.
        """
        return self._template_id

    @template_id.setter
    def template_id(self, value):
        """ Set template_id value.

            Notes:
                The ID of the template that this Gateway was created from. This should be set when instantiating a Gateway

                This attribute is named `templateID` in VSD API.
        """
        self._template_id = value

    @property
    def pending(self):
        """ Get pending value.

            Notes:
                Indicates that this gateway is pending state or state. When in pending state it cannot be modified from REST.
        """
        return self._pending

    @pending.setter
    def pending(self, value):
        """ Set pending value.

            Notes:
                Indicates that this gateway is pending state or state. When in pending state it cannot be modified from REST.
        """
        self._pending = value

    @property
    def serial_number(self):
        """ Get serial_number value.

            Notes:
                The device's serial number

                This attribute is named `serialNumber` in VSD API.
        """
        return self._serial_number

    @serial_number.setter
    def serial_number(self, value):
        """ Set serial_number value.

            Notes:
                The device's serial number

                This attribute is named `serialNumber` in VSD API.
        """
        self._serial_number = value

    @property
    def permitted_action(self):
        """ Get permitted_action value.

            Notes:
                The permitted  action to USE/EXTEND  this Gateway.

                This attribute is named `permittedAction` in VSD API.
        """
        return self._permitted_action

    @permitted_action.setter
    def permitted_action(self, value):
        """ Set permitted_action value.

            Notes:
                The permitted  action to USE/EXTEND  this Gateway.

                This attribute is named `permittedAction` in VSD API.
        """
        self._permitted_action = value

    @property
    def personality(self):
        """ Get personality value.

            Notes:
                Personality of the Gateway, cannot be changed after creation.
        """
        return self._personality

    @personality.setter
    def personality(self, value):
        """ Set personality value.

            Notes:
                Personality of the Gateway, cannot be changed after creation.
        """
        self._personality = value

    @property
    def description(self):
        """ Get description value.

            Notes:
                A description of the Gateway
        """
        return self._description

    @description.setter
    def description(self, value):
        """ Set description value.

            Notes:
                A description of the Gateway
        """
        self._description = value

    @property
    def libraries(self):
        """ Get libraries value.

            Notes:
                Versions of monitored libraries currently installed on the Gateway.
        """
        return self._libraries

    @libraries.setter
    def libraries(self, value):
        """ Set libraries value.

            Notes:
                Versions of monitored libraries currently installed on the Gateway.
        """
        self._libraries = value

    @property
    def enterprise_id(self):
        """ Get enterprise_id value.

            Notes:
                The enterprise associated with this Gateway. This is a read only attribute

                This attribute is named `enterpriseID` in VSD API.
        """
        return self._enterprise_id

    @enterprise_id.setter
    def enterprise_id(self, value):
        """ Set enterprise_id value.

            Notes:
                The enterprise associated with this Gateway. This is a read only attribute

                This attribute is named `enterpriseID` in VSD API.
        """
        self._enterprise_id = value

    @property
    def entity_scope(self):
        """ Get entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.
        """
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.
        """
        self._entity_scope = value

    @property
    def location_id(self):
        """ Get location_id value.

            Notes:
                Association to an object which contains location information about this gateway instance.

                This attribute is named `locationID` in VSD API.
        """
        return self._location_id

    @location_id.setter
    def location_id(self, value):
        """ Set location_id value.

            Notes:
                Association to an object which contains location information about this gateway instance.

                This attribute is named `locationID` in VSD API.
        """
        self._location_id = value

    @property
    def bootstrap_id(self):
        """ Get bootstrap_id value.

            Notes:
                The bootstrap details associated with this Gateway. NOTE: This is a read only property, it can only be set during creation of a gateway.

                This attribute is named `bootstrapID` in VSD API.
        """
        return self._bootstrap_id

    @bootstrap_id.setter
    def bootstrap_id(self, value):
        """ Set bootstrap_id value.

            Notes:
                The bootstrap details associated with this Gateway. NOTE: This is a read only property, it can only be set during creation of a gateway.

                This attribute is named `bootstrapID` in VSD API.
        """
        self._bootstrap_id = value

    @property
    def bootstrap_status(self):
        """ Get bootstrap_status value.

            Notes:
                The bootstrap status of this Gateway. NOTE: This is a read only property.

                This attribute is named `bootstrapStatus` in VSD API.
        """
        return self._bootstrap_status

    @bootstrap_status.setter
    def bootstrap_status(self, value):
        """ Set bootstrap_status value.

            Notes:
                The bootstrap status of this Gateway. NOTE: This is a read only property.

                This attribute is named `bootstrapStatus` in VSD API.
        """
        self._bootstrap_status = value

    @property
    def product_name(self):
        """ Get product_name value.

            Notes:
                Product Name as reported during bootstrapping.

                This attribute is named `productName` in VSD API.
        """
        return self._product_name

    @product_name.setter
    def product_name(self, value):
        """ Set product_name value.

            Notes:
                Product Name as reported during bootstrapping.

                This attribute is named `productName` in VSD API.
        """
        self._product_name = value

    @property
    def use_gateway_vlanvnid(self):
        """ Get use_gateway_vlanvnid value.

            Notes:
                When set, VLAN-VNID mapping must be unique for all the vports of the gateway

                This attribute is named `useGatewayVLANVNID` in VSD API.
        """
        return self._use_gateway_vlanvnid

    @use_gateway_vlanvnid.setter
    def use_gateway_vlanvnid(self, value):
        """ Set use_gateway_vlanvnid value.

            Notes:
                When set, VLAN-VNID mapping must be unique for all the vports of the gateway

                This attribute is named `useGatewayVLANVNID` in VSD API.
        """
        self._use_gateway_vlanvnid = value

    @property
    def associated_gateway_security_id(self):
        """ Get associated_gateway_security_id value.

            Notes:
                Read only ID of the associated gateway security object.

                This attribute is named `associatedGatewaySecurityID` in VSD API.
        """
        return self._associated_gateway_security_id

    @associated_gateway_security_id.setter
    def associated_gateway_security_id(self, value):
        """ Set associated_gateway_security_id value.

            Notes:
                Read only ID of the associated gateway security object.

                This attribute is named `associatedGatewaySecurityID` in VSD API.
        """
        self._associated_gateway_security_id = value

    @property
    def associated_gateway_security_profile_id(self):
        """ Get associated_gateway_security_profile_id value.

            Notes:
                Readonly Id of the associated gateway security profile object

                This attribute is named `associatedGatewaySecurityProfileID` in VSD API.
        """
        return self._associated_gateway_security_profile_id

    @associated_gateway_security_profile_id.setter
    def associated_gateway_security_profile_id(self, value):
        """ Set associated_gateway_security_profile_id value.

            Notes:
                Readonly Id of the associated gateway security profile object

                This attribute is named `associatedGatewaySecurityProfileID` in VSD API.
        """
        self._associated_gateway_security_profile_id = value

    @property
    def associated_nsg_info_id(self):
        """ Get associated_nsg_info_id value.

            Notes:
                Read only ID of the associated gateway information object

                This attribute is named `associatedNSGInfoID` in VSD API.
        """
        return self._associated_nsg_info_id

    @associated_nsg_info_id.setter
    def associated_nsg_info_id(self, value):
        """ Set associated_nsg_info_id value.

            Notes:
                Read only ID of the associated gateway information object

                This attribute is named `associatedNSGInfoID` in VSD API.
        """
        self._associated_nsg_info_id = value

    @property
    def associated_netconf_profile_id(self):
        """ Get associated_netconf_profile_id value.

            Notes:
                UUID of the Netconf Profile associated to this gateway.

                This attribute is named `associatedNetconfProfileID` in VSD API.
        """
        return self._associated_netconf_profile_id

    @associated_netconf_profile_id.setter
    def associated_netconf_profile_id(self, value):
        """ Set associated_netconf_profile_id value.

            Notes:
                UUID of the Netconf Profile associated to this gateway.

                This attribute is named `associatedNetconfProfileID` in VSD API.
        """
        self._associated_netconf_profile_id = value

    @property
    def vtep(self):
        """ Get vtep value.

            Notes:
                Represent the system ID or the Virtual IP of a service used by a Gateway (VSG for now) to establish a tunnel with a remote VSG or hypervisor.  The format of this field is consistent with an IP address.
        """
        return self._vtep

    @vtep.setter
    def vtep(self, value):
        """ Set vtep value.

            Notes:
                Represent the system ID or the Virtual IP of a service used by a Gateway (VSG for now) to establish a tunnel with a remote VSG or hypervisor.  The format of this field is consistent with an IP address.
        """
        self._vtep = value

    @property
    def auto_disc_gateway_id(self):
        """ Get auto_disc_gateway_id value.

            Notes:
                The Auto Discovered Gateway associated with this Gateway Instance

                This attribute is named `autoDiscGatewayID` in VSD API.
        """
        return self._auto_disc_gateway_id

    @auto_disc_gateway_id.setter
    def auto_disc_gateway_id(self, value):
        """ Set auto_disc_gateway_id value.

            Notes:
                The Auto Discovered Gateway associated with this Gateway Instance

                This attribute is named `autoDiscGatewayID` in VSD API.
        """
        self._auto_disc_gateway_id = value

    @property
    def external_id(self):
        """ Get external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.
        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.
        """
        self._external_id = value

    @property
    def system_id(self):
        """ Get system_id value.

            Notes:
                Identifier of the Gateway, cannot be modified after creation

                This attribute is named `systemID` in VSD API.
        """
        return self._system_id

    @system_id.setter
    def system_id(self, value):
        """ Set system_id value.

            Notes:
                Identifier of the Gateway, cannot be modified after creation

                This attribute is named `systemID` in VSD API.
        """
        self._system_id = value
| |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RavenFramework is a tool to test raven inputs.
"""
from __future__ import absolute_import
import os
import subprocess
import sys
import platform
from Tester import Tester
import OrderedCSVDiffer
import UnorderedCSVDiffer
import XMLDiff
import TextDiff
import ExistsDiff
import RAVENImageDiff
# Set this outside the class because the framework directory is constant for
# each instance of this Tester, and in addition, there is a problem with the
# path by the time you call it in __init__ that causes it to think its absolute
# path is somewhere under tests/framework.
# Be aware that if this file changes its location, this variable should also be
# changed.
myDir = os.path.dirname(os.path.realpath(__file__))
# Root of the RAVEN framework tree, three directories above this file.
RAVENDIR = os.path.abspath(os.path.join(myDir, '..', '..', '..', 'framework'))

#Need to add the directory for AMSC for doing module checks.
# NOTE: mutates the process environment at import time; prepends the contrib
# directory to PYTHONPATH for any subprocesses launched later.
os.environ["PYTHONPATH"] = os.path.join(RAVENDIR, 'contrib') +\
  os.pathsep + os.environ.get("PYTHONPATH", "")

# Temporarily extend sys.path so library_handler (which lives in the scripts
# directory next to the framework) can be imported, then restore the path so
# the addition does not leak into later imports.
scriptDir = os.path.abspath(os.path.join(RAVENDIR, '..', 'scripts'))
sys.path.append(scriptDir)
import library_handler
sys.path.pop()

# Library availability/version checks run once at import time and are shared
# by every RavenFramework instance.
_missingModules, _notQAModules = library_handler.checkLibraries()
_checkVersions = library_handler.checkVersions()
class RavenFramework(Tester):
"""
RavenFramework is the class to use for testing standard raven inputs.
"""
@staticmethod
def get_valid_params():
  """
    Returns the parameters that can be used for this class.
    @ In, None
    @ Out, params, _ValidParameters, return the parameters.
  """
  params = Tester.get_valid_params()
  params.add_required_param('input', "The input file to use for this test.")
  params.add_param('output', '', "List of output files that the input should create.")
  params.add_param('csv', '', "List of csv files to check")
  params.add_param('UnorderedCsv', '', "List of unordered csv files to check")
  params.add_param('xml', '', "List of xml files to check")
  params.add_param('UnorderedXml', '', "List of unordered xml files to check")
  params.add_param('xmlopts', '', "Options for xml checking")
  params.add_param('text', '', "List of generic text files to check")
  params.add_param('comment', '-20021986', "Character or string denoting "+
                   "comments, all text to the right of the symbol will be "+
                   "ignored in the diff of text files")
  params.add_param('image', '', "List of image files to check")
  params.add_param('rel_err', '', 'Relative Error for csv files or floats in xml ones')
  params.add_param('required_executable', '', 'Skip test if this executable is not found')
  params.add_param('required_libraries', '', 'Skip test if any of these libraries are not found')
  params.add_param('minimum_library_versions', '',
                   'Skip test if the library listed is below the supplied'+
                   ' version (e.g. minimum_library_versions = \"name1 version1 name2 version2\")')
  params.add_param('skip_if_env', '', 'Skip test if this environmental variable is defined')
  params.add_param('skip_if_OS', '', 'Skip test if the operating system defined')
  # fixed: help text was missing the closing parenthesis
  params.add_param('test_interface_only', False,
                   'Test the interface only (without running the driven code)')
  # fixed: typo "directectly" in help text
  params.add_param('check_absolute_value', False,
                   'if true the values are compared to the tolerance '+
                   'directly, instead of relatively.')
  # fixed: missing space between concatenated fragments produced "isconsidered"
  params.add_param('zero_threshold', sys.float_info.min*4.0,
                   'it represents the value below which a float is '+
                   'considered zero (XML comparison only)')
  params.add_param('remove_whitespace', False,
                   'Removes whitespace before comparing xml node text if True')
  # fixed: typo "infront" in help text
  params.add_param('remove_unicode_identifier', False,
                   'if true, then remove u in front of a single quote')
  params.add_param('interactive', False,
                   'if true, then RAVEN will be run with interactivity enabled.')
  params.add_param('python3_only', False, 'if true, then only use with Python3')
  params.add_param('ignore_sign', False, 'if true, then only compare the absolute values')
  return params
def get_command(self):
    """
    Builds the command line used to launch RAVEN for this test.
    @ In, None
    @ Out, get_command, string, command to run.
    """
    flags = []
    if self.specs['test_interface_only']:
        flags.append(' interfaceCheck ')
    if self.specs['interactive']:
        flags.append(' interactiveCheck ')
    ravenflag = ''.join(flags)
    return self._get_python_command() + " " + self.driver + " " + ravenflag + self.specs["input"]
def __make_differ(self, specName, differClass, extra=None):
    """
    Registers a differ for the given spec entry when that entry lists files.
    @ In, specName, string, key in self.specs naming the files to check.
    @ In, differClass, subclass of Differ, differ type to instantiate.
    @ In, extra, dictionary, optional, additional parameters for the differ.
    @ Out, None
    """
    if not self.specs[specName]:
        # nothing listed for this spec, so no differ is needed
        return
    differParams = dict(self.specs)
    differParams["output"] = self.specs[specName]
    differParams["type"] = differClass.__name__
    if extra is not None:
        differParams.update(extra)
    self.add_differ(differClass(specName, differParams, self.get_test_dir()))
def __init__(self, name, params):
    """
    Constructor.
    @ In, name, string, name of the test.
    @ In, params, dict, parameters from the test specification.
    @ Out, None
    """
    Tester.__init__(self, name, params)
    self.all_files = []
    # Attach one differ per file-list spec; __make_differ skips empty entries.
    self.__make_differ('output', ExistsDiff.Exists)
    self.__make_differ('csv', OrderedCSVDiffer.OrderedCSV)
    self.__make_differ('UnorderedCsv', UnorderedCSVDiffer.UnorderedCSV)
    self.__make_differ('xml', XMLDiff.XML, {"unordered":False})
    self.__make_differ('UnorderedXml', XMLDiff.XML, {"unordered":True})
    self.__make_differ('text', TextDiff.Text)
    self.__make_differ('image', RAVENImageDiff.ImageDiffer)
    self.required_executable = self.specs['required_executable']
    # Space-separated spec strings become lists; empty strings become [].
    self.required_libraries = self.specs['required_libraries'].split(' ')\
        if len(self.specs['required_libraries']) > 0 else []
    self.minimum_libraries = self.specs['minimum_library_versions'].split(' ')\
        if len(self.specs['minimum_library_versions']) > 0 else []
    # %METHOD% in the executable path expands from the METHOD env var ("opt" default).
    self.required_executable = self.required_executable.replace("%METHOD%",
                                                                os.environ.get("METHOD", "opt"))
    self.specs['scale_refine'] = False
    self.driver = os.path.join(RAVENDIR, 'Driver.py')
def check_runnable(self):
    """
    Checks if this test can run.
    @ In, None
    @ Out, check_runnable, boolean, if True can run this test.
    """
    # remove tests based on skipping criteria
    ## required module is missing
    if _missingModules:
        # NOTE(review): this branch calls set_fail (not set_skip) even though
        # the message says "skipped" -- confirm a hard failure is intended.
        self.set_fail('skipped (Missing python modules: '+" ".join([m[0] for m in _missingModules])+
                      " PYTHONPATH="+os.environ.get("PYTHONPATH", "")+')')
        return False
    ## required module is present, but too old
    if _notQAModules and _checkVersions:
        self.set_fail('skipped (Incorrectly versioned python modules: ' +
                      " ".join(['required {}-{}, but found {}'.format(*m) for m in _notQAModules]) +
                      " PYTHONPATH="+os.environ.get("PYTHONPATH", "")+')')
        return False
    ## an environment varible value causes a skip
    if len(self.specs['skip_if_env']) > 0:
        envVar = self.specs['skip_if_env']
        if envVar in os.environ:
            self.set_skip('skipped (found environmental variable "'+envVar+'")')
            return False
    ## OS
    if len(self.specs['skip_if_OS']) > 0:
        skipOs = [x.strip().lower() for x in self.specs['skip_if_OS'].split(',')]
        # get simple-name platform (options are Linux, Windows, Darwin, or SunOS that I've seen)
        currentOs = platform.system().lower()
        # replace Darwin with more expected "mac"
        if currentOs == 'darwin':
            currentOs = 'mac'
        if currentOs in skipOs:
            self.set_skip('skipped (OS is "{}")'.format(currentOs))
            return False
    # every required library must be importable
    for lib in self.required_libraries:
        found, _, _ = library_handler.checkSingleLibrary(lib)
        if not found:
            self.set_skip('skipped (Unable to import library: "{}")'.format(lib))
            return False
    if self.specs['python3_only'] and not library_handler.inPython3():
        self.set_skip('Python 3 only')
        return False
    # minimum_libraries is a flat [name, version, name, version, ...] list;
    # an odd length means the spec is malformed
    i = 0
    if len(self.minimum_libraries) % 2:
        self.set_skip('skipped (libraries are not matched to versions numbers: '
                      +str(self.minimum_libraries)+')')
        return False
    while i < len(self.minimum_libraries):
        libraryName = self.minimum_libraries[i]
        libraryVersion = self.minimum_libraries[i+1]
        found, _, actualVersion = library_handler.checkSingleLibrary(libraryName, version='check')
        if not found:
            self.set_skip('skipped (Unable to import library: "'+libraryName+'")')
            return False
        if library_handler.parseVersion(actualVersion) < \
           library_handler.parseVersion(libraryVersion):
            self.set_skip('skipped (Outdated library: "'+libraryName+'")')
            return False
        i += 2
    # the required executable must exist on disk ...
    if len(self.required_executable) > 0 and \
       not os.path.exists(self.required_executable):
        self.set_skip('skipped (Missing executable: "'+self.required_executable+'")')
        return False
    # ... and must run successfully (exit code 0) with no arguments
    try:
        if len(self.required_executable) > 0 and \
           subprocess.call([self.required_executable], stdout=subprocess.PIPE) != 0:
            self.set_skip('skipped (Failing executable: "'+self.required_executable+'")')
            return False
    except Exception as exp:
        self.set_skip('skipped (Error when trying executable: "'
                      +self.required_executable+'")'+str(exp))
        return False
    # a test spec that lists the same output file twice is an authoring error
    filenameSet = set()
    duplicateFiles = []
    for filename in self.__get_created_files():
        if filename not in filenameSet:
            filenameSet.add(filename)
        else:
            duplicateFiles.append(filename)
    if len(duplicateFiles) > 0:
        self.set_skip('[incorrect test] duplicated files specified: '+
                      " ".join(duplicateFiles))
        return False
    return True
def __get_created_files(self):
    """
    Returns all the files used by this test that need to be created
    by the test. Note that they will be deleted at the start of running
    the test.
    @ In, None
    @ Out, createdFiles, [str], list of files created by the test.
    """
    testDir = self.get_test_dir()
    expected = [os.path.join(testDir, name) for name in self.all_files]
    return self.get_differ_remove_files() + expected
def prepare(self):
    """
    Get the test ready to run by removing files that should be created.
    @ In, None
    @ Out, None
    """
    for path in self.__get_created_files():
        if os.path.exists(path):
            os.remove(path)
def process_results(self, _):
    """
    Marks the test as passed; the registered differs perform the real
    output comparisons separately.
    @ In, _, string, output of test (unused).
    @ Out, None
    """
    self.set_success()
| |
#!/usr/bin/env python
from __future__ import division
import os, re, shutil, string, sys, datetime, optparse
def getoptions():
    """Build the command-line interface and return the parsed options object."""
    parser = optparse.OptionParser(
        description="Shawkle - Rule-driven maintenance of plain-text lists",
        prog="shawkle.py", version="0.5", usage="%prog")
    parser.add_option("--cloud", action="store", type="string", dest="cloud", default="cloud",
                      help="file, contents of which to be prefixed to each urlified HTML file; default 'cloud'")
    parser.add_option("--files2dirs", action="store", type="string", dest="files2dirs", default='.files2dirs',
                      help="files with corresponding target directories; default '.files2dirs'")
    parser.add_option("--globalrules", action="store", type="string", dest="globalrules", default='.globalrules',
                      help="rules used globally (typically an absolute pathname), processed first; default '.globalrules'")
    parser.add_option("--localrules", action="store", type="string", dest="localrules", default=".rules",
                      help="rules used locally (typically a relative pathname), processed second; default '.rules'")
    parser.add_option("--sedtxt", action="store", type="string", dest="sedtxt", default=".sedtxt",
                      help="stream edits for plain text, eg, expanding drive letters to URIs; default '.sedtxt'")
    parser.add_option("--sedhtml", action="store", type="string", dest="sedhtml", default=".sedhtml",
                      help="stream edits for urlified HTML, eg, shortening visible pathnames; default '.sedhtml'")
    parser.add_option("--htmldir", action="store", type="string", dest="htmldir", default=".html",
                      help="name of directory for urlified HTML files; default '.html'")
    (options, _arguments) = parser.parse_args()
    return options
def absfilename(filename):
    """Return the absolute, user-expanded form of filename if that path names
    an existing file; otherwise pass the argument through unchanged.

    (Original author's open question retained: should a missing/empty argument
    instead yield a zero-length string? Current behavior is pass-through.)"""
    candidate = os.path.abspath(os.path.expanduser(filename))
    return candidate if os.path.isfile(candidate) else filename
def absdirname(dirname):
    """Return the absolute, user-expanded form of dirname if that path names
    an existing directory; otherwise pass the argument through unchanged."""
    candidate = os.path.abspath(os.path.expanduser(dirname))
    return candidate if os.path.isdir(candidate) else dirname
def datals():
    """Returns list of files in current directory, excluding dot files and subdirectories.
    If swap files, backup files, or non-text files are encountered, exits with error message."""
    filelist = []
    pathnamelist = os.listdir(os.getcwd())
    for pathname in pathnamelist:
        if os.path.isfile(pathname):
            # An editor swap file means a data file is open for editing; refuse to run.
            if pathname[-3:] == "swp":
                print 'Detected swap file', repr(pathname), '- close editor and re-run - exiting...'
                sys.exit()
            # Trailing-tilde backup files must be removed by the user first.
            if pathname[-1] == "~":
                print 'Detected temporary file', repr(pathname), '- delete and re-run - exiting...'
                sys.exit()
            # Dot files are skipped; everything else is returned as an absolute pathname.
            if pathname[0] != ".":
                filelist.append(absfilename(pathname))
    return filelist
def removefiles(targetdirectory):
    """Deletes all (non-dot) data files in the given directory, which must
    exist; exits with an error message otherwise."""
    pwd = os.getcwd()
    abstargetdir = absdirname(targetdirectory)
    if os.path.isdir(abstargetdir):
        os.chdir(abstargetdir)
        # datals() itself excludes dot files and subdirectories
        files = datals()
        if files:
            print 'Clearing out directory', repr(abstargetdir)
            for file in files:
                os.remove(file)
        os.chdir(pwd)
    else:
        print 'Directory', repr(abstargetdir), 'does not exist - exiting...'
        sys.exit()
def movefiles(sourcedirectory, targetdirectory):
    """Moves all (non-dot) data files from the source directory to the target
    directory; exits with an error message if either directory is missing."""
    pwd = os.getcwd()
    abssourcedir = absdirname(sourcedirectory)
    abstargetdir = absdirname(targetdirectory)
    if os.path.isdir(abssourcedir):
        if os.path.isdir(abstargetdir):
            os.chdir(abssourcedir)
            files = datals()
            if files:
                print 'Moving files from directory', repr(sourcedirectory), "to directory", repr(targetdirectory)
                for file in files:
                    # copy2 preserves file metadata; the source copy is then removed
                    shutil.copy2(file, abstargetdir)
                    os.remove(file)
            os.chdir(pwd)
        else:
            print 'Directory', repr(abstargetdir), 'does not exist - exiting...'
            sys.exit()
    else:
        print 'Directory', repr(abssourcedir), 'does not exist - exiting...'
        sys.exit()
def movetobackups(filelist):
    """Moves given list of files to directory "$PWD/.backup",
    bumping previous backups to ".backupi", ".backupii", and ".backupiii".
    2011-04-16: Does not test for an unsuccessful attempt to create a directory
    e.g., because of missing permissions."""
    if not filelist:
        print 'No data here to back up or process - exiting...'
        sys.exit()
    backupdirs = ['.backup', '.backupi', '.backupii', '.backupiii']
    for dir in backupdirs:
        if not os.path.isdir(dir):
            os.mkdir(dir)
    # Rotate generations oldest-first: iii is discarded, then ii->iii, i->ii, backup->i.
    removefiles(backupdirs[3])
    movefiles(backupdirs[2], backupdirs[3])
    movefiles(backupdirs[1], backupdirs[2])
    movefiles(backupdirs[0], backupdirs[1])
    for file in filelist:
        shutil.move(file, backupdirs[0])
def totalsize():
    """Returns total size in bytes of files in current directory,
    silently removing files of length zero."""
    totalsize = 0  # NOTE: local shadows the function name; harmless here
    print 'Removing zero-length files'
    for file in os.listdir(os.getcwd()):
        if os.path.isfile(file): # ignore directories, especially hidden ("dot") directories
            filesize = os.path.getsize(file)
            if filesize == 0:
                os.remove(file)
            else:
                # dot files are kept on disk but not counted toward the total
                if file[0] != ".":
                    totalsize = totalsize + filesize
    return totalsize
def slurpdata(datafileslisted):
    """Consolidate the lines of all listed data files into one sorted list.

    Calls mustbetext() to confirm that all listed files consist of plain text
    with no blank lines, then returns a single sorted list of all lines.

    Fix: the original collected lines via list(open(file)), leaking every
    file handle; files are now closed deterministically via 'with'."""
    mustbetext(datafileslisted)
    alldatalines = []
    for datafile in datafileslisted:
        with open(datafile) as handle:
            alldatalines.extend(handle)
    alldatalines.sort()
    return alldatalines
def getrules(globalrulefile, localrulefile):
"""Consolidates the lines of (optional) global and (mandatory) local rule files into one list.
Deletes comments and blank lines. Performs sanity checks to ensure well-formedness of rules.
Returns a consolidated list of rules, each item itself a list of rule components.
@@TODO
-- Test with illegal filenames.
-- Maybe also test for dot files. When used as source or target files,
dot files would throw off the size test in comparesize()."""
globalrulelines = []
globalrulefile = absfilename(globalrulefile)
localrulefile = absfilename(localrulefile)
if globalrulefile:
try:
globalrulelines = list(open(globalrulefile))
print "Using config file:", repr(globalrulefile), "- global rule file"
except:
print 'Optional global rule file', repr(globalrulefile), 'does not exist (or is unusable) - skipping...'
try:
localrulelines = list(open(localrulefile))
print "Using config file:", repr(localrulefile), "- local rule file"
except:
print 'Mandatory rule file', repr(localrulefile), 'does not exist (or is unusable) - exiting...'
sys.exit()
listofrulesraw = globalrulelines + localrulelines
listofrulesparsed = []
for line in listofrulesraw:
linesplitonorbar = line.partition('#')[0].strip().split('|')
if len(linesplitonorbar) == 5:
try:
linesplitonorbar[0] = int(linesplitonorbar[0])
except:
print repr(linesplitonorbar)
print 'First field must be an integer - exiting...'
if linesplitonorbar[0] < 0:
print repr(linesplitonorbar)
print 'First field must be a positive integer - exiting...'
sys.exit()
try:
re.compile(linesplitonorbar[1])
except:
# If string 'linesplitonorbar[1]' is not valid regular expression (eg, contains unmatched parentheses)
# or some other error occurs during compilation.
print 'In rule:', repr(linesplitonorbar)
print '...in order to match the regex string:', repr(linesplitonorbar[1])
catstring = "...the rule component must be escaped as follows: '" + re.escape(linesplitonorbar[1]) + "'"
print catstring
sys.exit()
if len(linesplitonorbar[4]) > 0:
if not linesplitonorbar[4].isdigit():
print repr(linesplitonorbar)
print 'Fifth field must be an integer or zero-length string - exiting...'
sys.exit()
if linesplitonorbar[4] < 1:
print repr(linesplitonorbar)
print 'Fifth field integer must be greater than zero - exiting...'
sys.exit()
if len(linesplitonorbar[1]) > 0:
if len(linesplitonorbar[2]) > 0:
if len(linesplitonorbar[3]) > 0:
listofrulesparsed.append(linesplitonorbar)
else:
print repr(linesplitonorbar)
print 'Fields 2, 3, and 4 must be non-empty - exiting...'
sys.exit()
elif len(linesplitonorbar) > 1:
print linesplitonorbar
print 'Edit to five fields, simply comment out, or escape any orbars in regex string - exiting...'
sys.exit()
createdfiles = []
count = 0
for rule in listofrulesparsed:
sourcefilename = rule[2]
targetfilename = rule[3]
valid_chars = "-_=.%s%s" % (string.ascii_letters, string.digits)
filenames = [ sourcefilename, targetfilename ]
for filename in filenames:
if filename[0] == ".":
print 'Filename', repr(filename), 'should not start with a dot...'
sys.exit()
for c in filename:
if c not in valid_chars:
if ' ' in filename:
print repr(rule)
print 'Filename', repr(filename), 'should have no spaces'
sys.exit()
else:
print repr(rule)
print 'Filename', repr(filename), 'has one or more characters other than:', repr(valid_chars)
sys.exit()
try:
open(filename, 'a+').close() # like "touch" ensures that filename is writable
except:
print 'Cannot open', repr(filename), 'as a file for appending - exiting...'
sys.exit()
createdfiles.append(targetfilename)
if count == 0:
createdfiles.append(sourcefilename)
if sourcefilename == targetfilename:
print 'In rules:', repr(rule)
print 'Source file:', repr(sourcefilename), 'is same as target file:', repr(targetfilename), '- exiting...'
sys.exit()
if not sourcefilename in createdfiles:
print repr(rule)
print 'Source file', repr(sourcefilename), 'has no precedent target file. Exiting...'
sys.exit()
count = count + 1
return listofrulesparsed
def getmappings(mappings, helpmessage):
    """Parses the given file, the lines are supposed to consist of two fields separated by a vertical bar.
    Strips comments, commented lines, and blank lines.
    Ignores lines with more than two vertical-bar-delimited fields.
    Returns list, each item of which is a list of two items ."""
    helpmessage = str(helpmessage)
    mappings = os.path.expanduser(mappings)
    print "Using config file:", repr(mappings), helpmessage
    mappingsraw = []
    mappingsparsed = []
    try:
        mappingsraw = list(open(mappings))
    except:
        # mapping files are optional; absence yields an empty mapping list
        print 'Config file', repr(mappings), 'does not exist - skipping...'
        return mappingsparsed
    for line in mappingsraw:
        # strip trailing comments, then split the remainder on vertical bars
        linesplitonorbar = line.partition('#')[0].strip().split('|')
        if len(linesplitonorbar) == 2:
            mappingsparsed.append(linesplitonorbar)
    return mappingsparsed
def relocatefiles(files2dirs):
    """Given the list of mappings of filenames to target directories:
    if file and directory both exist, moves file to directory,
    if file exists but not the target directory, reports that the file is staying put."""
    timestamp = datetime.datetime.now()
    prefix = timestamp.isoformat('.')
    for line in files2dirs:
        filename = line[0]
        dirpath = os.path.expanduser(line[1])
        # Target name is the isoformat date/time (colons removed by the
        # slices) prefixed to the original filename inside dirpath.
        timestampedpathname = dirpath + '/' + prefix[0:13] + prefix[14:16] + prefix[17:19] + '.' + filename
        try:
            shutil.move(filename, timestampedpathname)
            print 'Moving', repr(filename), 'to', repr(timestampedpathname)
        except:
            # the move fails when the target directory is absent; only report
            # when the source file actually exists
            if os.path.exists(filename):
                print 'Keeping file', repr(filename), 'where it is - directory', dirpath, 'does not exist...'
def shuffle(rules, datalines):
    """Takes as arguments a list of rules and a list of data lines as a starting point.
    For the first rule only:
    writes data lines matching a regular expression to the target file,
    writes data lines not matching the regular expression to the source file.
    For each subsequent rule:
    reads data lines from source file,
    writes lines matching a regular expression to the target file,
    writes lines not matching a regular expression to the source file, overwriting the source file."""
    rulenumber = 0
    for rule in rules:
        rulenumber += 1
        # rule fields: [match field (0 = whole line), regex, source, target, sort field]
        field = rule[0]
        searchkey = rule[1]
        source = rule[2]
        target = rule[3]
        sortorder = rule[4]
        sourcelines = []
        targetlines = []
        if sortorder:
            print '%s [%s] "%s" to "%s", sorted by field %s' % (field, searchkey, source, target, sortorder)
        else:
            print '%s [%s] "%s" to "%s"' % (field, searchkey, source, target)
        if rulenumber > 1:
            # after the first rule the working set is re-read from the source file
            datalines = list(open(source))
        if field == 0:
            # field 0: match the regex against the entire line
            if searchkey == ".":
                # "." matches everything: move all lines to the target
                targetlines = [ line for line in datalines ]
            else:
                sourcelines = [ line for line in datalines if not re.search(searchkey, line) ]
                targetlines = [ line for line in datalines if re.search(searchkey, line) ]
        else:
            # otherwise match against one whitespace-delimited field (1-based)
            ethfield = field - 1
            for line in datalines:
                if field > len(line.split()):
                    # line too short to have the field: stays in the source
                    sourcelines.append(line)
                else:
                    if re.search(searchkey, line.split()[ethfield]):
                        targetlines.append(line)
                    else:
                        sourcelines.append(line)
        # source is overwritten; target is appended to (it may already hold lines)
        sourcefile = open(source, 'w'); sourcefile.writelines(sourcelines); sourcefile.close()
        targetfile = open(target, 'a'); targetfile.writelines(targetlines); targetfile.close()
        if sortorder:
            # re-sort the whole target file by the requested field
            targetlines = list(open(target))
            targetlines = dsusort(targetlines, sortorder)
            targetfile = open(target, 'w'); targetfile.writelines(targetlines); targetfile.close()
def comparesize(sizebefore, sizeafter):
    """Given the aggregate size in bytes of files "before" and "after":
    reports if sizes are the same, or
    warns if sizes are different."""
    print 'Size pre was', sizebefore
    print 'Size post is', sizeafter, '- includes files, if any, moved to other directories'
    # Equal totals are the sanity check that no line data was lost in shuffling.
    if sizebefore == sizeafter:
        print 'Done: data shawkled and intact!'
    else:
        print 'Warning: data may have been lost - revert to backup!'
def urlify(listofdatafiles, sedtxt, sedhtml, htmldir, cloud):
    """For each file in list of files (listofdatafiles):
    create a urlified (HTML) file in the specified directory (htmldir),
    prepending the contents of an optional cloud file (cloud) to each urlified file,
    optionally stream-editing the plain text using before-and-after transforms (sedtxt), and
    optionally stream-editing the urlified text using before-and-after transforms (sedhtml).
    Note: Need to replace fourth argument of urlify with something like str(arguments.htmldir) - test...
    urlify(datafilesaftermove, sedtxtmappings, sedhtmlmappings, '.imac', optionalcloudfile)"""
    cloud = absfilename(cloud)
    cloudlines = []
    if os.path.isfile(cloud):
        print "Prepending file", repr(cloud), "to each urlified file"
        cloudlines = list(open(cloud))
    htmldir = absdirname(htmldir)
    if not os.path.isdir(htmldir):
        print 'Creating directory', repr(htmldir)
        os.mkdir(htmldir)
    else:
        # directory exists: clear out previously generated files first
        removefiles(htmldir)
    print 'Generating urlified files in directory', repr(htmldir)
    for file in listofdatafiles:
        try:
            openfilelines = list(open(file))
            openfilelines = cloudlines + openfilelines
        except:
            print 'Cannot open', file, '- exiting...'
            sys.exit()
        urlifiedlines = []
        for line in openfilelines:
            # first apply the plain-text stream edits (regex substitutions)
            for sedmap in sedtxt:
                try:
                    old = sedmap[0]
                    new = sedmap[1]
                    oldcompiled = re.compile(old)
                    line = re.sub(oldcompiled, new, line)
                except:
                    # malformed mapping entries are silently skipped
                    pass
            # wrap URLs in anchors, then apply the HTML-side cosmetic edits
            line = urlify_string(line)
            for visualimprovement in sedhtml:
                try:
                    ugly = visualimprovement[0]
                    pretty = visualimprovement[1]
                    line = line.replace(ugly, pretty)
                except:
                    pass
            urlifiedlines.append(line)
        filehtml = htmldir + '/' + os.path.basename(file) + '.html'
        try:
            openfilehtml = open(filehtml, 'w')
        except:
            print 'Cannot open', repr(filehtml), 'for writing - exiting...'
            sys.exit()
        openfilehtml.write('<PRE>\n')
        linenumber = 1
        field1before = ''
        # write the lines, inserting a blank line whenever the first
        # whitespace-delimited field changes between consecutive lines
        for urlifiedline in urlifiedlines:
            field1 = urlifiedline.split()[0]
            if linenumber > 1:
                if field1before != field1:
                    openfilehtml.write('\n')
            field1before = field1
            linenumber += 1
            openfilehtml.write(urlifiedline)
        openfilehtml.close()
def dsusort(dlines, field):
    """Return dlines sorted by the given 1-based whitespace-delimited field,
    using decorate-sort-undecorate; lines lacking that field sort first
    (their sort key is the empty string), ties break on the whole line."""
    index = int(field) - 1
    decorated = []
    for line in dlines:
        words = line.split()
        sortkey = words[index] if index < len(words) else ''
        decorated.append((sortkey, line))
    decorated.sort()
    return [pair[1] for pair in decorated]
def mustbetext(datafiles):
"""Confirms that listed files consist of plain text, with no blank lines,
else exits with helpful error message.
Draws on p.25 recipe from O'Reilly Python Cookbook."""
for file in datafiles:
givenstring = open(file).read(512)
text_characters = "".join(map(chr, range(32, 127))) + "\n\r\t\b"
_null_trans = string.maketrans("", "")
if "\0" in givenstring: # if givenstring contains any null, it's not text
print 'Data file:', repr(file), 'contains a null, ergo is not a text file - exiting...'
sys.exit()
if not givenstring: # an "empty" string is "text" (arbitrary but reasonable choice)
return True
substringwithnontextcharacters = givenstring.translate(_null_trans, text_characters)
lengthsubstringwithnontextcharacters = len(substringwithnontextcharacters)
lengthgivenstring = len(givenstring)
proportion = lengthsubstringwithnontextcharacters / lengthgivenstring
if proportion >= 0.30: # s is 'text' if less than 30% of its characters are non-text ones
print 'Data file', repr(file), 'has more than 30% non-text, ergo is not a text file - exiting...'
sys.exit()
filelines = list(open(file))
for line in filelines:
linestripped = line.strip()
if len(linestripped) == 0:
print 'File', repr(file), 'has blank lines - exiting...'
sys.exit()
def urlify_string(s):
    """Wrap each URL in s (http, https, telnet, gopher, file, wais, ftp, irc)
    in an HTML anchor tag and return the result.
    This code, found on Web, appears to be based on Perl Cookbook,
    section 6.21 ("urlify")."""
    schemes = r'(http|https|telnet|gopher|file|wais|ftp|irc)'
    letters = r'\w'
    gunk = r'/#~:.?+=&%@!\-'
    punct = r'.:?\-'
    anychar = letters + gunk + punct
    pattern = re.compile(r"""
      \b                  # start at a word boundary
      (                   # begin group \1 {
        %(schemes)s :     #   the scheme name plus a colon
        [%(anychar)s] +?  #   one or more URL characters, taken minimally
      )                   # end group \1 }
      (?=                 # non-consuming look-ahead:
        [%(punct)s]*      #   zero or more trailing punctuation characters
        [^%(anychar)s]    #   followed by a non-URL character
        |                 # or else
        $                 #   the end of the string
      )
      """ % locals(), re.VERBOSE | re.IGNORECASE)
    return pattern.sub(r"<A HREF=\1>\1</A>", s)
if __name__ == "__main__":
    # home = os.environ.get("HOME") # uncomment to use test data
    # data = home + '/agit/apple/data/a' # uncomment to use test data
    # os.chdir(data) # uncomment to use test data
    arguments = getoptions()
    rules = getrules(arguments.globalrules, arguments.localrules)
    # snapshot the aggregate size before processing so data loss can be
    # detected by comparesize() at the end
    sizebefore = totalsize()
    print 'Size of files is', sizebefore
    datafilesbefore = datals()
    datalines = slurpdata(datafilesbefore)
    # originals are rotated into the .backup* directories before shuffling
    movetobackups(datafilesbefore)
    shuffle(rules, datalines)
    sizeafter = totalsize()
    filesanddestinations = getmappings(arguments.files2dirs, '- specifies names of files and destination directories')
    relocatefiles(filesanddestinations)
    datafilesaftermove = datals()
    sedtxtmappings = getmappings(arguments.sedtxt, '- specifies stream edits before urlification')
    sedhtmlmappings = getmappings(arguments.sedhtml, '- specifies stream edits after urlification')
    optionalcloudfile = arguments.cloud
    htmldirectory = os.path.abspath(os.path.expanduser(arguments.htmldir))
    urlify(datafilesaftermove, sedtxtmappings, sedhtmlmappings, htmldirectory, optionalcloudfile)
    comparesize(sizebefore, sizeafter)
| |
# -:- encoding: utf8 -:-
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
from io import BytesIO
from mapproxy.compat.image import Image, ImageDraw
from mapproxy.image import ImageSource, ReadBufWrapper, is_single_color_image
from mapproxy.image import peek_image_format
from mapproxy.image.merge import merge_images
from mapproxy.image import _make_transparent as make_transparent, SubImageSource, img_has_transparency, quantize
from mapproxy.image.opts import ImageOptions
from mapproxy.image.tile import TileMerger, TileSplitter
from mapproxy.image.transform import ImageTransformer
from mapproxy.test.image import is_png, is_jpeg, is_tiff, create_tmp_image_file, check_format, create_debug_img, create_image
from mapproxy.srs import SRS
from nose.tools import eq_
from mapproxy.test.image import assert_img_colors_eq
from nose.plugins.skip import SkipTest
# Shared ImageOptions fixtures used by the format-conversion tests below.
PNG_FORMAT = ImageOptions(format='image/png')
JPEG_FORMAT = ImageOptions(format='image/jpeg')
TIFF_FORMAT = ImageOptions(format='image/tiff')
class TestImageSource(object):
    """Tests for ImageSource: construction from filenames, file objects and
    PIL images, plus buffer/format conversion behaviour."""
    def setup(self):
        # fresh 100x100 temporary image file for each test
        self.tmp_filename = create_tmp_image_file((100, 100))
    def teardown(self):
        os.remove(self.tmp_filename)
    def test_from_filename(self):
        ir = ImageSource(self.tmp_filename, PNG_FORMAT)
        assert is_png(ir.as_buffer())
        assert ir.as_image().size == (100, 100)
    def test_from_file(self):
        with open(self.tmp_filename, 'rb') as tmp_file:
            ir = ImageSource(tmp_file, 'png')
            # an already-open file object is returned as the buffer itself
            assert ir.as_buffer() == tmp_file
            assert ir.as_image().size == (100, 100)
    def test_from_image(self):
        img = Image.new('RGBA', (100, 100))
        ir = ImageSource(img, (100, 100), PNG_FORMAT)
        assert ir.as_image() == img
        assert is_png(ir.as_buffer())
    def test_from_non_seekable_file(self):
        with open(self.tmp_filename, 'rb') as tmp_file:
            data = tmp_file.read()
        class FileLikeDummy(object):
            # "file" without seek, like urlopen response
            def read(self):
                return data
        ir = ImageSource(FileLikeDummy(), 'png')
        # requesting a seekable buffer must still yield the full data
        assert ir.as_buffer(seekable=True).read() == data
        assert ir.as_image().size == (100, 100)
        assert ir.as_buffer().read() == data
    def test_output_formats(self):
        img = Image.new('RGB', (100, 100))
        # nose generator test: one check_format invocation per output format
        for format in ['png', 'gif', 'tiff', 'jpeg', 'GeoTIFF', 'bmp']:
            ir = ImageSource(img, (100, 100), image_opts=ImageOptions(format=format))
            yield check_format, ir.as_buffer(), format
    def test_converted_output(self):
        # created as PNG; after a conversion, plain as_buffer() returns the
        # most recently converted buffer (asserted below)
        ir = ImageSource(self.tmp_filename, (100, 100), PNG_FORMAT)
        assert is_png(ir.as_buffer())
        assert is_jpeg(ir.as_buffer(JPEG_FORMAT))
        assert is_jpeg(ir.as_buffer())
        assert is_tiff(ir.as_buffer(TIFF_FORMAT))
        assert is_tiff(ir.as_buffer())
    def test_output_formats_greyscale_png(self):
        img = Image.new('L', (100, 100))
        ir = ImageSource(img, image_opts=PNG_FORMAT)
        img = Image.open(ir.as_buffer(ImageOptions(colors=256, transparent=True, format='image/png')))
        # greyscale input with colors=256 comes back palette-quantized
        assert img.mode == 'P'
        assert img.getpixel((0, 0)) == 255
    def test_output_formats_greyscale_alpha_png(self):
        img = Image.new('LA', (100, 100))
        ir = ImageSource(img, image_opts=PNG_FORMAT)
        img = Image.open(ir.as_buffer(ImageOptions(colors=256, transparent=True, format='image/png')))
        # the alpha band is preserved: mode stays LA instead of palette
        assert img.mode == 'LA'
        assert img.getpixel((0, 0)) == (0, 0)
    def test_output_formats_png8(self):
        img = Image.new('RGBA', (100, 100))
        ir = ImageSource(img, image_opts=PNG_FORMAT)
        img = Image.open(ir.as_buffer(ImageOptions(colors=256, transparent=True, format='image/png')))
        assert img.mode == 'P'
        assert img.getpixel((0, 0)) == 255
    def test_output_formats_png24(self):
        img = Image.new('RGBA', (100, 100))
        image_opts = PNG_FORMAT.copy()
        image_opts.colors = 0 # TODO image_opts
        ir = ImageSource(img, image_opts=image_opts)
        img = Image.open(ir.as_buffer())
        # colors=0 disables quantization, so the PNG stays true-color RGBA
        eq_(img.mode, 'RGBA')
        assert img.getpixel((0, 0)) == (0, 0, 0, 0)
class TestSubImageSource(object):
    """Tests for SubImageSource: placing a sub-image on a canvas at an offset."""
    def test_full(self):
        # sub-image exactly covers the canvas
        sub_img = create_image((100, 100), color=[100, 120, 130, 140])
        img = SubImageSource(sub_img, size=(100, 100), offset=(0, 0), image_opts=ImageOptions()).as_image()
        eq_(img.getcolors(), [(100*100, (100, 120, 130, 140))])
    def test_larger(self):
        # sub-image larger than the canvas still fills it completely
        sub_img = create_image((150, 150), color=[100, 120, 130, 140])
        img = SubImageSource(sub_img, size=(100, 100), offset=(0, 0), image_opts=ImageOptions()).as_image()
        eq_(img.getcolors(), [(100*100, (100, 120, 130, 140))])
    def test_negative_offset(self):
        # part of the sub-image hangs off the left edge but the canvas is covered
        sub_img = create_image((150, 150), color=[100, 120, 130, 140])
        img = SubImageSource(sub_img, size=(100, 100), offset=(-50, 0), image_opts=ImageOptions()).as_image()
        eq_(img.getcolors(), [(100*100, (100, 120, 130, 140))])
    def test_overlap_right(self):
        # sub-image overlaps the right edge: 25x50 pixels land on the canvas,
        # the remainder of the canvas stays transparent
        sub_img = create_image((50, 50), color=[100, 120, 130, 140])
        img = SubImageSource(sub_img, size=(100, 100), offset=(75, 25), image_opts=ImageOptions(transparent=True)).as_image()
        eq_(sorted(img.getcolors()), [(25*50, (100, 120, 130, 140)), (100*100-25*50, (255, 255, 255, 0))])
    def test_outside(self):
        # sub-image placed entirely off-canvas: result is fully transparent
        sub_img = create_image((50, 50), color=[100, 120, 130, 140])
        img = SubImageSource(sub_img, size=(100, 100), offset=(200, 0), image_opts=ImageOptions(transparent=True)).as_image()
        eq_(img.getcolors(), [(100*100, (255, 255, 255, 0))])
class ROnly(object):
    """Minimal read-once stream stub (stands in for e.g. a urlopen response).

    read() hands back the single payload on the first call and b'' after;
    iteration yields the payload once, after which the stub is exhausted.
    """
    def __init__(self):
        self.data = [b'Hello World!']
    def read(self):
        if not self.data:
            return b''
        return self.data.pop()
    def __iter__(self):
        remaining, self.data = self.data, []
        return iter(remaining)
class TestReadBufWrapper(object):
    """Tests for ReadBufWrapper around a read-once object: the asserts below
    show that content can be replayed only when seek(0) is called before the
    first read/iteration."""
    def setup(self):
        rbuf = ROnly()
        self.rbuf_wrapper = ReadBufWrapper(rbuf)
    def test_read(self):
        assert self.rbuf_wrapper.read() == b'Hello World!'
        self.rbuf_wrapper.seek(0)
        # reading without a prior seek(0) is not buffered: replay is empty
        eq_(self.rbuf_wrapper.read(), b'')
    def test_seek_read(self):
        # seeking to the start first makes subsequent reads replayable
        self.rbuf_wrapper.seek(0)
        assert self.rbuf_wrapper.read() == b'Hello World!'
        self.rbuf_wrapper.seek(0)
        assert self.rbuf_wrapper.read() == b'Hello World!'
    def test_iter(self):
        data = list(self.rbuf_wrapper)
        eq_(data, [b'Hello World!'])
        self.rbuf_wrapper.seek(0)
        # same as read(): iteration without a prior seek(0) cannot be replayed
        data = list(self.rbuf_wrapper)
        eq_(data, [])
    def test_seek_iter(self):
        self.rbuf_wrapper.seek(0)
        data = list(self.rbuf_wrapper)
        eq_(data, [b'Hello World!'])
        self.rbuf_wrapper.seek(0)
        data = list(self.rbuf_wrapper)
        eq_(data, [b'Hello World!'])
    def test_hasattr(self):
        # the wrapper must expose the usual file-like interface
        assert hasattr(self.rbuf_wrapper, 'seek')
        assert hasattr(self.rbuf_wrapper, 'readline')
class TestMergeAll(object):
    """Tests for TileMerger.merge: assembling a grid of tiles into one image."""
    def setup(self):
        # temporary tile files created by a test; removed in teardown
        self.cleanup_tiles = []
    def test_full_merge(self):
        # 3x3 grid of 100px tiles -> one 300x300 image
        self.cleanup_tiles = [create_tmp_image_file((100, 100)) for _ in range(9)]
        self.tiles = [ImageSource(tile) for tile in self.cleanup_tiles]
        m = TileMerger(tile_grid=(3, 3), tile_size=(100, 100))
        img_opts = ImageOptions()
        result = m.merge(self.tiles, img_opts)
        img = result.as_image()
        eq_(img.size, (300, 300))
    def test_one(self):
        self.cleanup_tiles = [create_tmp_image_file((100, 100))]
        self.tiles = [ImageSource(self.cleanup_tiles[0])]
        m = TileMerger(tile_grid=(1, 1), tile_size=(100, 100))
        img_opts = ImageOptions(transparent=True)
        result = m.merge(self.tiles, img_opts)
        img = result.as_image()
        eq_(img.size, (100, 100))
        # transparent=True forces an alpha channel on the result
        eq_(img.mode, 'RGBA')
    def test_missing_tiles(self):
        # 8 of the 9 tiles are None: those cells come out white, the one
        # real (black) tile keeps its 100x100 pixels
        self.cleanup_tiles = [create_tmp_image_file((100, 100))]
        self.tiles = [ImageSource(self.cleanup_tiles[0])]
        self.tiles.extend([None]*8)
        m = TileMerger(tile_grid=(3, 3), tile_size=(100, 100))
        img_opts = ImageOptions()
        result = m.merge(self.tiles, img_opts)
        img = result.as_image()
        eq_(img.size, (300, 300))
        eq_(img.getcolors(), [(80000, (255, 255, 255)), (10000, (0, 0, 0)), ])
    def test_invalid_tile(self):
        # one tile file is corrupted: its cell is painted with bgcolor and
        # the broken file is removed from disk by the merger
        self.cleanup_tiles = [create_tmp_image_file((100, 100)) for _ in range(9)]
        self.tiles = [ImageSource(tile) for tile in self.cleanup_tiles]
        invalid_tile = self.tiles[0].source
        with open(invalid_tile, 'wb') as tmp:
            tmp.write(b'invalid')
        m = TileMerger(tile_grid=(3, 3), tile_size=(100, 100))
        img_opts = ImageOptions(bgcolor=(200, 0, 50))
        result = m.merge(self.tiles, img_opts)
        img = result.as_image()
        eq_(img.size, (300, 300))
        eq_(img.getcolors(), [(10000, (200, 0, 50)), (80000, (0, 0, 0))])
        assert not os.path.isfile(invalid_tile)
    def test_none_merge(self):
        # merging a single None tile yields a canvas of the requested bgcolor
        tiles = [None]
        m = TileMerger(tile_grid=(1, 1), tile_size=(100, 100))
        img_opts = ImageOptions(mode='RGBA', bgcolor=(200, 100, 30, 40))
        result = m.merge(tiles, img_opts)
        img = result.as_image()
        eq_(img.size, (100, 100))
        eq_(img.getcolors(), [(100*100, (200, 100, 30, 40))])
    def teardown(self):
        for tile_fname in self.cleanup_tiles:
            if tile_fname and os.path.isfile(tile_fname):
                os.remove(tile_fname)
class TestGetCrop(object):
    """ImageTransformer with identical source/target SRS: pass-through and
    plain resizes."""

    def setup(self):
        self.tmp_file = create_tmp_image_file((100, 100), two_colored=True)
        self.img = ImageSource(self.tmp_file,
            image_opts=ImageOptions(format='image/png'), size=(100, 100))

    def teardown(self):
        if os.path.exists(self.tmp_file):
            os.remove(self.tmp_file)

    def test_perfect_match(self):
        bbox = (-10, -5, 30, 35)
        transformer = ImageTransformer(SRS(4326), SRS(4326))
        # identical bbox and size: the source image is passed through
        result = transformer.transform(self.img, bbox, (100, 100), bbox,
                                       image_opts=None)
        assert self.img == result

    def test_simple_resize_nearest(self):
        bbox = (-10, -5, 30, 35)
        transformer = ImageTransformer(SRS(4326), SRS(4326))
        resized = transformer.transform(
            self.img, bbox, (200, 200), bbox,
            image_opts=ImageOptions(resampling='nearest')).as_image()
        eq_(resized.size, (200, 200))
        # nearest neighbor keeps exactly the original two colors
        eq_(len(resized.getcolors()), 2)

    def test_simple_resize_bilinear(self):
        bbox = (-10, -5, 30, 35)
        transformer = ImageTransformer(SRS(4326), SRS(4326))
        resized = transformer.transform(
            self.img, bbox, (200, 200), bbox,
            image_opts=ImageOptions(resampling='bilinear')).as_image()
        eq_(resized.size, (200, 200))
        # some shades of grey with bilinear
        assert len(resized.getcolors()) >= 4
class TestLayerMerge(object):
    """merge_images: opacity blending, mixed image modes and paletted sources."""

    def test_opacity_merge(self):
        bottom = ImageSource(Image.new('RGB', (10, 10), (255, 0, 255)))
        top = ImageSource(Image.new('RGB', (10, 10), (0, 255, 255)),
                          image_opts=ImageOptions(opacity=0.5))
        merged = merge_images([bottom, top], ImageOptions(transparent=False))
        eq_(merged.as_image().getpixel((0, 0)), (127, 127, 255))

    def test_opacity_merge_mixed_modes(self):
        bottom = ImageSource(Image.new('RGBA', (10, 10), (255, 0, 255, 255)))
        top = ImageSource(Image.new('RGB', (10, 10), (0, 255, 255)).convert('P'),
                          image_opts=ImageOptions(opacity=0.5))
        merged = merge_images([bottom, top], ImageOptions(transparent=True))
        assert_img_colors_eq(merged.as_image(), [
            (10*10, (127, 127, 255, 255)),
        ])

    def test_paletted_merge(self):
        if not hasattr(Image, 'FASTOCTREE'):
            raise SkipTest()

        # generate RGBA images with a transparent rectangle in the lower right
        base = ImageSource(Image.new('RGBA', (50, 50), (0, 255, 0, 255))).as_image()
        draw = ImageDraw.Draw(base)
        draw.rectangle((25, 25, 49, 49), fill=(0, 0, 0, 0))
        paletted_img = quantize(base, alpha=True)
        assert img_has_transparency(paletted_img)
        assert paletted_img.mode == 'P'

        rgba_img = Image.new('RGBA', (50, 50), (255, 0, 0, 255))
        draw = ImageDraw.Draw(rgba_img)
        draw.rectangle((25, 25, 49, 49), fill=(0, 0, 0, 0))

        # generate base image and merge the others above
        blue_base = ImageSource(Image.new('RGBA', (50, 50), (0, 0, 255, 255)))
        merged = merge_images(
            [blue_base, ImageSource(paletted_img), ImageSource(rgba_img)],
            ImageOptions(transparent=True)).as_image()
        assert merged.mode == 'RGBA'
        eq_(merged.getpixel((49, 49)), (0, 0, 255, 255))
        eq_(merged.getpixel((0, 0)), (255, 0, 0, 255))

    def test_solid_merge(self):
        bottom = ImageSource(Image.new('RGB', (10, 10), (255, 0, 255)))
        top = ImageSource(Image.new('RGB', (10, 10), (0, 255, 255)))
        merged = merge_images([bottom, top], ImageOptions(transparent=False))
        eq_(merged.as_image().getpixel((0, 0)), (0, 255, 255))
class TestLayerCompositeMerge(object):
    """merge_images with true alpha compositing (needs Image.alpha_composite)."""

    def test_composite_merge(self):
        # http://stackoverflow.com/questions/3374878
        if not hasattr(Image, 'alpha_composite'):
            raise SkipTest()
        red = Image.new('RGBA', size=(100, 100), color=(255, 0, 0, 255))
        draw = ImageDraw.Draw(red)
        draw.rectangle((33, 0, 66, 100), fill=(255, 0, 0, 128))
        draw.rectangle((67, 0, 100, 100), fill=(255, 0, 0, 0))

        green = Image.new('RGBA', size=(100, 100), color=(0, 255, 0, 255))
        draw = ImageDraw.Draw(green)
        draw.rectangle((0, 33, 100, 66), fill=(0, 255, 0, 128))
        draw.rectangle((0, 67, 100, 100), fill=(0, 255, 0, 0))

        # green below, red composited on top
        merged = merge_images([ImageSource(green), ImageSource(red)],
                              ImageOptions(transparent=True)).as_image()
        eq_(merged.mode, 'RGBA')
        assert_img_colors_eq(merged, [
            (1089, (0, 255, 0, 255)),
            (1089, (255, 255, 255, 0)),
            (1122, (0, 255, 0, 128)),
            (1122, (128, 126, 0, 255)),
            (1122, (255, 0, 0, 128)),
            (1156, (170, 84, 0, 191)),
            (3300, (255, 0, 0, 255))])

    def test_composite_merge_opacity(self):
        if not hasattr(Image, 'alpha_composite'):
            raise SkipTest()
        bg = ImageSource(Image.new('RGBA', size=(100, 100), color=(255, 0, 255, 255)))
        overlay = Image.new('RGBA', size=(100, 100), color=(0, 0, 0, 0))
        draw = ImageDraw.Draw(overlay)
        draw.rectangle((10, 10, 89, 89), fill=(0, 255, 255, 255))
        fg = ImageSource(overlay, image_opts=ImageOptions(opacity=0.5))
        merged = merge_images([bg, fg], ImageOptions(transparent=True)).as_image()
        eq_(merged.mode, 'RGBA')
        assert_img_colors_eq(merged, [
            (3600, (255, 0, 255, 255)),
            (6400, (128, 127, 255, 255))])
class TestTransform(object):
    """Mesh-based reprojection from EPSG:31467 to EPSG:4326."""

    def setup(self):
        self.src_img = ImageSource(create_debug_img((200, 200), transparent=False))
        self.src_srs = SRS(31467)
        self.dst_size = (100, 150)
        self.dst_srs = SRS(4326)
        self.dst_bbox = (0.2, 45.1, 8.3, 53.2)
        self.src_bbox = self.dst_srs.transform_bbox_to(self.src_srs, self.dst_bbox)

    def test_transform(self, mesh_div=4):
        transformer = ImageTransformer(self.src_srs, self.dst_srs, mesh_div=mesh_div)
        result = transformer.transform(
            self.src_img, self.src_bbox, self.dst_size, self.dst_bbox,
            image_opts=ImageOptions(resampling='nearest'))
        assert isinstance(result, ImageSource)
        assert result.as_image() != self.src_img
        assert result.size == (100, 150)

    def _test_compare_mesh_div(self):
        """
        Create transformations with different div values.
        (Underscore prefix: for manual inspection only, not collected.)
        """
        for div in [1, 2, 4, 6, 8, 12, 16]:
            transformer = ImageTransformer(self.src_srs, self.dst_srs, mesh_div=div)
            result = transformer.transform(self.src_img, self.src_bbox,
                                           self.dst_size, self.dst_bbox)
            result.as_image().save('/tmp/transform-%d.png' % (div,))
class TestSingleColorImage(object):
    """is_single_color_image on uniform and almost-uniform images."""

    def test_one_point(self):
        img = Image.new('RGB', (100, 100), color='#ff0000')
        draw = ImageDraw.Draw(img)
        draw.point((99, 99))  # a single differing pixel is enough
        del draw
        assert not is_single_color_image(img)

    def test_solid(self):
        solid = Image.new('RGB', (100, 100), color='#ff0102')
        eq_(is_single_color_image(solid), (255, 1, 2))

    def test_solid_w_alpha(self):
        solid = Image.new('RGBA', (100, 100), color='#ff0102')
        eq_(is_single_color_image(solid), (255, 1, 2, 255))

    def test_solid_paletted_image(self):
        img = Image.new('P', (100, 100), color=20)
        palette = []
        for i in range(256):
            palette.extend((i, i//2, i%3))
        img.putpalette(palette)
        # palette entry 20 maps to (20, 10, 2)
        eq_(is_single_color_image(img), (20, 10, 2))
class TestMakeTransparent(object):
    """make_transparent: colors within tolerance of the key color become
    fully transparent; everything else keeps (or gains) full alpha."""

    def _make_test_image(self):
        img = Image.new('RGB', (50, 50), (130, 140, 120))
        draw = ImageDraw.Draw(img)
        draw.rectangle((10, 10, 39, 39), fill=(130, 150, 120))
        return img

    def _make_transp_test_image(self):
        img = Image.new('RGBA', (50, 50), (130, 140, 120, 100))
        draw = ImageDraw.Draw(img)
        draw.rectangle((10, 10, 39, 39), fill=(130, 150, 120, 120))
        return img

    def test_result(self):
        result = make_transparent(self._make_test_image(), (130, 150, 120),
                                  tolerance=5)
        assert result.mode == 'RGBA'
        assert result.size == (50, 50)
        assert result.getcolors() == [(1600, (130, 140, 120, 255)),
                                      (900, (130, 150, 120, 0))]

    def test_with_color_fuzz(self):
        # key color differs slightly but stays within tolerance 5
        result = make_transparent(self._make_test_image(), (128, 154, 121),
                                  tolerance=5)
        assert result.mode == 'RGBA'
        assert result.size == (50, 50)
        assert result.getcolors() == [(1600, (130, 140, 120, 255)),
                                      (900, (130, 150, 120, 0))]

    def test_no_match(self):
        result = make_transparent(self._make_test_image(), (130, 160, 120),
                                  tolerance=5)
        assert result.mode == 'RGBA'
        assert result.size == (50, 50)
        # nothing is within tolerance: both colors stay opaque
        assert result.getcolors() == [(1600, (130, 140, 120, 255)),
                                      (900, (130, 150, 120, 255))]

    def test_from_paletted(self):
        result = make_transparent(self._make_test_image().quantize(256),
                                  (130, 150, 120), tolerance=5)
        assert result.mode == 'RGBA'
        assert result.size == (50, 50)
        eq_(result.getcolors(), [(1600, (130, 140, 120, 255)),
                                 (900, (130, 150, 120, 0))])

    def test_from_transparent(self):
        img = self._make_transp_test_image()
        draw = ImageDraw.Draw(img)
        draw.rectangle((0, 0, 4, 4), fill=(130, 100, 120, 0))
        draw.rectangle((5, 5, 9, 9), fill=(130, 150, 120, 255))
        result = make_transparent(img, (130, 150, 120, 120), tolerance=5)
        assert result.mode == 'RGBA'
        assert result.size == (50, 50)
        eq_(sorted(result.getcolors(), reverse=True),
            [(1550, (130, 140, 120, 100)), (900, (130, 150, 120, 0)),
             (25, (130, 150, 120, 255)), (25, (130, 100, 120, 0))])
class TestTileSplitter(object):
    """TileSplitter.get_tile: crops past the image edge are padded with the
    background (white, transparent when requested)."""

    def test_background_larger_crop(self):
        src = ImageSource(Image.new('RGB', (356, 266), (130, 140, 120)))
        splitter = TileSplitter(src, ImageOptions('RGB'))

        tile = splitter.get_tile((0, 0), (256, 256))
        eq_(tile.size, (256, 256))
        eq_(tile.as_image().getcolors(), [(256*256, (130, 140, 120))])

        # only a 100x10 strip of the source falls into this tile
        tile = splitter.get_tile((256, 256), (256, 256))
        eq_(tile.size, (256, 256))
        eq_(sorted(tile.as_image().getcolors()),
            [(10*100, (130, 140, 120)), (256*256-10*100, (255, 255, 255))])

    def test_background_larger_crop_with_transparent(self):
        src = ImageSource(Image.new('RGBA', (356, 266), (130, 140, 120, 255)))
        splitter = TileSplitter(src, ImageOptions('RGBA', transparent=True))

        tile = splitter.get_tile((0, 0), (256, 256))
        eq_(tile.size, (256, 256))
        eq_(tile.as_image().getcolors(), [(256*256, (130, 140, 120, 255))])

        tile = splitter.get_tile((256, 256), (256, 256))
        eq_(tile.size, (256, 256))
        eq_(sorted(tile.as_image().getcolors()),
            [(10*100, (130, 140, 120, 255)), (256*256-10*100, (255, 255, 255, 0))])
class TestHasTransparency(object):
    """img_has_transparency before and after FASTOCTREE quantization."""

    def test_rgb(self):
        if not hasattr(Image, 'FASTOCTREE'):
            raise SkipTest()
        img = Image.new('RGB', (10, 10))
        assert not img_has_transparency(img)
        # quantizing an opaque image must not introduce transparency
        assert not img_has_transparency(quantize(img, alpha=False))

    def test_rbga(self):
        if not hasattr(Image, 'FASTOCTREE'):
            raise SkipTest()
        img = Image.new('RGBA', (10, 10), (100, 200, 50, 255))
        img.paste((255, 50, 50, 0), (3, 3, 7, 7))
        assert img_has_transparency(img)
        # transparency must survive quantization with alpha enabled
        assert img_has_transparency(quantize(img, alpha=True))
class TestPeekImageFormat(object):
    """peek_image_format identifies the format from the first bytes of a buffer."""

    def test_peek(self):
        # nose test generator: (saved format, expected detection)
        for fmt, expected in [('png', 'png'), ('tiff', 'tiff'), ('gif', 'gif'),
                              ('jpeg', 'jpeg'), ('bmp', None)]:
            yield self.check, fmt, expected

    def check(self, format, expected_format):
        buf = BytesIO()
        Image.new('RGB', (100, 100)).save(buf, format)
        eq_(peek_image_format(buf), expected_format)
| |
#Copyright [2014] [Google]
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
__author__ = 'johnnylee'
__version__ = '2014.10.29'
import struct
import numpy as np
import sys
import os
import re
import math
import random
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import argparse
# --- module-wide state shared between the loader and the GLUT callbacks ---
viewRotation = 0        # current auto-rotation angle (deg) of the perspective view
subsample = 10000       # number of vertices kept for on-screen rendering
viewTopDown = False     # toggled by any key press (see keyboardFunc)
resultOnlyOutput = False  # when True, suppress progress prints; emit one result line
class ply:
version = 1.0
format = "binary_little_endian"
rendervertices = np.array(0)
vertices = np.array(0)
rendercount = subsample
isCorner = False
mean = np.array([0, 0, 0])
normal = np.array([0, 0, 1])
cornerIntersection = np.array([0, 0, 1])
totalBelow1percent = 0
leftMean = np.array([0, 0, 0])
leftNormal = np.array([0, 0, 1])
leftBelow1Percent = 0
leftBelow4Percent = 0
rightMean = np.array([0, 0, 0])
rightNormal = np.array([0, 0, 1])
rightBelow1Percent = 0
rightBelow4Percent = 0
cornerAngle = 0
propertyBytes = {'char': 1, 'uchar': 1, 'short': 2, 'ushort': 2, 'int': 4, 'uint': 4, 'float': 4, 'double': 8}
propertyFormatKey = {'char': 'b', 'uchar': 'B', 'short': 'h', 'ushort': 'H', 'int': 'i', 'uint': 'I', 'float': 'f', 'double': 'd'}
elements = {}
def __init__(self):
return
def cleanLine(self, line):
return re.sub(' +', ' ', line.rstrip()) #removes redundant white space
def parseElement(self, plyfile, element):
properties = element[1]
for i in range(10):
line = plyfile.readline()
parts = self.cleanLine(line).split(' ')
if parts[0] == "element":
plyfile.seek(-len(line),1) #rewind to the beginning of the line
return 0
if parts[0] == "end_header":
plyfile.seek(-len(line),1) #rewind to the beginning of the line
return 0
if parts[0] == "property":
if parts[1] == 'list':
if not resultOnlyOutput:
print "List Properties unsupported"
else:
properties.append((self.propertyFormatKey[parts[1]], self.propertyBytes[parts[1]], parts[2]))
def parseHeader(self, plyfile):
line = self.cleanLine(plyfile.readline())
if line != "ply":
if not resultOnlyOutput:
print "Not a valid ply file"
return -1
while True:
line = plyfile.readline()
parts = self.cleanLine(line).split(' ')
if parts[0] == "comment":
continue
if parts[0] == "format":
if parts[1] == "binary_little_endian":
self.format = parts[1]
self.version = parts[2]
continue
if parts[1] == "binary_big_endian":
print "ERROR: only supports binary little endian format currently"
return -1
if parts[1] == "ascii":
self.format = parts[1]
self.version = parts[2]
continue
print "ERROR: unknown format"
return -1
if parts[0] == "element":
self.elements[parts[1]] = (int(parts[2]), [])
if self.parseElement(plyfile, self.elements[parts[1]]) != 0:
break
if parts[0] == "end_header":
break
return 0
def load(self, plyfile, xOffsetMM, minHorzAngle, maxHorzAngle, minVertAngle, maxVertAngle, rotz, scale):
global subsample
if self.parseHeader(plyfile) != 0:
print "load error"
return
#load vertices
vertexCount = self.elements['vertex'][0]
properties = self.elements['vertex'][1]
fmt = ""
tot = 0
for p in properties:
fmt += p[0]
tot += p[1]
self.rendervertices = np.zeros((self.rendercount, 3))
self.vertices = np.zeros((vertexCount, 3))
if not resultOnlyOutput:
print "Vertices: ", vertexCount
outIndex = 0
for i in range(vertexCount):
if ((i+1) % 100000) == 0:
print(" loading: {0:.2f}%".format(100*float(i)/vertexCount))
if self.format == "binary_little_endian":
v = struct.unpack(fmt, plyfile.read(tot))
if self.format == "ascii":
parts = plyfile.readline().split(' ')
v = np.array((float(parts[0]), float(parts[1]), float(parts[2])))
if rotz:
temp = v[1]
v[1] = -v[0]
v[0] = temp
v *= scale
#gets rid of things at zero distance or on z=0 plane
if math.fabs(v[2]) < sys.float_info.epsilon:
continue
mag = math.sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])
if mag < sys.float_info.epsilon:
continue
#angle crop
horzAngle = 180*math.atan(v[0]/-v[2])/math.pi
vertAngle = 180*math.atan(v[1]/-v[2])/math.pi
if(horzAngle < minHorzAngle) or (horzAngle > maxHorzAngle):
continue
if(vertAngle < minVertAngle) or (vertAngle > maxVertAngle):
continue
#include vertex and apply offset
self.vertices[outIndex, :3] = v[:3]
self.vertices[outIndex, 0] += xOffsetMM
outIndex += 1
#truncate
self.vertices = self.vertices[:outIndex]
#subsample for rendering
for i in range(self.rendercount):
self.rendervertices[i] = self.vertices[random.randrange(0, outIndex-1)]
plyfile.close()
if not resultOnlyOutput:
print "Horz Offset: ", xOffsetMM
print "CroppingAngles: ", (minHorzAngle, maxHorzAngle), (minVertAngle, maxVertAngle)
print "Done Loading. Uncropped Vertices: ", outIndex
def computeAnalysis(self, isCorner, bestfit, dist):
global flatWallAngleThreshold
self.isCorner = isCorner
self.mean = np.mean(self.vertices, 0)
if self.mean[2] > 0:
for v in self.vertices:
v[2] *= -1
for v in self.rendervertices:
v[2] *= -1
if not isCorner:
if bestfit:
self.mean, self.normal = self.planeFit(self.vertices.transpose())
else:
self.mean = np.array((0, 0, dist))
self.normal = np.array((0, 0, -1))
self.totalBelow1percent = self.computePercentageAboveError(self.vertices, self.mean, self.normal, 0.01)
self.totalBelow4percent = self.computePercentageAboveError(self.vertices, self.mean, self.normal, 0.04)
else:
#does not do a best fit corner, just splits along x = 0, does not reject outliers
#left plane
leftPoints = np.zeros(self.vertices.shape)
index = 0
for p in self.vertices:
if p[0] < 0:
leftPoints[index] = p
index += 1
leftPoints = leftPoints[:index]
if bestfit:
self.leftMean, self.leftNormal = self.planeFit(leftPoints.transpose())
else:
self.leftMean = np.array((-500, 0, dist+500))
self.leftNormal = np.array((math.sqrt(2)/2, 0, math.sqrt(2)/2))
self.leftBelow1Percent = self.computePercentageAboveError(leftPoints, self.leftMean, self.leftNormal, 0.01)
self.leftBelow4Percent = self.computePercentageAboveError(leftPoints, self.leftMean, self.leftNormal, 0.04)
#right plane
rightPoints = np.zeros(self.vertices.shape)
index = 0
for p in self.vertices:
if p[0] > 0:
rightPoints[index] = p
index += 1
rightPoints = rightPoints[:index]
if bestfit:
self.rightMean, self.rightNormal = self.planeFit(rightPoints.transpose())
else:
self.rightMean = np.array((500, 0, dist+500))
self.rightNormal = np.array((-math.sqrt(2)/2, 0, math.sqrt(2)/2))
self.rightBelow1Percent = self.computePercentageAboveError(rightPoints, self.rightMean, self.rightNormal, 0.01)
self.rightBelow4Percent = self.computePercentageAboveError(rightPoints, self.rightMean, self.rightNormal, 0.04)
#compute corner line intersection with y=0 plane
lpDir = np.cross(np.array((0, 1, 0)), self.leftNormal)
rpDir = np.cross(np.array((0, 1, 0)), self.rightNormal)
#project the means to the y=0 plane
lp = self.leftMean
lp[1] = 0
rp = self.rightMean
rp[1] = 0
#compute the intersection point of two lines in the y=0 plane
z = rp - lp
a = np.linalg.norm(np.cross(z, rpDir))
b = np.linalg.norm(np.cross(lpDir, rpDir))
if b > sys.float_info.epsilon:
if lpDir.dot(z) > 0:
self.cornerIntersection = lp + (a/b)*lpDir
else:
self.cornerIntersection = lp - (a/b)*lpDir
self.cornerAngle = 180 - 180*math.acos(self.leftNormal.dot(self.rightNormal))/math.pi
def computePercentageAboveError(self, points, mean, normal, errorPercentThreshold):
totalBelowThresh = 0
totalCount = 0
for p in points:
#divides by distance to camera
div = math.sqrt(p.dot(p))
if math.fabs(div) > sys.float_info.epsilon:
err = math.fabs((p-mean).dot(normal)/div)
if err < errorPercentThreshold:
totalBelowThresh += 1
totalCount += 1
return float(100.0)*totalBelowThresh/float(totalCount)
def planeFit(self, points):
from numpy.linalg import svd
points = np.reshape(points, (points.shape[0], -1))
assert points.shape[0] < points.shape[1]
ctr = points.mean(axis=1)
x = points - ctr[:, None]
M = np.dot(x, x.T)
n = svd(M)[0][:, -1]
#flip the normal if needed so it is facing toward the origin
if ctr.dot(n) > 0:
n *= -1
return ctr, n
def printAnalyis(self, filename, result_only):
if result_only:
if(self.isCorner):
print filename, self.vertices.shape[0], self.cornerIntersection[2], (self.leftBelow1Percent + self.rightBelow1Percent)/2.0, (self.leftBelow4Percent + self.rightBelow4Percent)/2.0, self.cornerAngle
else:
print filename, self.vertices.shape[0], self.mean[2], self.totalBelow1percent, self.totalBelow4percent
return
if self.isCorner:
print "Test Corner Intersection (mm): ", self.cornerIntersection
print "Corner Angle(deg): ", self.cornerAngle
print "Number of points: ", self.vertices.shape[0]
print "Percent of points below 1% err: ", (self.leftBelow1Percent + self.rightBelow1Percent)/2.0
print "Percent of points below 4% err: ", (self.leftBelow4Percent + self.rightBelow4Percent)/2.0
else:
print "Normal Vector: ", self.normal
print "Test Plane Position (mm): ", self.mean
print "Number of points: ", self.vertices.shape[0]
print "Percent of points below 1% err: ", self.totalBelow1percent
print "Percent of points below 4% err: ", self.totalBelow4percent
def generatePlyFile(isCorner):
    """Synthesize a noisy binary_little_endian PLY of a flat wall or a
    90-degree corner at ~4 m and write it to ``testPly.ply``.

    Returns the file reopened for binary reading, positioned at the start.
    """
    gridsize = 100
    dist = 4000      # nominal distance to the target (mm)
    scalediv = 2
    step = (2*dist/scalediv)/gridsize
    noise = 0.012    # +/-1.2% radial noise on each point

    chunks = [
        "ply\n",
        "format binary_little_endian 1.0\n",
        "element vertex " + str(gridsize*gridsize) + "\n",
        "property float x\n",
        "property float y\n",
        "property float z\n",
        "end_header\n",
    ]
    counter = 0
    for x in range(-dist/scalediv, dist/scalediv, step):
        for y in range(-dist/scalediv, dist/scalediv, step):
            counter += 1
            v = np.array((float(x), float(y), float(-dist)))
            if isCorner:
                # two planes meeting along x == 0
                v[2] = -dist - v[0] if x < 0 else -dist + v[0]
            mag = math.sqrt(v.dot(v))
            v /= mag
            mag += random.uniform(-noise*mag, noise*mag) #average will be half
            chunks.append(struct.pack("fff", v[0]*mag, v[1]*mag, v[2]*mag))

    out = open("testPly.ply", 'wb')
    out.write("".join(chunks))
    out.close()
    return open("testPly.ply", 'rb')
def renderPlane(point, normal, size):
    """Draw a white wireframe square (half-extent ``size``) centered at
    ``point`` and oriented perpendicular to ``normal``, plus a short line
    segment indicating the normal direction.

    The rotation axis/angle is derived from the default plane orientation
    (facing +z) and ``normal``; when they are numerically anti-parallel the
    cross product degenerates, so the rotation is skipped.
    """
    forward = np.array([0, 0, 1])
    normal = normal/np.linalg.norm(normal)
    rotVect = np.cross(forward, normal)
    if 1 + forward.dot(normal) < sys.float_info.epsilon:
        # normal ~ -forward: rotation axis would be the zero vector
        angle = 0
        rotVect = normal
    else:
        angle = 180*math.acos(forward.dot(normal))/math.pi
        rotVect = rotVect/np.linalg.norm(rotVect)
    glColor3f(1.0, 1.0, 1.0)
    glPushMatrix()
    glTranslatef(point[0], point[1], point[2])
    glRotatef(angle, rotVect[0], rotVect[1], rotVect[2])
    glBegin(GL_LINES)
    # normal indicator from the plane center
    glVertex3f(0, 0, 0)
    glVertex3f(0, 0, size/2)
    # two diagonals
    glVertex3f(-size, -size, 0)
    glVertex3f(+size, +size, 0)
    glVertex3f(+size, -size, 0)
    glVertex3f(-size, +size, 0)
    # four edges of the square
    glVertex3f(-size, -size, 0)
    glVertex3f(+size, -size, 0)
    glVertex3f(-size, +size, 0)
    glVertex3f(+size, +size, 0)
    glVertex3f(-size, -size, 0)
    glVertex3f(-size, +size, 0)
    glVertex3f(+size, -size, 0)
    glVertex3f(+size, +size, 0)
    glEnd()
    glPopMatrix()
def initPersectiveView():
    """Set up a 70-degree perspective projection looking at the origin from
    (0, 1000, 4000).

    NOTE(review): name typo ('Persective') kept — referenced by __main__.
    """
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(70, 1, 0.1, 10000)
    gluLookAt(0, 1000, 4000, 0, 0, 0, 0, 1, 0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
def initTopDownView():
    """Set up an orthographic top-down projection (looking along -y) covering
    a 6 m square around the origin."""
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glOrtho(-3000, 3000, -3000, 3000, -10000, 10000)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    # rotate the modelview so the camera looks straight down
    glRotatef(90, 1, 0, 0)
def displayFunc():
    """GLUT display callback: draw the point cloud, the fitted plane(s),
    frustum guide lines and a 'Z' label, then swap buffers.

    Re-posts a redisplay every frame so the perspective view keeps rotating.
    """
    global viewRotation, myPly, viewTopDown
    glClear(GL_COLOR_BUFFER_BIT)
    if viewTopDown:
        initTopDownView()
        viewRotation = 0
    else:
        initPersectiveView()
        viewRotation += 1.0  # slow auto-rotation: one degree per frame
    glRotatef(viewRotation, 0, 1, 0)
    # center the view on the cloud mean (cloud normally sits at negative z)
    if myPly.mean[2] > 0:
        glTranslatef(myPly.mean[0], myPly.mean[1], myPly.mean[2])
    else:
        glTranslatef(-myPly.mean[0], -myPly.mean[1], -myPly.mean[2])
    #render points (left/right halves colored differently for corners)
    glPointSize(2.0)
    glColor3f(1.0, 0.0, 0.0)
    glPushMatrix()
    glBegin(GL_POINTS)
    for v in myPly.rendervertices:
        if(myPly.isCorner):
            if(v[0] < 0):
                glColor3f(1.0, 0.0, 0.0)
            else:
                glColor3f(0.0, 1.0, 1.0)
        glVertex3f(v[0], v[1], v[2])
    glEnd()
    #render best fit planes
    if myPly.isCorner:
        renderPlane(myPly.leftMean, myPly.leftNormal, 500)
        renderPlane(myPly.rightMean, myPly.rightNormal, 500)
    else:
        renderPlane(myPly.mean, myPly.normal, 500)
    glPopMatrix()
    #render frustum lines (grey guide rays and depth tick marks)
    glColor3f(0.4, 0.4, 0.4)
    frustumDist = -4000
    frustumWidthRatio = 0.5
    glBegin(GL_LINES)
    glVertex3f(0.0, 0.0, 0.0)
    glVertex3f(0.0, 0.0, frustumDist)
    glVertex3f(0.0, 0.0, 0.0)
    glVertex3f(-frustumWidthRatio*frustumDist, 0.0, frustumDist)
    glVertex3f(0.0, 0.0, 0.0)
    glVertex3f(frustumWidthRatio*frustumDist, 0.0, frustumDist)
    # horizontal tick every meter along -z
    for i in range(-1000, frustumDist-1, -1000):
        glVertex3f(frustumWidthRatio*i, 0.0, i)
        glVertex3f(-frustumWidthRatio*i, 0.0, i)
    glEnd()
    #render Z- label (line-segment glyph just past the frustum end)
    glPushMatrix()
    labelSize = 100
    glTranslatef(0, 0, frustumDist - 2*labelSize)
    glBegin(GL_LINES)
    glVertex3f(labelSize, 0.0, labelSize)
    glVertex3f(-labelSize, 0.0, labelSize)
    glVertex3f(-labelSize, 0.0, labelSize)
    glVertex3f(labelSize, 0.0, -labelSize)
    glVertex3f(-labelSize, 0.0, -labelSize)
    glVertex3f(labelSize, 0.0, -labelSize)
    glVertex3f(-2*labelSize, 0.0, 0.0)
    glVertex3f(- labelSize, 0.0, 0.0)
    glEnd()
    glPopMatrix()
    glutSwapBuffers()
    glutPostRedisplay()
def keyboardFunc(key, x, y):
    """GLUT keyboard callback: any key toggles between the rotating
    perspective view and the static top-down view."""
    global viewTopDown
    viewTopDown = not viewTopDown
if __name__ == '__main__':
    # --- command line -------------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', action='store', dest='filename', help='PLY file (binary little endian or ascii)')
    parser.add_argument('-x', dest='xOffset', help='Horz offset for corner split', default=0)
    parser.add_argument('-c', nargs=4, dest='angle', help='Crop angles in degrees (minH, maxH, minV, maxV)')
    parser.add_argument('-d', dest='distance', help='Known distance to wall or corner point(mm)', default=-4000)
    parser.add_argument('-s', dest='scale', help='Adjust scale of the data', default=1)
    parser.add_argument('-rotz', action="store_true", default=False, help='Rot 90 deg on Z axis')
    parser.add_argument('-corner', action="store_true", default=False, help='Corner Dataset')
    parser.add_argument('-best_fit', action="store_true", default=False, help='Use Best Fit Plane')
    parser.add_argument('-no_vis', action="store_true", default=False, help='No visualization')
    parser.add_argument('-result_only', action="store_true", default=False, help='Result text only')
    results = parser.parse_args()
    np.set_printoptions(precision=3)
    np.set_printoptions(suppress=True)
    resultOnlyOutput = results.result_only
    xOffsetMM = 0
    cropAngles = [-90, 90, -90, 90]  # minH, maxH, minV, maxV (degrees)
    if results.angle is not None:
        cropAngles[0] = float(results.angle[0])
        cropAngles[1] = float(results.angle[1])
        cropAngles[2] = float(results.angle[2])
        cropAngles[3] = float(results.angle[3])
    if results.xOffset is not None:
        xOffsetMM = float(results.xOffset)
    # --- load (file or synthetic), analyse, report --------------------------
    myPly = ply()
    if results.filename is None:
        if not resultOnlyOutput:
            print "Generating Synthetic Data"
        file = generatePlyFile(results.corner)
    else:
        filename = results.filename
        if not resultOnlyOutput:
            print "Loading ", filename
        file = open(filename, 'rb')
    # NOTE(review): int() truncates fractional -s/-d values (e.g. '-s 0.5'
    # raises ValueError); float() seems intended — confirm.
    myPly.load(file, xOffsetMM, cropAngles[0], cropAngles[1], cropAngles[2], cropAngles[3], results.rotz, int(results.scale))
    myPly.computeAnalysis(results.corner, results.best_fit, int(results.distance))
    myPly.printAnalyis(results.filename, results.result_only)
    # --- optional interactive visualization ---------------------------------
    if not results.no_vis and not resultOnlyOutput:
        print " - press any key to toggle top down view"
        print " - use host OS keyboard command to quit"
        glutInit()
        glutInitWindowSize(800, 800)
        glutCreateWindow("Ply Data")
        # NOTE(review): glutInitDisplayMode normally must be called *before*
        # glutCreateWindow to take effect — confirm intended ordering.
        glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
        glutDisplayFunc(displayFunc)
        glutKeyboardFunc(keyboardFunc)
        initPersectiveView()
        glutMainLoop()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.